size
int64 0
304k
| ext
stringclasses 1
value | lang
stringclasses 1
value | branch
stringclasses 1
value | content
stringlengths 0
304k
| avg_line_length
float64 0
238
| max_line_length
int64 0
304k
|
---|---|---|---|---|---|---|
6,205 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
from odoo.tests import common
import odoo
# Base64-encoded 1x1 GIF image used as a small binary payload throughout these tests.
GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
class test_ir_http_mimetype(common.TransactionCase):
    """ Tests for `ir.http.binary_content`: mimetype resolution (Content-Type
    header) and access control (status codes with/without access tokens).
    """

    def test_ir_http_mimetype_attachment(self):
        """ Test mimetype for attachment """
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'file.gif'})
        status, headers, content = self.env['ir.http'].binary_content(
            id=attachment.id,
            mimetype=None,
            default_mimetype='application/octet-stream',
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_mimetype_attachment_name(self):
        """ Test mimetype for attachment with bad name"""
        # GIF payload deliberately stored under a .png name: the mimetype is
        # currently derived from the file name, not the content (see TODO below).
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'file.png'})
        status, headers, content = self.env['ir.http'].binary_content(
            id=attachment.id,
            mimetype=None,
            default_mimetype='application/octet-stream',
        )
        mimetype = dict(headers).get('Content-Type')
        # TODO: fix and change it in master, should be image/gif
        self.assertEqual(mimetype, 'image/png')

    def test_ir_http_mimetype_basic_field(self):
        """ Test mimetype for classic field """
        partner = self.env['res.partner'].create({
            'image_1920': GIF,
            'name': 'Test mimetype basic field',
        })
        status, headers, content = self.env['ir.http'].binary_content(
            model='res.partner',
            id=partner.id,
            field='image_1920',
            default_mimetype='application/octet-stream',
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_mimetype_computed_field(self):
        """ Test mimetype for computed field which resizes the picture"""
        prop = self.env['ir.property'].create({
            'fields_id': self.env['ir.model.fields'].search([], limit=1).id,
            'name': "Property binary",
            'value_binary': GIF,
            'type': 'binary',
        })
        resized = odoo.tools.image_process(prop.value_binary, size=(64, 64))
        # Simulate a computed field which resizes and that is not attachment=True (E.G. on product)
        prop.write({'value_binary': resized})
        status, headers, content = self.env['ir.http'].binary_content(
            model='ir.property',
            id=prop.id,
            field='value_binary',
            default_mimetype='application/octet-stream',
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_attachment_access(self):
        """ Test attachment access with and without access token """
        public_user = self.env.ref('base.public_user')
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'image.gif'
        })
        defaults = {
            'id': attachment.id,
            'default_mimetype': 'image/gif',
        }

        def test_access(**kwargs):
            """ Fetch the attachment as the public user, overriding `defaults`
            with `kwargs`, and return the HTTP status code. """
            # DLE P69: `test_ir_http_attachment_access`
            # `binary_content` relies on the `__last_update` to determine if a user has the read access to an attachment.
            # as the attachment has just been created above as sudo, the data is in cache and if we don't remove it the below
            # `test_access` won't have to fetch it and therefore won't raise the AccessError as it's already in the cache
            # `__last_update` must be removed from the cache when `test_access` is called, which happens and recomputes the todos
            attachment.flush()
            attachment.invalidate_cache()
            status, _, _ = self.env['ir.http'].with_user(public_user).binary_content(
                **dict(defaults, **kwargs)
            )
            return status

        status = test_access()
        self.assertEqual(status, 403, "no access")

        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 403,
            "no access if access token for attachment without access token")

        attachment.access_token = u'Secret'
        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 200, "access for correct access token")

        status = test_access(access_token=u'Wrong')
        self.assertEqual(status, 403, "no access for wrong access token")

        attachment.public = True
        status = test_access()
        self.assertEqual(status, 200, "access for attachment with access")

        status = test_access(access_token=u'Wrong')
        self.assertEqual(status, 403,
            "no access for wrong access token for attachment with access")

        attachment.unlink()
        status = test_access()
        self.assertEqual(status, 404, "no access for deleted attachment")

        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 404,
            "no access with access token for deleted attachment")

    def test_ir_http_default_filename_extension(self):
        """ Test attachment extension when the record has a dot in its name """
        self.env.user.name = "Mr. John"
        self.env.user.image_128 = GIF
        _, _, filename, _, _ = self.env['ir.http']._binary_record_content(
            self.env.user, 'image_128',
        )
        # The extension is appended even though the name contains a dot.
        self.assertEqual(filename, "Mr. John.gif")

        # For attachment, the name is considered to have the extension in the name
        # and thus the extension should not be added again.
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'image.gif'
        })
        _, _, filename, _, _ = self.env['ir.http']._binary_record_content(
            attachment,
        )
        self.assertEqual(filename, 'image.gif')

    def test_ir_http_public_user_image(self):
        # The public user has no read access to its own image: expect a 404 status.
        public_user = self.env.ref('base.public_user')
        code, *_ = self.env['ir.http']._binary_record_content(public_user.with_user(public_user), 'image_128')
        self.assertEqual(code, 404)
| 39.775641 | 6,205 |
7,504 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.base.models.res_users import is_selection_groups, get_selection_groups
from odoo.tests.common import TransactionCase, Form, tagged
class TestUsers(TransactionCase):
    """ Tests around user creation and the user/partner company link. """

    def test_name_search(self):
        """ Check name_search on user. """
        Users = self.env['res.users']
        vlad = Users.create({'name': 'Flad the Impaler', 'login': 'vlad'})
        vladi = Users.create({'name': 'Wlad the Impaler', 'login': 'vladi'})
        unrelated = Users.create({'name': 'Nothing similar', 'login': 'nothing similar'})
        everyone = vlad | vladi | unrelated

        def found(term, operator):
            # name_search may also return pre-existing users: restrict the
            # result to the records created above before comparing.
            pairs = Users.name_search(term, operator=operator)
            return Users.browse(pair[0] for pair in pairs) & everyone

        self.assertEqual(found('vlad', 'ilike'), vlad)
        self.assertEqual(found('vlad', 'not ilike'), everyone)
        self.assertEqual(found('', 'ilike'), everyone)
        self.assertEqual(found('', 'not ilike'), Users)
        self.assertEqual(found('lad', 'ilike'), vlad | vladi)
        self.assertEqual(found('lad', 'not ilike'), unrelated)

    def test_user_partner(self):
        """ Check that the user partner is well created """
        Users = self.env['res.users']
        Companies = self.env['res.company']

        company_1 = Companies.create({'name': 'company_1'})
        company_2 = Companies.create({'name': 'company_2'})
        partner = self.env['res.partner'].create({
            'name': 'Bob Partner',
            'company_id': company_2.id,
        })

        # case 1 : the user has no partner
        new_user = Users.create({
            'name': 'John Smith',
            'login': 'jsmith',
            'company_ids': [company_1.id],
            'company_id': company_1.id,
        })
        self.assertFalse(
            new_user.partner_id.company_id,
            "The partner_id linked to a user should be created without any company_id")

        # case 2 : the user has a partner
        new_user = Users.create({
            'name': 'Bob Smith',
            'login': 'bsmith',
            'company_ids': [company_1.id],
            'company_id': company_1.id,
            'partner_id': partner.id,
        })
        self.assertEqual(
            new_user.partner_id.company_id,
            company_1,
            "If the partner_id of a user has already a company, it is replaced by the user company"
        )

    def test_change_user_company(self):
        """ Check the partner company update when the user company is changed """
        user = self.env['res.users'].create({'name': 'John Smith', 'login': 'jsmith'})
        Companies = self.env['res.company']
        company_1 = Companies.create({'name': 'company_1'})
        company_2 = Companies.create({'name': 'company_2'})
        user.company_ids += company_1
        user.company_ids += company_2

        # 1: the partner has no company_id, no modification
        user.write({'company_id': company_1.id})
        self.assertFalse(
            user.partner_id.company_id,
            "On user company change, if its partner_id has no company_id,"
            "the company_id of the partner_id shall NOT be updated")

        # 2: the partner has a company_id different from the new one, update it
        user.partner_id.write({'company_id': company_1.id})
        user.write({'company_id': company_2.id})
        self.assertEqual(
            user.partner_id.company_id,
            company_2,
            "On user company change, if its partner_id has already a company_id,"
            "the company_id of the partner_id shall be updated"
        )
@tagged('post_install', '-at_install')
class TestUsers2(TransactionCase):
    """ Tests for the "reified" group pseudo-fields (`in_group_*` /
    `sel_groups_*`) exposed on res.users forms. """

    def test_reified_groups(self):
        """ The groups handler doesn't use the "real" view with pseudo-fields
        during installation, so it always works (because it uses the normal
        groups_id field).
        """
        # use the specific views which has the pseudo-fields
        f = Form(self.env['res.users'], view='base.view_users_form')
        f.name = "bob"
        f.login = "bob"
        user = f.save()
        self.assertIn(self.env.ref('base.group_user'), user.groups_id)

    def test_selection_groups(self):
        # create 3 groups that should be in a selection
        app = self.env['ir.module.category'].create({'name': 'Foo'})
        group1, group2, group0 = self.env['res.groups'].create([
            {'name': name, 'category_id': app.id}
            for name in ('User', 'Manager', 'Visitor')
        ])
        # THIS PART IS NECESSARY TO REPRODUCE AN ISSUE: group1.id < group2.id < group0.id
        self.assertLess(group1.id, group2.id)
        self.assertLess(group2.id, group0.id)
        # implication order is group0 < group1 < group2
        group2.implied_ids = group1
        group1.implied_ids = group0
        groups = group0 + group1 + group2

        # determine the name of the field corresponding to groups
        fname = next(
            name
            for name in self.env['res.users'].fields_get()
            if is_selection_groups(name) and group0.id in get_selection_groups(name)
        )
        self.assertCountEqual(get_selection_groups(fname), groups.ids)

        # create a user
        user = self.env['res.users'].create({'name': 'foo', 'login': 'foo'})

        # put user in group0, and check field value
        user.write({fname: group0.id})
        self.assertEqual(user.groups_id & groups, group0)
        self.assertEqual(user.read([fname])[0][fname], group0.id)

        # put user in group1, and check field value
        # (implied groups are added too: group1 implies group0)
        user.write({fname: group1.id})
        self.assertEqual(user.groups_id & groups, group0 + group1)
        self.assertEqual(user.read([fname])[0][fname], group1.id)

        # put user in group2, and check field value
        user.write({fname: group2.id})
        self.assertEqual(user.groups_id & groups, groups)
        self.assertEqual(user.read([fname])[0][fname], group2.id)

    def test_read_group_with_reified_field(self):
        """ Check that read_group gets rid of reified fields"""
        User = self.env['res.users']
        fnames = ['name', 'email', 'login']
        # find some reified field name
        reified_fname = next(
            fname
            for fname in User.fields_get()
            if fname.startswith(('in_group_', 'sel_groups_'))
        )
        # check that the reified field name has no effect in fields
        res_with_reified = User.read_group([], fnames + [reified_fname], ['company_id'])
        res_without_reified = User.read_group([], fnames, ['company_id'])
        self.assertEqual(res_with_reified, res_without_reified, "Reified fields should be ignored")

        # Verify that the read_group is raising an error if reified field is used as groupby
        with self.assertRaises(ValueError):
            User.read_group([], fnames + [reified_fname], [reified_fname])
| 38.680412 | 7,504 |
2,213 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# This assumes an existing but uninitialized database.
from contextlib import contextmanager
import unittest
from odoo import api, registry, SUPERUSER_ID
from odoo.tests import common
from odoo.tests.common import BaseCase
from odoo.modules.registry import Registry
@contextmanager
def environment():
    """ Return an environment with a new cursor for the current database; the
    cursor is committed and closed after the context block.
    """
    db_registry = registry(common.get_db_name())
    # The registry cursor context manager commits on success and always closes.
    with db_registry.cursor() as cr:
        yield api.Environment(cr, SUPERUSER_ID, {})
# Name of the demo module and of one of its models, exercised by the
# install/uninstall tests below.
MODULE = 'test_uninstall'
MODEL = 'test_uninstall.model'
class TestUninstall(BaseCase):
    """
    Test the install/uninstall of a test module. The module is available in
    `odoo.tests` which should be present in the addons-path.
    """

    def test_01_install(self):
        """ Check a few things showing the module is installed. """
        with environment() as env:
            module = env['ir.module.module'].search([('name', '=', MODULE)])
            assert len(module) == 1
            module.button_install()
        # Rebuild the registry with update_module=True so the pending
        # installation is actually applied before checking its effects.
        Registry.new(common.get_db_name(), update_module=True)

        with environment() as env:
            self.assertIn('test_uninstall.model', env.registry)
            self.assertTrue(env['ir.model.data'].search([('module', '=', MODULE)]))
            self.assertTrue(env['ir.model.fields'].search([('model', '=', MODEL)]))

    def test_02_uninstall(self):
        """ Check a few things showing the module is uninstalled. """
        with environment() as env:
            module = env['ir.module.module'].search([('name', '=', MODULE)])
            assert len(module) == 1
            module.button_uninstall()
        # Rebuild the registry so the uninstallation takes effect.
        Registry.new(common.get_db_name(), update_module=True)

        with environment() as env:
            self.assertNotIn('test_uninstall.model', env.registry)
            self.assertFalse(env['ir.model.data'].search([('module', '=', MODULE)]))
            self.assertFalse(env['ir.model.fields'].search([('model', '=', MODEL)]))
if __name__ == '__main__':
    # Allow running this test file directly, outside the Odoo test runner.
    unittest.main()
| 34.578125 | 2,213 |
54,763 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import collections
import json
import os.path
import re
import markupsafe
from lxml import etree, html
from lxml.builder import E
from odoo.modules import get_module_resource
from odoo.tests.common import TransactionCase
from odoo.addons.base.models.qweb import QWebException
from odoo.tools import misc, mute_logger
from odoo.tools.json import scriptsafe as json_scriptsafe
# Alias the builtin eval under a name that makes its unsafety explicit.
unsafe_eval = eval
class TestQWebTField(TransactionCase):
    """ Tests for the QWeb `t-field` directive: branding attributes, escaping,
    rejected tags, t-options handling and default-value rendering. """

    def setUp(self):
        super(TestQWebTField, self).setUp()
        # inherit_branding makes the renderer emit the data-oe-* attributes.
        self.env_branding = self.env(context={'inherit_branding': True})
        self.engine = self.env_branding['ir.qweb']

    def test_trivial(self):
        field = etree.Element('span', {'t-field': u'company.name'})
        company = self.env['res.company'].create({'name': "My Test Company"})

        result = self.engine._render(field, {'company': company})
        self.assertEqual(
            etree.fromstring(result),
            etree.fromstring(u'<span data-oe-model="res.company" data-oe-id="%d" '
                  u'data-oe-field="name" data-oe-type="char" '
                  u'data-oe-expression="company.name">%s</span>' % (
                company.id,
                u"My Test Company",
            )),
        )

    def test_i18n(self):
        field = etree.Element('span', {'t-field': u'company.name'})
        # Non-ASCII name with markup-significant characters: must be escaped.
        s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
        company = self.env['res.company'].create({'name': s})

        result = self.engine._render(field, {'company': company})
        self.assertEqual(
            etree.fromstring(result),
            etree.fromstring(u'<span data-oe-model="res.company" data-oe-id="%d" '
                  u'data-oe-field="name" data-oe-type="char" '
                  u'data-oe-expression="company.name">%s</span>' % (
                company.id,
                misc.html_escape(s),
            )),
        )

    def test_reject_crummy_tags(self):
        # t-field is not allowed on table structure elements such as <td>.
        field = etree.Element('td', {'t-field': u'company.name'})

        with self.assertRaisesRegex(QWebException, r'^RTE widgets do not work correctly'):
            self.engine._render(field, {'company': None})

    def test_reject_t_tag(self):
        field = etree.Element('t', {'t-field': u'company.name'})

        with self.assertRaisesRegex(QWebException, r'^t-field can not be used on a t element'):
            self.engine._render(field, {'company': None})

    def test_render_t_options(self):
        # t-options-* attributes override keys of the t-options dict
        # (here 'widget' becomes 'float' and precision is added).
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy"><root><span t-esc="5" t-options="{'widget': 'char'}" t-options-widget="'float'" t-options-precision="4"/></root></t>
            """
        })
        text = etree.fromstring(view1._render()).find('span').text
        self.assertEqual(text, u'5.0000')

    def test_xss_breakout(self):
        view = self.env['ir.ui.view'].create({
            'name': 'dummy', 'type': 'qweb',
            'arch': """
                <t t-name="base.dummy">
                    <root>
                        <script type="application/javascript">
                            var s = <t t-esc="json.dumps({'key': malicious})"/>;
                        </script>
                    </root>
                </t>
            """
        })
        rendered = view._render({'malicious': '1</script><script>alert("pwned")</script><script>'})
        self.assertIn('alert', rendered, "%r doesn't seem to be rendered" % rendered)
        doc = etree.fromstring(rendered)
        # The injected </script><script> must not create a second script node.
        self.assertEqual(len(doc.xpath('//script')), 1)

    def test_default_value(self):
        Partner = self.env['res.partner']
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="out-field-default">
                <div t-field="record.name">
                    DEFAULT
                    <t t-out="'Text'" />
                </div>
            </t>''',
        })

        # record.name is non-empty
        result = """
                <div>My Company</div>
            """
        rendered = self.env['ir.qweb']._render(t.id, {
            'record': Partner.new({'name': 'My Company'})
        })
        self.assertEqual(str(rendered.strip()), result.strip(), "")

        # record.name is empty but not False or None, we should render depending on force_display
        result = """
                <div></div>
            """
        rendered = self.env['ir.qweb']._render(t.id, {
            'record': Partner.new({'name': ''})
        })
        self.assertEqual(str(rendered.strip()), result.strip())

        # record.name is False or None, we should render field default value
        result = """
                <div>
                    DEFAULT
                    Text
                </div>
            """
        rendered = self.env['ir.qweb']._render(t.id, {
            'record': Partner.new({})
        })
        self.assertEqual(str(rendered.strip()), result.strip())

    def test_no_value_no_default_value(self):
        # no value, no default value with attributes on t-field
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="out-field-default">
                <div t-field="record.name"/>
            </t>''',
        })
        result = """
                <div data-oe-model="res.partner" data-oe-field="name" data-oe-type="char" data-oe-expression="record.name" data-oe-xpath="/t[1]/div[1]"></div>
            """
        # inherit_branding puts attribute on the field tag as well as force the display in case the field is empty
        rendered = self.env['ir.qweb'].with_context(inherit_branding=True)._render(t.id, {
            'record': self.env['res.partner'].new({}),
        })
        self.assertEqual(str(rendered.strip()), result.strip())
class TestQWebNS(TransactionCase):
    def test_render_static_xml_with_namespace(self):
        """ Test the rendering on a namespaced view with no static content. The resulting string should be untouched.
        """
        expected_result = """
            <root>
                <h:table xmlns:h="http://www.example.org/table">
                    <h:tr>
                        <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
                <f:table xmlns:f="http://www.example.org/furniture">
                    <f:width>80</f:width>
                </f:table>
            </root>
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy">%s</t>
            """ % expected_result
        })
        # Compare parsed trees rather than strings so whitespace is irrelevant.
        self.assertEqual(etree.fromstring(view1._render()), etree.fromstring(expected_result))
    def test_render_static_xml_with_namespace_2(self):
        """ Test the rendering on a namespaced view with no static content. The resulting string should be untouched.
        """
        # Same as above, but with a default namespace on the root element.
        expected_result = """
            <html xmlns="http://www.w3.org/HTML/1998/html4" xmlns:xdc="http://www.xml.com/books">
                <head>
                    <title>Book Review</title>
                </head>
                <body>
                    <xdc:bookreview>
                        <xdc:title>XML: A Primer</xdc:title>
                        <table>
                            <tr align="center">
                                <td>Author</td><td>Price</td>
                                <td>Pages</td><td>Date</td>
                            </tr>
                            <tr align="left">
                                <td><xdc:author>Simon St. Laurent</xdc:author></td>
                                <td><xdc:price>31.98</xdc:price></td>
                                <td><xdc:pages>352</xdc:pages></td>
                                <td><xdc:date>1998/01</xdc:date></td>
                            </tr>
                        </table>
                    </xdc:bookreview>
                </body>
            </html>
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy">%s</t>
            """ % expected_result
        })
        self.assertEqual(etree.fromstring(view1._render()), etree.fromstring(expected_result))
    def test_render_static_xml_with_useless_distributed_namespace(self):
        """ Test that redundant namespaces are stripped upon rendering.
        """
        # xmlns:h is repeated on <h:tr> and <h:td> with the same URI as the
        # ancestor declaration; the renderer should keep only the outer one.
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy">
                    <root>
                        <h:table xmlns:h="http://www.example.org/table">
                            <h:tr xmlns:h="http://www.example.org/table">
                                <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                                <h:td xmlns:h="http://www.example.org/table">Bananas</h:td>
                            </h:tr>
                        </h:table>
                    </root>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""
            <root>
                <h:table xmlns:h="http://www.example.org/table">
                    <h:tr>
                        <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
            </root>
        """)
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_static_xml_with_namespace_3(self):
        """ Rendering keeps namespaced attributes (xsi:schemaLocation) intact. """
        expected_result = u"""
            <cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd"></cfdi:Comprobante>
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">%s</t>
            """ % expected_result
        })
        self.assertEqual(etree.fromstring(view1._render()), etree.fromstring(expected_result))
    def test_render_static_xml_with_namespace_dynamic(self):
        """ Test the rendering on a namespaced view with dynamic URI (need default namespace uri).
        """
        # The xmlns:h URIs are injected at render time through t-att dicts.
        tempate = u"""
            <root xmlns:h="https://default.namespace.url/h">
                <h:table t-att="{'xmlns:h': h1}">
                    <h:tr>
                        <h:td t-att="{'xmlns:h': h2}">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
            </root>
        """
        expected_result = u"""
            <root xmlns:h="https://default.namespace.url/h">
                <h:table xmlns:h="%(h1)s">
                    <h:tr>
                        <h:td xmlns:h="%(h2)s">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
            </root>
        """
        values = dict(h1="http://www.example.org/table", h2="http://www.w3.org/TD/html4/")
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">%s</t>
            """ % tempate
        })
        rendering = view1._render(values, engine='ir.qweb')
        self.assertEqual(etree.fromstring(rendering), etree.fromstring(expected_result % values))
    def test_render_static_xml_with_namespace_dynamic_2(self):
        """ Test the rendering on a namespaced view with dynamic URI (need default namespace uri).
        Default URIs must be different.
        """
        tempate = u"""
            <root xmlns:f="https://default.namespace.url/f" xmlns:h="https://default.namespace.url/h" >
                <h:table t-att="{'xmlns:h': h1}">
                    <h:tr>
                        <h:td t-att="{'xmlns:h': h2}">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
                <f:table t-att="{'xmlns:f': f}">
                    <f:width>80</f:width>
                </f:table>
            </root>
        """
        expected_result = u"""
            <root xmlns:f="https://default.namespace.url/f" xmlns:h="https://default.namespace.url/h">
                <h:table xmlns:h="%(h1)s">
                    <h:tr>
                        <h:td xmlns:h="%(h2)s">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
                <f:table xmlns:f="%(f)s">
                    <f:width>80</f:width>
                </f:table>
            </root>
        """
        values = dict(h1="http://www.example.org/table", h2="http://www.w3.org/TD/html4/", f="http://www.example.org/furniture")
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">%s</t>
            """ % tempate
        })
        rendering = view1._render(values, engine='ir.qweb')
        self.assertEqual(etree.fromstring(rendering), etree.fromstring(expected_result % values))
    def test_render_dynamic_xml_with_namespace_t_esc(self):
        """ Test that rendering a template containing a node having both an ns declaration and a t-esc attribute correctly
        handles the t-esc attribute and keeps the ns declaration.
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" t-esc="'test'"/>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""<Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2">test</Invoice>""")
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_dynamic_xml_with_namespace_t_esc_with_useless_distributed_namespace(self):
        """ Test that rendering a template containing a node having both an ns declaration and a t-esc attribute correctly
        handles the t-esc attribute and keeps the ns declaration, and distributes correctly the ns declaration to its children.
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" t-attf-test="test">
                        <cac:Test xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2">blabla</cac:Test>
                    </Invoice>
                </t>
            """
        })
        # The child's redundant xmlns:cac declaration must be stripped.
        expected_result = etree.fromstring(u"""
            <Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" test="test">
                <cac:Test>blabla</cac:Test>
            </Invoice>
        """)
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_dynamic_xml_with_namespace_t_attf(self):
        """ Test that rendering a template containing a node having both an ns declaration and a t-attf attribute correctly
        handles the t-attf attribute and keeps the ns declaration.
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <root>
                        <h:table xmlns:h="http://www.example.org/table">
                            <h:tr>
                                <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                                <h:td>Bananas</h:td>
                            </h:tr>
                        </h:table>
                        <f:table xmlns:f="http://www.example.org/furniture">
                            <f:width t-attf-test="1">80</f:width>
                        </f:table>
                    </root>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""
            <root>
                <h:table xmlns:h="http://www.example.org/table">
                    <h:tr>
                        <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
                <f:table xmlns:f="http://www.example.org/furniture">
                    <f:width test="1">80</f:width>
                </f:table>
            </root>
        """)
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_dynamic_xml_with_namespace_t_attf_with_useless_distributed_namespace(self):
        """ Test that rendering a template containing a node having both an ns declaration and a t-attf attribute correctly
        handles the t-attf attribute and that redundant namespaces are stripped upon rendering.
        """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <root>
                        <h:table xmlns:h="http://www.example.org/table">
                            <h:tr>
                                <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                                <h:td>Bananas</h:td>
                            </h:tr>
                        </h:table>
                        <f:table xmlns:f="http://www.example.org/furniture">
                            <f:width xmlns:f="http://www.example.org/furniture" t-attf-test="1">80</f:width>
                        </f:table>
                    </root>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""
            <root>
                <h:table xmlns:h="http://www.example.org/table">
                    <h:tr>
                        <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                        <h:td>Bananas</h:td>
                    </h:tr>
                </h:table>
                <f:table xmlns:f="http://www.example.org/furniture">
                    <f:width test="1">80</f:width>
                </f:table>
            </root>
        """)
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_dynamic_xml_with_namespace_2(self):
        """ t-esc and t-foreach work inside a document with default and prefixed namespaces. """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2" xmlns="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2">
                        <cbc:UBLVersionID t-esc="version_id"/>
                        <t t-foreach="[1, 2, 3, 4]" t-as="value">
                            Oasis <cac:Test t-esc="value"/>
                        </t>
                    </Invoice>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""
            <Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2" xmlns="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2">
                <cbc:UBLVersionID>1.0</cbc:UBLVersionID>
                    Oasis <cac:Test>1</cac:Test>
                    Oasis <cac:Test>2</cac:Test>
                    Oasis <cac:Test>3</cac:Test>
                    Oasis <cac:Test>4</cac:Test>
            </Invoice>
        """)
        self.assertEqual(etree.fromstring(view1._render({'version_id': 1.0})), expected_result)
    def test_render_static_xml_with_namespaced_attributes(self):
        """ Namespaced attributes (xsi:schemaLocation) survive static rendering. """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd">abc</cfdi:Comprobante>
                </t>
            """
        })
        expected_result = etree.fromstring(u"""<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd">abc</cfdi:Comprobante>""")
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_dynamic_xml_with_namespaced_attributes(self):
        """ Namespaced attributes survive rendering when combined with t-esc on the same node. """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd" t-esc="'abc'"/>
                </t>
            """
        })
        expected_result = etree.fromstring("""<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd">abc</cfdi:Comprobante>""")
        self.assertEqual(etree.fromstring(view1._render()), expected_result)
    def test_render_static_xml_with_t_call(self):
        """ t-call of a namespaced sub-template: namespaces are hoisted to the
        root and not re-declared in each called fragment. """
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <cac:fruit xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2"
                               xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2">
                        <cac:table>
                            <cbc:td>Appel</cbc:td>
                            <cbc:td>Pineappel</cbc:td>
                        </cac:table>
                    </cac:fruit>
                </t>
            """
        })
        # Register an xml id by hand so the view can be t-called as "base.dummy".
        self.env.cr.execute("INSERT INTO ir_model_data(name, model, res_id, module)"
                            "VALUES ('dummy', 'ir.ui.view', %s, 'base')", [view1.id])

        # view2 will t-call view1
        view2 = self.env['ir.ui.view'].create({
            'name': "dummy2",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy2">
                    <root xmlns="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2" xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2">
                        <cac:line t-foreach="[1, 2]" t-as="i" t-call="base.dummy"/>
                    </root>
                </t>
            """
        })

        result = view2._render()
        result_etree = etree.fromstring(result)

        # check that the root tag has all its xmlns
        expected_ns = {
            (None, 'urn:oasis:names:specification:ubl:schema:xsd:Invoice-2'),
            ('cac', 'urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2'),
            ('cbc', 'urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2'),
        }
        self.assertEqual(set(result_etree.nsmap.items()), expected_ns)

        # check that the t-call did its work
        cac_lines = result_etree.findall('.//cac:line', namespaces={'cac': 'urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2'})
        self.assertEqual(len(cac_lines), 2)
        self.assertEqual(result.count('Appel'), 2)

        # check that the t-call did not output again the xmlns declaration
        self.assertEqual(result.count('xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2"'), 1)
    def test_render_static_xml_with_extension(self):
        """ Test the extension of a view by an xpath expression on a ns prefixed element.
        """
        # primary view
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': u"""
                <t t-name="base.dummy">
                    <root>
                        <h:table xmlns:h="http://www.example.org/table">
                            <h:tr>
                                <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                                <h:td>Bananas</h:td>
                            </h:tr>
                        </h:table>
                    </root>
                </t>
            """
        })
        # extension patching the primary view
        # the xpath targets the namespaced nodes using Clark ({uri}local) notation
        view2 = self.env['ir.ui.view'].create({
            'name': "dummy_ext",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': u"""
                <xpath expr="//{http://www.example.org/table}table/{http://www.example.org/table}tr">
                    <h:td xmlns:h="http://www.example.org/table">Oranges</h:td>
                </xpath>
            """
        })
        expected_result = etree.fromstring(u"""
            <root>
                <h:table xmlns:h="http://www.example.org/table">
                    <h:tr>
                        <h:td xmlns:h="http://www.w3.org/TD/html4/">Apples</h:td>
                        <h:td>Bananas</h:td>
                        <h:td>Oranges</h:td>
                    </h:tr>
                </h:table>
            </root>
        """)
        # render the primary while forcing the extension into the applied views
        self.assertEqual(
            etree.fromstring(view1.with_context(check_view_ids=[view1.id, view2.id])._render()),
            expected_result
        )
def test_render_dynamic_xml_with_code_error(self):
""" Test that, when rendering a template containing a namespaced node
that evaluates code with errors, the proper exception is raised
"""
view1 = self.env['ir.ui.view'].create({
'name': "dummy",
'type': 'qweb',
'arch': u"""
<t t-name="base.dummy">
<Invoice xmlns:od="http://odoo.com/od">
<od:name t-att-test="'a' + 1"/>
</Invoice>
</t>
"""
})
try:
"" + 0
except TypeError as e:
error_msg = e.args[0]
with self.assertRaises(QWebException, msg=error_msg):
view1._render()
def test_render_t_call_propagates_t_lang(self):
current_lang = 'en_US'
other_lang = 'fr_FR'
lang = self.env['res.lang']._activate_lang(other_lang)
lang.write({
'decimal_point': '*',
'thousands_sep': '/'
})
view1 = self.env['ir.ui.view'].create({
'name': "callee",
'type': 'qweb',
'arch': u"""
<t t-name="base.callee">
<t t-esc="9000000.00" t-options="{'widget': 'float', 'precision': 2}" />
</t>
"""
})
self.env['ir.model.data'].create({
'name': 'callee',
'model': 'ir.ui.view',
'module': 'base',
'res_id': view1.id,
})
view2 = self.env['ir.ui.view'].create({
'name': "calling",
'type': 'qweb',
'arch': u"""
<t t-name="base.calling">
<t t-call="base.callee" t-lang="'%s'" />
</t>
""" % other_lang
})
rendered = view2.with_context(lang=current_lang)._render().strip()
self.assertEqual(rendered, '9/000/000*00')
    def test_render_barcode(self):
        """The 'barcode' t-field widget must emit an inline base64 PNG <img>,
        honouring symbology, img_style and img_alt options.
        """
        # note: 'bacode_test' is just a record name typo; it never appears in
        # the rendered output (the alt is derived from the barcode value)
        partner = self.env['res.partner'].create({
            'name': 'bacode_test',
            'barcode': 'test'
        })
        view = self.env['ir.ui.view'].create({
            'name': "a_barcode_view",
            'type': 'qweb',
        })
        # default symbology, alt auto-generated from the field value
        view.arch = u"""<div t-field="partner.barcode" t-options="{'widget': 'barcode', 'width': 100, 'height': 30}"/>"""
        rendered = view._render(values={'partner': partner}).strip()
        self.assertRegex(rendered, r'<div><img alt="Barcode test" src="data:image/png;base64,\S+"></div>')
        # EAN13 requires a numerically valid code; style/alt overridden
        partner.barcode = '4012345678901'
        view.arch = u"""<div t-field="partner.barcode" t-options="{'widget': 'barcode', 'symbology': 'EAN13', 'width': 100, 'height': 30, 'img_style': 'width:100%;', 'img_alt': 'Barcode'}"/>"""
        ean_rendered = view._render(values={'partner': partner}).strip()
        self.assertRegex(ean_rendered, r'<div><img style="width:100%;" alt="Barcode" src="data:image/png;base64,\S+"></div>')
        # 'auto' symbology must pick a suitable encoding by itself
        view.arch = u"""<div t-field="partner.barcode" t-options="{'widget': 'barcode', 'symbology': 'auto', 'width': 100, 'height': 30, 'img_style': 'width:100%;', 'img_alt': 'Barcode'}"/>"""
        auto_rendered = view._render(values={'partner': partner}).strip()
        self.assertRegex(auto_rendered, r'<div><img style="width:100%;" alt="Barcode" src="data:image/png;base64,\S+"></div>')
class TestQWebBasic(TransactionCase):
    """Low-level QWeb engine tests: expression compilation, t-foreach,
    attribute/output escaping, t-set bodies, t-out/t-esc and error reporting.
    """
    def test_compile_expr(self):
        """_compile_expr must turn a qweb expression into python code reading
        its free variables from the ``values`` dict.
        """
        # each tuple is (qweb expression, rendering values, expected result)
        tests = [
            #pylint: disable=C0326
            # source, values, result
            ("1 +2+ 3", {}, 6),
            ("(((1 +2+ 3)))", {}, 6),
            ("(1) +(2+ (3))", {}, 6),
            ("a == 5", {'a': 5}, True),
            ("{'a': True}", {}, {'a': True}),
            ("object.count(1)", {'object': [1, 2, 1 ,1]}, 3),
            ("dict(a=True)", {}, {'a': True}),
            ("fn(a=11, b=22) or a", {'a': 1, 'fn': lambda a,b: 0}, 1),
            ("fn(a=11, b=22) or a", {'a': 1, 'fn': lambda a,b: b}, 22),
            ("(lambda a: a)(5)", {}, 5),
            ("(lambda a: a[0])([5])", {}, 5),
            ("(lambda test: len(test))('aaa')", {}, 3),
            ("{'a': lambda a: a[0], 'b': 3}['a']([5])", {}, 5),
            ("list(map(lambda a: a[0], r))", {'r': [(1,11), (2,22)]}, [1, 2]),
            ("z + (head or 'z')", {'z': 'a'}, "az"),
            ("z + (head or 'z')", {'z': 'a', 'head': 'b'}, "ab"),
            ("{a:b for a, b in [(1,11), (2, 22)]}", {}, {1: 11, 2: 22}),
            ("any({x == 2 for x in [1,2,3]})", {}, True),
            ("any({x == 5 for x in [1,2,3]})", {}, False),
            ("{x:y for x,y in [('a', 11),('b', 22)]}", {}, {'a': 11, 'b': 22}),
            ("[(y,x) for x,y in [(1, 11),(2, 22)]]", {}, [(11, 1), (22, 2)]),
            ("(lambda a: a + 5)(x)", {'x': 10}, 15),
            ("(lambda a: a + x)(5)", {'x': 10}, 15),
            ("sum(x for x in range(4)) + ((x))", {'x': 10}, 16),
            ("['test_' + x for x in ['a', 'b']]", {}, ['test_a', 'test_b'])
        ]
        IrQweb = self.env['ir.qweb']
        for expr, q_values, result in tests:
            expr_namespace = IrQweb._compile_expr(expr)
            # wrap the compiled expression in a function storing its result
            # back into the values dict, then run it with qweb's globals
            compiled = compile("""def test(values):\n values['result'] = %s""" % expr_namespace, '<test>', 'exec')
            globals_dict = IrQweb._prepare_globals({}, {})
            values = {}
            unsafe_eval(compiled, globals_dict, values)
            test = values['test']
            test(q_values)
            q_result = dict(q_values, result=result)
            self.assertDictEqual(q_values, q_result, "Should compile: %s" % expr)
    def test_compile_expr_security(self):
        """Expressions must not be able to smuggle builtins (here ``open``)
        through lambda default arguments.
        """
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <div>
                    <t t-set="o" t-value="(lambda a=open: a)()"/>
                    <t t-out="o('/etc/passwd').read()"/>
                </div>
            </t>'''
        })
        values = {'other': 'any value'}
        with self.assertRaises(Exception): # NotImplementedError for 'lambda a=open' and Undefined value 'open'.
            self.env['ir.qweb']._render(t.id, values)
    def test_foreach_iter_list(self):
        # t-foreach over a list exposes *_index and *_value loop variables
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="iter-list">
                <t t-foreach="[3, 2, 1]" t-as="item">
                    [<t t-esc="item_index"/>: <t t-esc="item"/> <t t-esc="item_value"/>]</t>
            </t>'''
        })
        result = u"""
                    [0: 3 3]
                    [1: 2 2]
                    [2: 1 1]
            """
        rendered = self.env['ir.qweb']._render(t.id)
        self.assertEqual(rendered.strip(), result.strip())
    def test_foreach_iter_dict(self):
        # t-foreach over a dict iterates keys; *_value holds the mapped value
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="iter-dict">
                <t t-foreach="{'a': 3, 'b': 2, 'c': 1}" t-as="item">
                    [<t t-esc="item_index"/>: <t t-esc="item"/> <t t-esc="item_value"/>]</t>
            </t>'''
        })
        result = u"""
                    [0: a 3]
                    [1: b 2]
                    [2: c 1]
            """
        rendered = self.env['ir.qweb']._render(t.id)
        self.assertEqual(rendered.strip(), result.strip())
    def test_att_escaping_1(self):
        # dynamic attribute values are entity-escaped in the output
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <div t-att-bibi="json.dumps(bibi)">1</div>
                <div t-att-toto="toto">2</div>
            </t>'''
        })
        result = """
                <div bibi="{&quot;a&quot;: &quot;string&quot;, &quot;b&quot;: 1}">1</div>
                <div toto="a'b&quot;c">2</div>
            """
        values = {'json': json_scriptsafe, 'bibi': dict(a='string', b=1), 'toto': "a'b\"c"}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_att_escaping_2(self):
        # a t-set body used as attribute keeps its markup but escapes the
        # values interpolated into it
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <t t-set="abc"> <t t-if="add_abc"><t t-out="add_abc"/> <span a="b"> | </span></t><t t-out="efg"/> </t>
                <div t-att-abc="abc">123</div>
            </t>'''
        })
        result = """
                <div abc=" &amp;#34;yes&amp;#34; <span a=&quot;b&quot;> | </span>-efg- ">123</div>
            """
        values = {'add_abc': '"yes"', 'efg': '-efg-'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_attf_escaping_1(self):
        # t-attf format strings escape only the interpolated #{...} parts
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <div t-attf-bibi="a, b > c > #{d}">1</div>
            </t>'''
        })
        result = """
                <div bibi="a, b > c > a' > b&quot;c">1</div>
            """
        values = {'d': "a' > b\"c"}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_attf_escaping_2(self):
        # falsy #{...} expressions interpolate as the empty string
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <a t-attf-href="/link/#{ url }/#{other and 'sub'}">link</a>
                <a t-attf-href="/link/#{ url }/#{(not other) and 'sub'}">link2</a>
            </t>'''
        })
        result = """
                <a href="/link/odoo/sub">link</a>
                <a href="/link/odoo/">link2</a>
            """
        values = {'url': 'odoo', 'other': True}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_attf_escaping_3(self):
        # only #{...} is interpolated; a bare { ... } is left verbatim
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-escaping">
                <div t-attf-abc="abc #{val} { other }">123</div>
            </t>'''
        })
        result = """
                <div abc="abc &quot;yes&quot; { other }">123</div>
            """
        values = {'val': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_set_body_1(self):
        # a t-set body behaves as markup: %-formatting escapes the operand
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> [%s] </span> </t>
                <div t-att-abc="abc % add_abc">123</div>
            </t>'''
        })
        result = """
                <div abc=" <span a=&quot;b&quot;> [&amp;#34;yes&amp;#34;] </span> ">123</div>
            """
        values = {'add_abc': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_set_body_2(self):
        # formatting a str with a markup operand keeps the markup unescaped
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> toto </span> </t>
                <div t-att-abc="'[%s]' % abc">123</div>
                <div class="a1" t-out="abc"/>
                <div class="a2" t-out="'[%s]' % abc"/>
            </t>'''
        })
        result = """
                <div abc="[ <span a=&quot;b&quot;> toto </span> ]">123</div>
                <div class="a1"> <span a="b"> toto </span> </div>
                <div class="a2">[ <span a="b"> toto </span> ]</div>
            """
        rendered = self.env['ir.qweb']._render(t.id)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_1(self):
        # %-formatting of a t-set body with plain values
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="out-format">
                <t t-set="final_message">Powered by %s%s</t>
                <div t-out="final_message % (a, b and ('-%s' % b) or '')"/>
            </t>'''
        })
        result = u"""
                <div>Powered by 1-2</div>
            """
        rendered = self.env['ir.qweb']._render(t.id, {'a': 1, 'b': 2})
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_2(self):
        # nested formatting: plain operands escaped, markup operands kept
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="toto">Toto %s</t>
                <t t-set="abc"> <span a="b"> [%s , %s] </span> </t>
                <div t-out="(abc % (add_abc, toto)) % 5">123</div>
            </t>'''
        })
        result = """
                <div> <span a="b"> [&#34;yes&#34; , Toto 5] </span> </div>
            """
        values = {'add_abc': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_3(self):
        # concatenating two markup bodies stays markup; the %-operand escapes
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="toto">Toto %s</t>
                <t t-set="abc"> <span a="b"> a </span> </t>
                <div t-out="(toto + abc) % v">123</div>
            </t>'''
        })
        result = """
                <div>Toto &#34;yes&#34; <span a="b"> a </span> </div>
            """
        values = {'v': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_4(self):
        # str + markup: the str operand is escaped
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> a </span> </t>
                <div t-out="(v + abc)">123</div>
            </t>'''
        })
        result = """
                <div>&#34;yes&#34; <span a="b"> a </span> </div>
            """
        values = {'v': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_5(self):
        # markup + str: same escaping, reversed operand order
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> a </span> </t>
                <div t-out="(abc + v)">123</div>
            </t>'''
        })
        result = """
                <div> <span a="b"> a </span> &#34;yes&#34;</div>
            """
        values = {'v': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_format_6(self):
        # Use str method will use the string value. t-out will escape this str
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> a </span> </t>
                <div t-out="(abc.strip() + v)">123</div>
            </t>'''
        })
        result = """
                <div><span a="b"> a </span>&#34;yes&#34;</div>
            """
        values = {'v': '"yes"'}
        rendered = self.env['ir.qweb']._render(t.id, values)
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_escape_text(self):
        # the 'text' widget escapes HTML and converts newlines to <br>
        view1 = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy"><root><span t-out="text" t-options-widget="'text'"/></root></t>
            """
        })
        html = view1._render({'text': """a
        b <b>c</b>"""})
        self.assertEqual(html, """<root><span data-oe-type="text" data-oe-expression="text">a<br>
        b &lt;b&gt;c&lt;/b&gt;</span></root>""")
    def test_out_markup(self):
        # a t-set body is markup and is output unescaped by t-out
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="esc-markup">
                <t t-set="content"><span>toto</span></t>
                <div t-out="content"/>
            </t>'''
        })
        result = """
                <div><span>toto</span></div>
            """
        rendered = self.env['ir.qweb']._render(t.id, {})
        self.assertEqual(rendered.strip(), result.strip())
    def test_out_default_value(self):
        # the node body is the fallback used when the t-out value is falsy
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="out-default">
                <span rows="10" t-out="a">
                    DEFAULT
                    <t t-out="'Text'" />
                </span>
            </t>'''
        })
        result = """
                <span rows="10">Hello</span>
            """
        rendered = self.env['ir.qweb']._render(t.id, {'a': 'Hello'})
        self.assertEqual(str(rendered.strip()), result.strip())
        # without a value, the default body (including nested t-out) renders
        result = """
                <span rows="10">
                    DEFAULT
                    Text
                </span>
            """
        rendered = self.env['ir.qweb']._render(t.id, {})
        self.assertEqual(str(rendered.strip()), result.strip())
    def test_esc_markup(self):
        # t-esc is equal to t-out
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="esc-markup">
                <t t-set="content"><span>toto</span></t>
                <div t-esc="content"/>
            </t>'''
        })
        ref = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="esc-markup">
                <t t-set="content"><span>toto</span></t>
                <div t-out="content"/>
            </t>'''
        })
        rendered = self.env['ir.qweb']._render(t.id, {})
        result = self.env['ir.qweb']._render(ref.id, {})
        self.assertEqual(rendered.strip(), result.strip())
    def test_if_from_body(self):
        # a non-empty t-set body is truthy in a t-if condition
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="attr-set">
                <t t-set="abc"> <span a="b"> a </span> </t>
                <div t-if="abc">123</div>
                <div t-if="not abc">456</div>
            </t>'''
        })
        result = """
                <div>123</div>
            """
        rendered = self.env['ir.qweb']._render(t.id)
        self.assertEqual(rendered.strip(), result.strip())
    def test_error_message_1(self):
        # a runtime error must point at the offending directive in the message
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="test">
                <section>
                    <div t-esc="abc + def">
                        <span>content</span>
                    </div>
                </section>
            </t>'''
        })
        with self.assertRaises(QWebException):
            self.env['ir.qweb']._render(t.id)
        try:
            self.env['ir.qweb']._render(t.id)
        except QWebException as e:
            self.assertIn('<div t-esc="abc + def"/>', e.message)
    def test_error_message_2(self):
        # a compile-time error must be reported with the offending directive
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="test">
                <section>
                    <div t-esc="abc + def + (">
                        <span>content</span>
                    </div>
                </section>
            </t>'''
        })
        with self.assertRaises(QWebException):
            self.env['ir.qweb']._render(t.id)
        try:
            self.env['ir.qweb']._render(t.id)
        except QWebException as e:
            self.assertIn('Cannot compile expression', e.message)
            self.assertIn('<div t-esc="abc + def + ("/>', e.message)
from copy import deepcopy
class FileSystemLoader(object):
    """QWeb template loader backed by a single XML file.

    Iterating the loader yields the available template names; calling it with
    a name returns an ``(arch, name)`` pair suitable for ``ir.qweb``'s
    ``load=`` hook (or None when the name is unknown).
    """
    def __init__(self, path):
        # TODO: support multiple files #add_file() + add cache
        self.path = path
        self.doc = etree.parse(path).getroot()
    def __iter__(self):
        # every direct child carrying a 't-name' attribute is a template
        return (tname for tname in (node.get('t-name') for node in self.doc) if tname)
    def __call__(self, name, options):
        match = next((node for node in self.doc if node.get('t-name') == name), None)
        if match is None:
            # unknown template: same implicit-None contract as before
            return None
        # re-root a *copy* of the template so self.doc stays untouched
        wrapper = etree.Element('templates')
        wrapper.append(deepcopy(match))
        return (etree.tostring(wrapper, encoding='unicode'), name)
class TestQWebStaticXml(TransactionCase):
    """Run the shared qweb-test-*.xml fixtures (from web/static/lib/qweb)
    through the Python QWeb engine — one generated test method per file.
    """
    matcher = re.compile(r'^qweb-test-(.*)\.xml$')
    @classmethod
    def get_cases(cls):
        # one test case per matching fixture file in the js qweb directory
        path = cls.qweb_test_file_path()
        return (
            cls("test_qweb_{}".format(cls.matcher.match(f).group(1)))
            for f in os.listdir(path)
            # js inheritance
            if f != 'qweb-test-extend.xml'
            if cls.matcher.match(f)
        )
    @classmethod
    def qweb_test_file_path(cls):
        return os.path.dirname(get_module_resource('web', 'static', 'lib', 'qweb', 'qweb2.js'))
    def __getattr__(self, item):
        # test_qweb_<x> methods are synthesized lazily from the fixture name
        if not item.startswith('test_qweb_'):
            raise AttributeError("No {} on {}".format(item, self))
        f = 'qweb-test-{}.xml'.format(item[10:])
        path = self.qweb_test_file_path()
        return lambda: self.run_test_file(os.path.join(path, f))
    @mute_logger('odoo.addons.base.models.qweb') # tests t-raw which is deprecated
    def run_test_file(self, path):
        """Render every template of *path* and compare with its <result> node."""
        self.env.user.tz = 'Europe/Brussels'
        doc = etree.parse(path).getroot()
        loader = FileSystemLoader(path)
        qweb = self.env['ir.qweb']
        for template in loader:
            if not template or template.startswith('_'):
                continue
            param = doc.find('params[@id="{}"]'.format(template))
            # OrderedDict to ensure JSON mappings are iterated in source order
            # so output is predictable & repeatable
            params = {} if param is None else json.loads(param.text, object_pairs_hook=collections.OrderedDict)
            params.setdefault('__keep_empty_lines', True)
            result = doc.find('result[@id="{}"]'.format(template)).text
            self.assertEqual(
                qweb._render(template, values=params, load=loader).strip(),
                # fixtures escape double quotes as &quot;: unescape them before
                # comparing (the previous replace('"', '"') was a no-op)
                (result or u'').strip().replace('&quot;', '"'),
                template
            )
def load_tests(loader, suite, _):
    """unittest 'load_tests' hook: inject the generated per-fixture qweb
    cases into the suite.

    (Overriding TestQWebStaticXml.__dir__ would not work: dir() is called on
    the *class*, not on an instance.)
    """
    for case in TestQWebStaticXml.get_cases():
        suite.addTest(case)
    return suite
class TestPageSplit(TransactionCase):
    """Tables containing a row marked data-pagebreak must be split into two
    tables around a page-break <div>.
    """
    # need to explicitly assertTreesEqual because I guess it's registered for
    # equality between _Element *or* HtmlElement but we're comparing a parsed
    # HtmlElement and a convenience _Element
    # NOTE(review): assertTreesEqual is not defined in this class — presumably
    # provided by a base class/mixin elsewhere; confirm.
    def test_split_before(self):
        # the marked row starts the *second* table, after the page-break div
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name='test'>
            <div>
                <table>
                    <tr></tr>
                    <tr data-pagebreak="before"></tr>
                    <tr></tr>
                </table>
            </div>
            </t>
            '''
        })
        rendered = html.fromstring(self.env['ir.qweb']._render(t.id))
        ref = E.div(
            E.table(E.tr()),
            E.div({'style': 'page-break-after: always'}),
            E.table(E.tr({'data-pagebreak': 'before'}), E.tr())
        )
        self.assertTreesEqual(rendered, ref)
    def test_split_after(self):
        # the marked row ends the *first* table, before the page-break div
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name='test'>
            <div>
                <table>
                    <tr></tr>
                    <tr data-pagebreak="after"></tr>
                    <tr></tr>
                </table>
            </div>
            </t>
            '''
        })
        rendered = html.fromstring(self.env['ir.qweb']._render(t.id))
        self.assertTreesEqual(
            rendered,
            E.div(
                E.table(E.tr(), E.tr({'data-pagebreak': 'after'})),
                E.div({'style': 'page-break-after: always'}),
                E.table(E.tr())
            )
        )
    def test_dontsplit(self):
        # no data-pagebreak marker: the table must pass through unchanged
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name='test'>
            <div>
                <table>
                    <tr></tr>
                    <tr></tr>
                    <tr></tr>
                </table>
            </div>
            </t>
            '''
        })
        rendered = html.fromstring(self.env['ir.qweb']._render(t.id))
        self.assertTreesEqual(
            rendered,
            E.div(E.table(E.tr(), E.tr(), E.tr()))
        )
class TestEmptyLines(TransactionCase):
    """Check the '__keep_empty_lines' rendering option."""
    # template whose rendering produces whitespace-only lines
    arch = '''<t t-name='test'>
        <div>
        </div>
    </t>'''
    def test_no_empty_lines(self):
        """By default, whitespace-only lines are stripped from the output."""
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': self.arch
        })
        rendered = self.env['ir.qweb']._render(t.id)
        # fix: raw strings — '\s' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, future SyntaxError); the compiled
        # patterns are unchanged.
        # NOTE(review): .match anchors at the start, so the second pattern
        # only fires when the output begins with a newline — presumably
        # intended; confirm.
        self.assertFalse(re.compile(r'^\s+\n').match(rendered))
        self.assertFalse(re.compile(r'\n\s+\n').match(rendered))
    def test_keep_empty_lines(self):
        """With __keep_empty_lines, whitespace-only lines are preserved."""
        t = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': self.arch
        })
        rendered = self.env['ir.qweb']._render(t.id, {'__keep_empty_lines': True})
        self.assertTrue(re.compile(r'^\s+\n').match(rendered))
        self.assertTrue(re.compile(r'\n\s+\n').match(rendered))
class TestQWebMisc(TransactionCase):
    def test_render_comment_tail(self):
        """Text appearing after (between) XML comments — "tail" text — must
        survive rendering while the comments themselves are dropped.
        """
        view = self.env['ir.ui.view'].create({
            'name': "dummy",
            'type': "qweb",
            'arch': """
            <t>
                <!-- it is a comment -->
                <!-- it is another comment -->
                Text 1
                <!-- it is still another comment -->
                Text 2
                <t>ok</t>
            </t>
            """
        })
        # each retained text chunk is separated by a newline + indentation
        separator = '\n            '
        expected = markupsafe.Markup(separator.join(['Text 1', 'Text 2', 'ok']))
        self.assertEqual(view._render(), expected)
| 39.705584 | 54,754 |
17,620 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
import psycopg2
from odoo.exceptions import AccessError, MissingError
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
from odoo import Command
class TestORM(TransactionCase):
    """ test special behaviors of ORM CRUD functions """

    @mute_logger('odoo.models')
    def test_access_deleted_records(self):
        """ Verify that accessing deleted records works as expected """
        c1 = self.env['res.partner.category'].create({'name': 'W'})
        c2 = self.env['res.partner.category'].create({'name': 'Y'})
        c1.unlink()

        # read() is expected to skip deleted records because our API is not
        # transactional for a sequence of search()->read() performed from the
        # client-side... a concurrent deletion could therefore cause spurious
        # exceptions even when simply opening a list view!
        # /!\ Using unprivileged user to detect former side effects of ir.rules!
        user = self.env['res.users'].create({
            'name': 'test user',
            'login': 'test2',
            'groups_id': [Command.set([self.ref('base.group_user')])],
        })
        cs = (c1 + c2).with_user(user)
        self.assertEqual([{'id': c2.id, 'name': 'Y'}], cs.read(['name']), "read() should skip deleted records")
        self.assertEqual([], cs[0].read(['name']), "read() should skip deleted records")

        # Deleting an already deleted record should be simply ignored
        self.assertTrue(c1.unlink(), "Re-deleting should be a no-op")

    @mute_logger('odoo.models')
    def test_access_partial_deletion(self):
        """ Check accessing a record from a recordset where another record has been deleted. """
        Model = self.env['res.country']
        self.assertTrue(type(Model).display_name.automatic, "test assumption not satisfied")

        # access regular field when another record from the same prefetch set has been deleted
        records = Model.create([{'name': name} for name in ('Foo', 'Bar', 'Baz')])
        for record in records:
            record.name
            record.unlink()

        # access computed field when another record from the same prefetch set has been deleted
        records = Model.create([{'name': name} for name in ('Foo', 'Bar', 'Baz')])
        for record in records:
            record.display_name
            record.unlink()

    @mute_logger('odoo.models', 'odoo.addons.base.models.ir_rule')
    def test_access_filtered_records(self):
        """ Verify that accessing filtered records works as expected for non-admin user """
        p1 = self.env['res.partner'].create({'name': 'W'})
        p2 = self.env['res.partner'].create({'name': 'Y'})
        user = self.env['res.users'].create({
            'name': 'test user',
            'login': 'test2',
            'groups_id': [Command.set([self.ref('base.group_user')])],
        })
        partner_model = self.env['ir.model'].search([('model','=','res.partner')])
        # hide p1 from the test user via a record rule
        self.env['ir.rule'].create({
            'name': 'Y is invisible',
            'domain_force': [('id', '!=', p1.id)],
            'model_id': partner_model.id,
        })

        # search as unprivileged user
        partners = self.env['res.partner'].with_user(user).search([])
        self.assertNotIn(p1, partners, "W should not be visible...")
        self.assertIn(p2, partners, "... but Y should be visible")

        # read as unprivileged user
        with self.assertRaises(AccessError):
            p1.with_user(user).read(['name'])

        # write as unprivileged user
        with self.assertRaises(AccessError):
            p1.with_user(user).write({'name': 'foo'})

        # unlink as unprivileged user
        with self.assertRaises(AccessError):
            p1.with_user(user).unlink()

        # Prepare mixed case
        p2.unlink()

        # read mixed records: some deleted and some filtered
        with self.assertRaises(AccessError):
            (p1 + p2).with_user(user).read(['name'])

        # delete mixed records: some deleted and some filtered
        with self.assertRaises(AccessError):
            (p1 + p2).with_user(user).unlink()

    def test_read(self):
        """ read() on a singleton returns a list of dicts """
        partner = self.env['res.partner'].create({'name': 'MyPartner1'})
        result = partner.read()
        self.assertIsInstance(result, list)

    @mute_logger('odoo.models')
    def test_search_read(self):
        """ Check search_read(): result shape, ordering, and default fields """
        partner = self.env['res.partner']

        # simple search_read
        partner.create({'name': 'MyPartner1'})
        found = partner.search_read([('name', '=', 'MyPartner1')], ['name'])
        self.assertEqual(len(found), 1)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertIn('id', found[0])

        # search_read correct order
        partner.create({'name': 'MyPartner2'})
        found = partner.search_read([('name', 'like', 'MyPartner')], ['name'], order="name")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertEqual(found[1]['name'], 'MyPartner2')
        found = partner.search_read([('name', 'like', 'MyPartner')], ['name'], order="name desc")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner2')
        self.assertEqual(found[1]['name'], 'MyPartner1')

        # search_read that finds nothing
        found = partner.search_read([('name', '=', 'Does not exists')], ['name'])
        self.assertEqual(len(found), 0)

        # search_read with an empty array of fields
        found = partner.search_read([], [], limit=1)
        self.assertEqual(len(found), 1)
        # fix: the previous assertTrue(<genexpr>) was vacuously true (a
        # generator object is always truthy); assert each field explicitly
        for field in ['id', 'name', 'display_name', 'email']:
            self.assertIn(field, found[0])

        # search_read without fields
        found = partner.search_read([], False, limit=1)
        self.assertEqual(len(found), 1)
        for field in ['id', 'name', 'display_name', 'email']:
            self.assertIn(field, found[0])

    @mute_logger('odoo.sql_db')
    def test_exists(self):
        partner = self.env['res.partner']

        # check that records obtained from search exist
        recs = partner.search([])
        self.assertTrue(recs)
        self.assertEqual(recs.exists(), recs)

        # check that new records exist by convention
        recs = partner.new({})
        self.assertTrue(recs.exists())

        # check that there is no record with id 0
        recs = partner.browse([0])
        self.assertFalse(recs.exists())

        # check that there is no record with string id
        recs = partner.browse('xxx')
        with self.assertRaises(psycopg2.DataError):
            recs.exists()

    def test_groupby_date(self):
        """ read_group with date granularities (day/month/year) and orderby """
        partners_data = dict(
            A='2012-11-19',
            B='2012-12-17',
            C='2012-12-31',
            D='2013-01-07',
            E='2013-01-14',
            F='2013-01-28',
            G='2013-02-11',
        )
        partner_ids = []
        partner_ids_by_day = defaultdict(list)
        partner_ids_by_month = defaultdict(list)
        partner_ids_by_year = defaultdict(list)

        partners = self.env['res.partner']
        for name, date in partners_data.items():
            p = partners.create(dict(name=name, date=date))
            partner_ids.append(p.id)
            partner_ids_by_day[date].append(p.id)
            partner_ids_by_month[date.rsplit('-', 1)[0]].append(p.id)
            partner_ids_by_year[date.split('-', 1)[0]].append(p.id)

        def read_group(interval):
            # map each date bucket to the records its __domain selects
            domain = [('id', 'in', partner_ids)]
            result = {}
            for grp in partners.read_group(domain, ['date'], ['date:' + interval]):
                result[grp['date:' + interval]] = partners.search(grp['__domain'])
            return result

        self.assertEqual(len(read_group('day')), len(partner_ids_by_day))
        self.assertEqual(len(read_group('month')), len(partner_ids_by_month))
        self.assertEqual(len(read_group('year')), len(partner_ids_by_year))

        res = partners.read_group([('id', 'in', partner_ids)], ['date'],
                                  ['date:month', 'date:day'], lazy=False)
        self.assertEqual(len(res), len(partner_ids))

        # combine groupby and orderby
        months = ['February 2013', 'January 2013', 'December 2012', 'November 2012']
        res = partners.read_group([('id', 'in', partner_ids)], ['date'],
                                  groupby=['date:month'], orderby='date:month DESC')
        self.assertEqual([item['date:month'] for item in res], months)

        # order by date should reorder by date:month
        res = partners.read_group([('id', 'in', partner_ids)], ['date'],
                                  groupby=['date:month'], orderby='date DESC')
        self.assertEqual([item['date:month'] for item in res], months)

        # order by date should reorder by date:day
        days = ['11 Feb 2013', '28 Jan 2013', '14 Jan 2013', '07 Jan 2013',
                '31 Dec 2012', '17 Dec 2012', '19 Nov 2012']
        res = partners.read_group([('id', 'in', partner_ids)], ['date'],
                                  groupby=['date:month', 'date:day'],
                                  orderby='date DESC', lazy=False)
        self.assertEqual([item['date:day'] for item in res], days)

    def test_write_duplicate(self):
        # writing on a recordset containing the same record twice must not fail
        p1 = self.env['res.partner'].create({'name': 'W'})
        (p1 + p1).write({'name': 'X'})

    def test_m2m_store_trigger(self):
        # membership in group_user must recompute the stored 'share' flag
        group_user = self.env.ref('base.group_user')
        user = self.env['res.users'].create({
            'name': 'test',
            'login': 'test_m2m_store_trigger',
            'groups_id': [Command.set([])],
        })
        self.assertTrue(user.share)

        group_user.write({'users': [Command.link(user.id)]})
        self.assertFalse(user.share)

        group_user.write({'users': [Command.unlink(user.id)]})
        self.assertTrue(user.share)

    @mute_logger('odoo.models')
    def test_unlink_with_property(self):
        """ Verify that unlink removes the related ir.property as unprivileged user """
        user = self.env['res.users'].create({
            'name': 'Justine Bridou',
            'login': 'saucisson',
            'groups_id': [Command.set([self.ref('base.group_partner_manager')])],
        })
        p1 = self.env['res.partner'].with_user(user).create({'name': 'Zorro'})
        self.env['ir.property'].with_user(user)._set_multi("ref", "res.partner", {p1.id: "Nain poilu"})
        p1_prop = self.env['ir.property'].with_user(user)._get("ref", "res.partner", res_id=p1.id)
        self.assertEqual(
            p1_prop, "Nain poilu", 'p1_prop should have been created')

        # Unlink with unprivileged user
        p1.unlink()

        # ir.property is deleted
        p1_prop = self.env['ir.property'].with_user(user)._get("ref", "res.partner", res_id=p1.id)
        self.assertEqual(
            p1_prop, False, 'p1_prop should have been deleted')

    def test_create_multi(self):
        """ create for multiple records """
        # assumption: 'res.bank' does not override 'create'
        vals_list = [{'name': name} for name in ('Foo', 'Bar', 'Baz')]
        vals_list[0]['email'] = '[email protected]'
        for vals in vals_list:
            record = self.env['res.bank'].create(vals)
            self.assertEqual(len(record), 1)
            self.assertEqual(record.name, vals['name'])
            self.assertEqual(record.email, vals.get('email', False))

        # batch create: empty list yields an empty recordset
        records = self.env['res.bank'].create([])
        self.assertFalse(records)

        records = self.env['res.bank'].create(vals_list)
        self.assertEqual(len(records), len(vals_list))
        for record, vals in zip(records, vals_list):
            self.assertEqual(record.name, vals['name'])
            self.assertEqual(record.email, vals.get('email', False))

        # create countries and states (batch create with one2many commands)
        vals_list = [{
            'name': 'Foo',
            'state_ids': [
                Command.create({'name': 'North Foo', 'code': 'NF'}),
                Command.create({'name': 'South Foo', 'code': 'SF'}),
                Command.create({'name': 'West Foo', 'code': 'WF'}),
                Command.create({'name': 'East Foo', 'code': 'EF'}),
            ],
        }, {
            'name': 'Bar',
            'state_ids': [
                Command.create({'name': 'North Bar', 'code': 'NB'}),
                Command.create({'name': 'South Bar', 'code': 'SB'}),
            ],
        }]
        foo, bar = self.env['res.country'].create(vals_list)
        self.assertEqual(foo.name, 'Foo')
        self.assertCountEqual(foo.mapped('state_ids.code'), ['NF', 'SF', 'WF', 'EF'])
        self.assertEqual(bar.name, 'Bar')
        self.assertCountEqual(bar.mapped('state_ids.code'), ['NB', 'SB'])
class TestInherits(TransactionCase):
    """ test the behavior of the orm for models that use _inherits;
        specifically: res.users, that inherits from res.partner

        NOTE(review): these tests mutate the transaction's database state;
        assertions rely on records created earlier in the same method.
    """
    def test_default(self):
        """ `default_get` cannot return a dictionary or a new id """
        defaults = self.env['res.users'].default_get(['partner_id'])
        # the key may be absent; when present it must be a plain id (or
        # False), never a dict of sub-values nor an unsaved pseudo-record
        if 'partner_id' in defaults:
            self.assertIsInstance(defaults['partner_id'], (bool, int))
    def test_create(self):
        """ creating a user should automatically create a new partner """
        partners_before = self.env['res.partner'].search([])
        user_foo = self.env['res.users'].create({'name': 'Foo', 'login': 'foo'})
        # the delegated partner must be a fresh record, not a reused one
        self.assertNotIn(user_foo.partner_id, partners_before)
    def test_create_with_ancestor(self):
        """ creating a user with a specific 'partner_id' should not create a new partner """
        partner_foo = self.env['res.partner'].create({'name': 'Foo'})
        partners_before = self.env['res.partner'].search([])
        user_foo = self.env['res.users'].create({'partner_id': partner_foo.id, 'login': 'foo'})
        partners_after = self.env['res.partner'].search([])
        # no extra partner was created, and inherited fields read through the
        # explicitly provided ancestor
        self.assertEqual(partners_before, partners_after)
        self.assertEqual(user_foo.name, 'Foo')
        self.assertEqual(user_foo.partner_id, partner_foo)
    @mute_logger('odoo.models')
    def test_read(self):
        """ inherited fields should be read without any indirection """
        user_foo = self.env['res.users'].create({'name': 'Foo', 'login': 'foo'})
        user_values, = user_foo.read()
        partner_values, = user_foo.partner_id.read()
        # 'name' is delegated to the partner: both reads must agree
        self.assertEqual(user_values['name'], partner_values['name'])
        self.assertEqual(user_foo.name, user_foo.partner_id.name)
    @mute_logger('odoo.models')
    def test_copy(self):
        """ copying a user should automatically copy its partner, too """
        user_foo = self.env['res.users'].create({
            'name': 'Foo',
            'login': 'foo',
            'employee': True,
        })
        foo_before, = user_foo.read()
        # timestamps differ between reads; drop them before comparing
        del foo_before['__last_update']
        del foo_before['create_date']
        del foo_before['write_date']
        user_bar = user_foo.copy({'login': 'bar'})
        foo_after, = user_foo.read()
        del foo_after['__last_update']
        del foo_after['create_date']
        del foo_after['write_date']
        # copying must leave the original record untouched
        self.assertEqual(foo_before, foo_after)
        self.assertEqual(user_bar.name, 'Foo (copy)')
        self.assertEqual(user_bar.login, 'bar')
        self.assertEqual(user_foo.employee, user_bar.employee)
        self.assertNotEqual(user_foo.id, user_bar.id)
        # the delegated partner must have been duplicated as well
        self.assertNotEqual(user_foo.partner_id.id, user_bar.partner_id.id)
    @mute_logger('odoo.models')
    def test_copy_with_ancestor(self):
        """ copying a user with 'parent_id' in defaults should not duplicate the partner """
        user_foo = self.env['res.users'].create({'login': 'foo', 'name': 'Foo', 'signature': 'Foo'})
        partner_bar = self.env['res.partner'].create({'name': 'Bar'})
        foo_before, = user_foo.read()
        # timestamps (and login_date, touched by auth) vary between reads
        del foo_before['__last_update']
        del foo_before['create_date']
        del foo_before['write_date']
        del foo_before['login_date']
        partners_before = self.env['res.partner'].search([])
        user_bar = user_foo.copy({'partner_id': partner_bar.id, 'login': 'bar'})
        foo_after, = user_foo.read()
        del foo_after['__last_update']
        del foo_after['create_date']
        del foo_after['write_date']
        del foo_after['login_date']
        partners_after = self.env['res.partner'].search([])
        self.assertEqual(foo_before, foo_after)
        # the partner set is unchanged: the copy reused partner_bar
        self.assertEqual(partners_before, partners_after)
        self.assertNotEqual(user_foo.id, user_bar.id)
        self.assertEqual(user_bar.partner_id.id, partner_bar.id)
        self.assertEqual(user_bar.login, 'bar', "login is given from copy parameters")
        self.assertFalse(user_bar.password, "password should not be copied from original record")
        self.assertEqual(user_bar.name, 'Bar', "name is given from specific partner")
        self.assertEqual(user_bar.signature, user_foo.signature, "signature should be copied")
    @mute_logger('odoo.models')
    def test_write_date(self):
        """ modifying inherited fields must update write_date """
        user = self.env.user
        write_date_before = user.write_date
        # write base64 image
        user.write({'image_1920': 'R0lGODlhAQABAIAAAP///////yH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='})
        write_date_after = user.write_date
        # the write on the delegated field must bump the user's write_date
        self.assertNotEqual(write_date_before, write_date_after)
| 42.97561 | 17,620 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from dateutil.relativedelta import relativedelta
import os.path
import pytz
from odoo.tools import config, misc, date_utils, file_open, file_path, merge_sequences, remove_accents
from odoo.tests.common import TransactionCase, BaseCase
class TestCountingStream(BaseCase):
    """ Behaviour of misc.CountingStream: an iterator wrapper whose ``index``
    attribute tracks how many elements have been consumed so far. """
    def test_empty_stream(self):
        """ index starts at -1 and settles at 0 once exhaustion is observed """
        stream = misc.CountingStream(iter([]))
        self.assertEqual(stream.index, -1)
        self.assertIsNone(next(stream, None))
        self.assertEqual(stream.index, 0)
    def test_single(self):
        """ one element: index goes -1 -> (yield 0) -> 1 on exhaustion """
        stream = misc.CountingStream(range(1))
        self.assertEqual(stream.index, -1)
        self.assertEqual(next(stream, None), 0)
        self.assertIsNone(next(stream, None))
        self.assertEqual(stream.index, 1)
    def test_full(self):
        """ draining the stream leaves index equal to the element count """
        stream = misc.CountingStream(range(42))
        for _unused in stream:
            pass
        self.assertEqual(stream.index, 42)
    def test_repeated(self):
        """ Once the CountingStream has stopped iterating, the index should not
        increase anymore (the internal state should not be allowed to change)
        """
        stream = misc.CountingStream(iter([]))
        self.assertIsNone(next(stream, None))
        self.assertEqual(stream.index, 0)
        # asking again must not move the counter
        self.assertIsNone(next(stream, None))
        self.assertEqual(stream.index, 0)
class TestMergeSequences(BaseCase):
    """ Checks for misc.merge_sequences, which interleaves several partial
    orderings into a single list that respects each of them. """
    def test_merge_sequences(self):
        # a single sequence comes back unchanged
        self.assertEqual(merge_sequences(['A', 'B', 'C']), ['A', 'B', 'C'])
        # an unconstrained element ('Z') may land anywhere
        self.assertEqual(merge_sequences(['A', 'B', 'C'], ['Z']),
                         ['A', 'B', 'C', 'Z'])
        # 'Y' is only constrained to appear before 'C'
        self.assertEqual(merge_sequences(['A', 'B', 'C'], ['Y', 'C']),
                         ['A', 'B', 'Y', 'C'])
        # 'X' must come after 'A' and before 'C'
        self.assertEqual(merge_sequences(['A', 'B', 'C'], ['A', 'X', 'C']),
                         ['A', 'B', 'X', 'C'])
        # every constraint at once
        combined = merge_sequences(
            ['A', 'B', 'C'],
            ['Z'],            # 'Z' can be anywhere
            ['Y', 'C'],       # 'Y' must precede 'C'
            ['A', 'X', 'Y'],  # 'X' must follow 'A' and precede 'Y'
        )
        self.assertEqual(combined, ['A', 'B', 'X', 'Y', 'C', 'Z'])
class TestDateRangeFunction(BaseCase):
    """ Test on date_range generator.

    Improvements over the previous version: identity comprehensions
    ``[date for date in gen]`` replaced by ``list(gen)``, the unused
    bindings inside ``assertRaises`` dropped, and the long hand-written
    expected lists built with comprehensions.
    """
    def test_date_range_with_naive_datetimes(self):
        """ Check date_range with naive datetimes. """
        start = datetime.datetime(1985, 1, 1)
        end = datetime.datetime(1986, 1, 1)
        # default step is one month; both bounds are included
        expected = [datetime.datetime(1985, month, 1) for month in range(1, 13)]
        expected.append(datetime.datetime(1986, 1, 1))
        dates = list(date_utils.date_range(start, end))
        self.assertEqual(dates, expected)
    def test_date_range_with_timezone_aware_datetimes_other_than_utc(self):
        """ Check date_range with timezone-aware datetimes other than UTC."""
        timezone = pytz.timezone('Europe/Brussels')
        start = timezone.localize(datetime.datetime(1985, 1, 1))
        end = timezone.localize(datetime.datetime(1986, 1, 1))
        # generated datetimes carry the same timezone as the bounds
        expected = [datetime.datetime(1985, month, 1) for month in range(1, 13)]
        expected.append(datetime.datetime(1986, 1, 1))
        expected = [timezone.localize(e) for e in expected]
        dates = list(date_utils.date_range(start, end))
        self.assertEqual(expected, dates)
    def test_date_range_with_mismatching_zones(self):
        """ Check date_range with mismatching zone should raise an exception."""
        start = pytz.timezone('Europe/Brussels').localize(datetime.datetime(1985, 1, 1))
        end = pytz.timezone('America/Recife').localize(datetime.datetime(1986, 1, 1))
        with self.assertRaises(ValueError):
            list(date_utils.date_range(start, end))
    def test_date_range_with_inconsistent_datetimes(self):
        """ Check date_range with a timezone-aware datetime and a naive one."""
        context_timezone = pytz.timezone('Europe/Brussels')
        start = datetime.datetime(1985, 1, 1)
        end = context_timezone.localize(datetime.datetime(1986, 1, 1))
        with self.assertRaises(ValueError):
            list(date_utils.date_range(start, end))
    def test_date_range_with_hour(self):
        """ Test date range with hour and naive datetime."""
        start = datetime.datetime(2018, 3, 25)
        end = datetime.datetime(2018, 3, 26)
        step = relativedelta(hours=1)
        # 25 hourly points, both midnights included (naive datetimes: the
        # 2018-03-25 European DST switch does not apply)
        expected = [start + datetime.timedelta(hours=hour) for hour in range(25)]
        dates = list(date_utils.date_range(start, end, step))
        self.assertEqual(dates, expected)
class TestFormatLangDate(TransactionCase):
    """ Checks for misc.format_date / format_datetime / format_time.

    NOTE(review): expected strings are produced by babel from CLDR locale
    data; they assume the locale data bundled with the tested release.
    """
    def test_00_accepted_types(self):
        """ the formatters accept date/datetime objects, ISO strings, and
        falsy values (which all format to the empty string) """
        self.env.user.tz = 'Europe/Brussels'
        datetime_str = '2017-01-31 12:00:00'
        date_datetime = datetime.datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S")
        date_date = date_datetime.date()
        date_str = '2017-01-31'
        time_part = datetime.time(16, 30, 22)
        self.assertEqual(misc.format_date(self.env, date_datetime), '01/31/2017')
        self.assertEqual(misc.format_date(self.env, date_date), '01/31/2017')
        self.assertEqual(misc.format_date(self.env, date_str), '01/31/2017')
        self.assertEqual(misc.format_date(self.env, ''), '')
        self.assertEqual(misc.format_date(self.env, False), '')
        self.assertEqual(misc.format_date(self.env, None), '')
        # naive datetimes are rendered in the user tz (Brussels, UTC+1 in Jan)
        self.assertEqual(misc.format_datetime(self.env, date_datetime), 'Jan 31, 2017, 1:00:00 PM')
        self.assertEqual(misc.format_datetime(self.env, datetime_str), 'Jan 31, 2017, 1:00:00 PM')
        self.assertEqual(misc.format_datetime(self.env, ''), '')
        self.assertEqual(misc.format_datetime(self.env, False), '')
        self.assertEqual(misc.format_datetime(self.env, None), '')
        self.assertEqual(misc.format_time(self.env, time_part), '4:30:22 PM')
        self.assertEqual(misc.format_time(self.env, ''), '')
        self.assertEqual(misc.format_time(self.env, False), '')
        self.assertEqual(misc.format_time(self.env, None), '')
    def test_01_code_and_format(self):
        """ precedence between the context lang, an explicit lang_code and an
        explicit format argument, for each of the three formatters """
        date_str = '2017-01-31'
        lang = self.env['res.lang']
        # Activate French and Simplified Chinese (test with non-ASCII characters)
        lang._activate_lang('fr_FR')
        lang._activate_lang('zh_CN')
        # -- test `date`
        # Change a single parameter
        self.assertEqual(misc.format_date(lang.with_context(lang='fr_FR').env, date_str), '31/01/2017')
        self.assertEqual(misc.format_date(lang.env, date_str, lang_code='fr_FR'), '31/01/2017')
        self.assertEqual(misc.format_date(lang.env, date_str, date_format='MMM d, y'), 'Jan 31, 2017')
        # Change 2 parameters
        self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, lang_code='fr_FR'), '31/01/2017')
        self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, date_format='MMM d, y'), u'1\u6708 31, 2017')
        self.assertEqual(misc.format_date(lang.env, date_str, lang_code='fr_FR', date_format='MMM d, y'), 'janv. 31, 2017')
        # Change 3 parameters
        self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, lang_code='en_US', date_format='MMM d, y'), 'Jan 31, 2017')
        # -- test `datetime`
        datetime_str = '2017-01-31 10:33:00'
        # Change languages and timezones
        self.assertEqual(misc.format_datetime(lang.with_context(lang='fr_FR').env, datetime_str, tz='Europe/Brussels'), '31 janv. 2017 à 11:33:00')
        self.assertEqual(misc.format_datetime(lang.with_context(lang='zh_CN').env, datetime_str, tz='America/New_York'), '2017\u5E741\u670831\u65E5 \u4E0A\u53485:33:00')  # '2017年1月31日 上午5:33:00'
        # Change language, timezone and format
        self.assertEqual(misc.format_datetime(lang.with_context(lang='fr_FR').env, datetime_str, tz='America/New_York', dt_format='short'), '31/01/2017 05:33')
        self.assertEqual(misc.format_datetime(lang.with_context(lang='en_US').env, datetime_str, tz='Europe/Brussels', dt_format='MMM d, y'), 'Jan 31, 2017')
        # Check given `lang_code` overwites context lang
        self.assertEqual(misc.format_datetime(lang.env, datetime_str, tz='Europe/Brussels', dt_format='long', lang_code='fr_FR'), '31 janvier 2017 à 11:33:00 +0100')
        self.assertEqual(misc.format_datetime(lang.with_context(lang='zh_CN').env, datetime_str, tz='Europe/Brussels', dt_format='long', lang_code='en_US'), 'January 31, 2017 at 11:33:00 AM +0100')
        # -- test `time`
        time_part = datetime.time(16, 30, 22)
        time_part_tz = datetime.time(16, 30, 22, tzinfo=pytz.timezone('US/Eastern'))  # 4:30 PM timezoned
        self.assertEqual(misc.format_time(lang.with_context(lang='fr_FR').env, time_part), '16:30:22')
        self.assertEqual(misc.format_time(lang.with_context(lang='zh_CN').env, time_part), '\u4e0b\u53484:30:22')
        # Check format in different languages
        self.assertEqual(misc.format_time(lang.with_context(lang='fr_FR').env, time_part, time_format='short'), '16:30')
        self.assertEqual(misc.format_time(lang.with_context(lang='zh_CN').env, time_part, time_format='short'), '\u4e0b\u53484:30')
        # Check timezoned time part
        # two accepted renderings: numeric offset or localized tz name,
        # depending on the installed locale data
        self.assertIn(misc.format_time(lang.with_context(lang='fr_FR').env, time_part_tz, time_format='long'), ['16:30:22 -0504', '16:30:22 HNE'])
        self.assertEqual(misc.format_time(lang.with_context(lang='zh_CN').env, time_part_tz, time_format='full'), '\u5317\u7f8e\u4e1c\u90e8\u6807\u51c6\u65f6\u95f4\u0020\u4e0b\u53484:30:22')
        #Check timezone conversion in format_time
        self.assertEqual(misc.format_time(lang.with_context(lang='fr_FR').env, datetime_str, 'Europe/Brussels', time_format='long'), '11:33:00 +0100')
        self.assertEqual(misc.format_time(lang.with_context(lang='fr_FR').env, datetime_str, 'US/Eastern', time_format='long'), '05:33:00 HNE')
        # Check given `lang_code` overwites context lang
        self.assertEqual(misc.format_time(lang.with_context(lang='fr_FR').env, time_part, time_format='short', lang_code='zh_CN'), '\u4e0b\u53484:30')
        self.assertEqual(misc.format_time(lang.with_context(lang='zh_CN').env, time_part, time_format='medium', lang_code='fr_FR'), '16:30:22')
class TestCallbacks(BaseCase):
    """ Tests for misc.Callbacks: an ordered queue of callbacks with an
    attached ``data`` dict; run() fires the queue then clears both. """
    def test_callback(self):
        """ callbacks fire in registration order (duplicates included) and
        the queue is empty after run() """
        log = []
        callbacks = misc.Callbacks()
        # add foo
        def foo():
            log.append("foo")
        callbacks.add(foo)
        # add bar
        @callbacks.add
        def bar():
            log.append("bar")
        # add foo again
        callbacks.add(foo)
        # this should call foo(), bar(), foo()
        callbacks.run()
        self.assertEqual(log, ["foo", "bar", "foo"])
        # this should do nothing
        callbacks.run()
        self.assertEqual(log, ["foo", "bar", "foo"])
    def test_aggregate(self):
        """ the data dict accumulates across calls and is flushed by run() """
        log = []
        callbacks = misc.Callbacks()
        # register foo once
        @callbacks.add
        def foo():
            log.append(callbacks.data["foo"])
        # aggregate data
        callbacks.data.setdefault("foo", []).append(1)
        callbacks.data.setdefault("foo", []).append(2)
        callbacks.data.setdefault("foo", []).append(3)
        # foo() is called once
        callbacks.run()
        self.assertEqual(log, [[1, 2, 3]])
        # run() must also have cleared the data dict
        self.assertFalse(callbacks.data)
        callbacks.run()
        self.assertEqual(log, [[1, 2, 3]])
    def test_reentrant(self):
        """ a callback may call run() itself without re-firing anything """
        log = []
        callbacks = misc.Callbacks()
        # register foo that runs callbacks
        @callbacks.add
        def foo():
            log.append("foo1")
            callbacks.run()
            log.append("foo2")
        @callbacks.add
        def bar():
            log.append("bar")
        # both foo() and bar() are called once
        callbacks.run()
        self.assertEqual(log, ["foo1", "bar", "foo2"])
        callbacks.run()
        self.assertEqual(log, ["foo1", "bar", "foo2"])
class TestRemoveAccents(BaseCase):
    """ Checks for odoo.tools.remove_accents. """
    def test_empty_string(self):
        """ falsy inputs come back unchanged """
        for falsy in (False, '', None):
            self.assertEqual(remove_accents(falsy), falsy)
    def test_latin(self):
        """ latin diacritics are stripped down to plain ASCII """
        cases = [
            ('Niño Hernández', 'Nino Hernandez'),
            ('Anaïs Clémence', 'Anais Clemence'),
        ]
        for accented, plain in cases:
            self.assertEqual(remove_accents(accented), plain)
    def test_non_latin(self):
        """ non-latin scripts lose combining marks only, never letters """
        self.assertEqual(remove_accents('العربية'), 'العربية')
        self.assertEqual(remove_accents('русский алфавит'), 'русскии алфавит')
class TestAddonsFileAccess(BaseCase):
    """ Checks for file_path/file_open: both must confine access to the
    addons paths (and the odoo root), honour extension filters, and reject
    traversal outside those roots. """
    def assertCannotAccess(self, path, ExceptionType=FileNotFoundError, filter_ext=None):
        """ file_path(path) must raise ExceptionType """
        with self.assertRaises(ExceptionType):
            file_path(path, filter_ext=filter_ext)
    def assertCanRead(self, path, needle='', mode='r', filter_ext=None):
        """ file_open(path) must succeed and its content contain `needle` """
        with file_open(path, mode, filter_ext) as f:
            self.assertIn(needle, f.read())
    def assertCannotRead(self, path, ExceptionType=FileNotFoundError, filter_ext=None):
        """ file_open(path) must raise ExceptionType """
        with self.assertRaises(ExceptionType):
            file_open(path, filter_ext=filter_ext)
    def test_file_path(self):
        """ path resolution: absolute, relative, 'addons/'-prefixed and
        root_path targets resolve; escapes and bad extensions raise """
        # absolute path
        self.assertEqual(__file__, file_path(__file__))
        self.assertEqual(__file__, file_path(__file__, filter_ext=None)) # means "no filter" too
        self.assertEqual(__file__, file_path(__file__, filter_ext=('.py',)))
        # directory target is ok
        self.assertEqual(os.path.dirname(__file__), file_path(os.path.join(__file__, '..')))
        # relative path
        relpath = os.path.join(*(__file__.split(os.sep)[-3:])) # 'base/tests/test_misc.py'
        self.assertEqual(__file__, file_path(relpath))
        self.assertEqual(__file__, file_path(relpath, filter_ext=('.py',)))
        # leading 'addons/' is ignored if present
        self.assertTrue(file_path("addons/web/__init__.py"))
        relpath = os.path.join('addons', relpath) # 'addons/base/tests/test_misc.py'
        self.assertEqual(__file__, file_path(relpath))
        # files in root_path are allowed
        self.assertTrue(file_path('tools/misc.py'))
        # errors when outside addons_paths
        self.assertCannotAccess('/doesnt/exist')
        self.assertCannotAccess('/tmp')
        self.assertCannotAccess('../../../../../../../../../tmp')
        self.assertCannotAccess(os.path.join(__file__, '../../../../../'))
        # data_dir is forbidden
        self.assertCannotAccess(config['data_dir'])
        # errors for illegal extensions
        self.assertCannotAccess(__file__, ValueError, filter_ext=('.png',))
        # file doesnt exist but has wrong extension
        # the extension check fires first, hence ValueError not FileNotFoundError
        self.assertCannotAccess(__file__.replace('.py', '.foo'), ValueError, filter_ext=('.png',))
    def test_file_open(self):
        """ same confinement rules as file_path, exercised through actual
        reads (including binary mode and non-ASCII content) """
        # The needle includes UTF8 so we test reading non-ASCII files at the same time.
        # This depends on the system locale and is harder to unit test, but if you manage to run the
        # test with a non-UTF8 locale (`LC_ALL=fr_FR.iso8859-1 python3...`) it should not crash ;-)
        test_needle = "A needle with non-ascii bytes: ♥"
        # absolute path
        self.assertCanRead(__file__, test_needle)
        self.assertCanRead(__file__, test_needle.encode(), mode='rb')
        self.assertCanRead(__file__, test_needle.encode(), mode='rb', filter_ext=('.py',))
        # directory target *is* an error
        with self.assertRaises(FileNotFoundError):
            file_open(os.path.join(__file__, '..'))
        # relative path
        relpath = os.path.join(*(__file__.split(os.sep)[-3:])) # 'base/tests/test_misc.py'
        self.assertCanRead(relpath, test_needle)
        self.assertCanRead(relpath, test_needle.encode(), mode='rb')
        self.assertCanRead(relpath, test_needle.encode(), mode='rb', filter_ext=('.py',))
        # leading 'addons/' is ignored if present
        self.assertCanRead("addons/web/__init__.py", "import")
        relpath = os.path.join('addons', relpath) # 'addons/base/tests/test_misc.py'
        self.assertCanRead(relpath, test_needle)
        # files in root_path are allowed
        self.assertCanRead('tools/misc.py')
        # errors when outside addons_paths
        self.assertCannotRead('/doesnt/exist')
        self.assertCannotRead('')
        self.assertCannotRead('/tmp')
        self.assertCannotRead('../../../../../../../../../tmp')
        self.assertCannotRead(os.path.join(__file__, '../../../../../'))
        # data_dir is forbidden
        self.assertCannotRead(config['data_dir'])
        # errors for illegal extensions
        self.assertCannotRead(__file__, ValueError, filter_ext=('.png',))
        # file doesnt exist but has wrong extension
        self.assertCannotRead(__file__.replace('.py', '.foo'), ValueError, filter_ext=('.png',))
| 42.819172 | 19,654 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import re
from odoo.tests.common import TransactionCase
from odoo.tools import pdf
from odoo.modules.module import get_module_resource
import io
class TestPdf(TransactionCase):
    """ Tests on pdf.

    Fixes over the previous version: the fixture file handle in setUp is now
    closed deterministically (it was left to the garbage collector), the
    unused `page` local in test_merge_pdf is gone, and all transient BytesIO
    buffers are managed with context managers.
    """
    def setUp(self):
        super().setUp()
        file_path = get_module_resource('base', 'tests', 'minimal.pdf')
        # read the fixture once; the context manager closes the OS handle
        with open(file_path, 'rb') as pdf_file:
            self.file = pdf_file.read()
        # this buffer backs the shared reader, so it must outlive the test;
        # tearDown closes it
        self.minimal_reader_buffer = io.BytesIO(self.file)
        self.minimal_pdf_reader = pdf.OdooPdfFileReader(self.minimal_reader_buffer)

    def tearDown(self):
        super().tearDown()
        self.minimal_reader_buffer.close()

    def test_odoo_pdf_file_reader(self):
        """ attachments added through a cloned writer become visible to the
        originating reader """
        attachments = list(self.minimal_pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 0)
        pdf_writer = pdf.PdfFileWriter()
        pdf_writer.cloneReaderDocumentRoot(self.minimal_pdf_reader)
        pdf_writer.addAttachment('test_attachment.txt', b'My awesome attachment')
        attachments = list(self.minimal_pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 1)

    def test_odoo_pdf_file_writer(self):
        """ OdooPdfFileWriter can stack several attachments """
        attachments = list(self.minimal_pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 0)
        pdf_writer = pdf.OdooPdfFileWriter()
        pdf_writer.cloneReaderDocumentRoot(self.minimal_pdf_reader)
        pdf_writer.addAttachment('test_attachment.txt', b'My awesome attachment')
        attachments = list(self.minimal_pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 1)
        pdf_writer.addAttachment('another_attachment.txt', b'My awesome OTHER attachment')
        attachments = list(self.minimal_pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 2)

    def test_odoo_pdf_file_reader_with_owner_encryption(self):
        """ attachments stay readable on a document encrypted with an owner
        password only (empty user password) """
        pdf_writer = pdf.OdooPdfFileWriter()
        pdf_writer.cloneReaderDocumentRoot(self.minimal_pdf_reader)
        pdf_writer.addAttachment('test_attachment.txt', b'My awesome attachment')
        pdf_writer.addAttachment('another_attachment.txt', b'My awesome OTHER attachment')
        pdf_writer.encrypt("", "foo")
        with io.BytesIO() as writer_buffer:
            pdf_writer.write(writer_buffer)
            encrypted_content = writer_buffer.getvalue()
        with io.BytesIO(encrypted_content) as reader_buffer:
            pdf_reader = pdf.OdooPdfFileReader(reader_buffer)
            attachments = list(pdf_reader.getAttachments())
        self.assertEqual(len(attachments), 2)

    def test_merge_pdf(self):
        """ merge_pdf concatenates the pages of the given documents """
        self.assertEqual(self.minimal_pdf_reader.getNumPages(), 1)
        merged_pdf = pdf.merge_pdf([self.file, self.file])
        with io.BytesIO(merged_pdf) as merged_reader_buffer:
            merged_pdf_reader = pdf.OdooPdfFileReader(merged_reader_buffer)
            self.assertEqual(merged_pdf_reader.getNumPages(), 2)

    def test_branded_file_writer(self):
        """ the default writer is the branded one: the produced document must
        advertise Odoo as Producer and Creator """
        # It's not easy to create a PDF with PyPDF2, so instead we copy minimal.pdf with our custom pdf writer
        pdf_writer = pdf.PdfFileWriter()  # BrandedFileWriter
        pdf_writer.cloneReaderDocumentRoot(self.minimal_pdf_reader)
        with io.BytesIO() as writer_buffer:
            pdf_writer.write(writer_buffer)
            branded_content = writer_buffer.getvalue()
        # Read the metadata of the newly created pdf.
        with io.BytesIO(branded_content) as reader_buffer:
            pdf_reader = pdf.PdfFileReader(reader_buffer)
            pdf_info = pdf_reader.getDocumentInfo()
            self.assertEqual(pdf_info['/Producer'], 'Odoo')
            self.assertEqual(pdf_info['/Creator'], 'Odoo')

    def test_download_one_corrupted_pdf(self):
        """
        PyPDF2 is not flawless. We can upload a PDF that can be previsualised but that cannot be merged by PyPDF2.
        In the case of "Print Original Invoice", we want to be able to download the pdf from the list view.
        We test that, when selecting one record, it can be printed (downloaded) without error.
        """
        attach_name = 'super_attach.pdf'
        # we need to corrupt the file: change count object in the xref table
        pattern = re.compile(r"xref\n\d\s+(\d)")
        corrupted_file = re.sub(pattern, "xref\n0 5", self.file.decode('utf-8'), 1).encode('utf-8')
        self.env['ir.attachment'].create({
            'datas': base64.b64encode(corrupted_file),
            'name': attach_name,
            'mimetype': 'application/pdf',
            'res_model': self.env.user._name,
            'res_id': self.env.user.id,
        })
        self.test_report = self.env['ir.actions.report'].create({
            'name': 'Super Report',
            'model': self.env.user._name,
            'report_type': 'qweb-pdf',
            'report_name': 'super_report',
            'attachment': "'%s'" % attach_name,
            'attachment_use': True,
        })
        test_record_report = self.test_report.with_context(force_report_rendering=True)._render_qweb_pdf(self.env.user.id, data={'report_type': 'pdf'})
        self.assertTrue(test_record_report, "The PDF should have been generated")
| 42.373016 | 5,339 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from psycopg2 import IntegrityError, ProgrammingError
import odoo
from odoo.exceptions import UserError, ValidationError, AccessError
from odoo.tools import mute_logger
from odoo.tests import common
from odoo import Command
class TestServerActionsBase(common.TransactionCase):
    """ Shared fixture for server-action tests: a test country and partner,
    the ir.model / ir.model.fields records the actions manipulate, and a
    base 'code' server action writing a comment on the active partner. """
    def setUp(self):
        super(TestServerActionsBase, self).setUp()
        # Data on which we will run the server action
        self.test_country = self.env['res.country'].create({
            'name': 'TestingCountry',
            'code': 'TY',
            'address_format': 'SuperFormat',
        })
        self.test_partner = self.env['res.partner'].create({
            'name': 'TestingPartner',
            'city': 'OrigCity',
            'country_id': self.test_country.id,
        })
        # default evaluation context: actions run against self.test_partner
        self.context = {
            'active_model': 'res.partner',
            'active_id': self.test_partner.id,
        }
        # Model data
        Model = self.env['ir.model']
        Fields = self.env['ir.model.fields']
        self.comment_html = '<p>MyComment</p>'
        self.res_partner_model = Model.search([('model', '=', 'res.partner')])
        self.res_partner_name_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'name')])
        self.res_partner_city_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'city')])
        self.res_partner_country_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'country_id')])
        self.res_partner_parent_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'parent_id')])
        self.res_partner_children_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'child_ids')])
        self.res_partner_category_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'category_id')])
        self.res_country_model = Model.search([('model', '=', 'res.country')])
        self.res_country_name_field = Fields.search([('model', '=', 'res.country'), ('name', '=', 'name')])
        self.res_country_code_field = Fields.search([('model', '=', 'res.country'), ('name', '=', 'code')])
        self.res_partner_category_model = Model.search([('model', '=', 'res.partner.category')])
        self.res_partner_category_name_field = Fields.search([('model', '=', 'res.partner.category'), ('name', '=', 'name')])
        # create server action to write self.comment_html on the active partner
        self.action = self.env['ir.actions.server'].create({
            'name': 'TestAction',
            'model_id': self.res_partner_model.id,
            'model_name': 'res.partner',
            'state': 'code',
            'code': 'record.write({"comment": "%s"})' % self.comment_html,
        })
class TestServerActions(TestServerActionsBase):
    def test_00_action(self):
        """ running the base 'code' action writes the comment on the active
        partner; contextual bindings can be created and removed """
        self.action.with_context(self.context).run()
        self.assertEqual(self.test_partner.comment, self.comment_html, 'ir_actions_server: invalid condition check')
        self.test_partner.write({'comment': False})
        # Do: create contextual action
        self.action.create_action()
        self.assertEqual(self.action.binding_model_id.model, 'res.partner')
        # Do: remove contextual action
        self.action.unlink_action()
        self.assertFalse(self.action.binding_model_id)
    def test_10_code(self):
        """ a 'code' action can use `record` and its env to create records;
        a normally-finished code action returns False """
        self.action.write({
            'state': 'code',
            'code': ("partner_name = record.name + '_code'\n"
                     "record.env['res.partner'].create({'name': partner_name})"),
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
        partners = self.test_partner.search([('name', 'ilike', 'TestingPartner_code')])
        self.assertEqual(len(partners), 1, 'ir_actions_server: 1 new partner should have been created')
    def test_20_crud_create(self):
        """ 'object_create' with equation field lines builds a record of
        another model from the active record's values """
        # Do: create a new record in another model
        self.action.write({
            'state': 'object_create',
            'crud_model_id': self.res_country_model.id,
            'link_field_id': False,
            'fields_lines': [Command.clear(),
                             Command.create({'col1': self.res_country_name_field.id, 'value': 'record.name', 'evaluation_type': 'equation'}),
                             Command.create({'col1': self.res_country_code_field.id, 'value': 'record.name[0:2]', 'evaluation_type': 'equation'})],
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new country created
        country = self.test_country.search([('name', 'ilike', 'TestingPartner')])
        self.assertEqual(len(country), 1, 'ir_actions_server: TODO')
        # NOTE(review): equation yields 'Te'; the asserted 'TE' implies
        # res.country upper-cases the code — confirm against the model
        self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
    def test_20_crud_create_link_many2one(self):
        """ 'object_create' with a many2one link_field stores the new record
        on that field of the active record """
        _city = 'TestCity'
        _name = 'TestNew'
        # Do: create a new record in the same model and link it with a many2one
        self.action.write({
            'state': 'object_create',
            'crud_model_id': self.action.model_id.id,
            'link_field_id': self.res_partner_parent_field.id,
            'fields_lines': [Command.create({'col1': self.res_partner_name_field.id, 'value': _name}),
                             Command.create({'col1': self.res_partner_city_field.id, 'value': _city})],
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new partner created
        partner = self.test_partner.search([('name', 'ilike', _name)])
        self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
        self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
        # Test: new partner linked
        self.assertEqual(self.test_partner.parent_id, partner, 'ir_actions_server: TODO')
    def test_20_crud_create_link_one2many(self):
        """ 'object_create' with a one2many link_field attaches the new
        record to that one2many of the active record """
        _name = 'TestNew'
        # Do: create a new record in the same model and link it with a one2many
        self.action.write({
            'state': 'object_create',
            'crud_model_id': self.action.model_id.id,
            'link_field_id': self.res_partner_children_field.id,
            'fields_lines': [Command.create({'col1': self.res_partner_name_field.id, 'value': _name})],
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new partner created
        partner = self.test_partner.search([('name', 'ilike', _name)])
        self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
        self.assertEqual(partner.name, _name, 'ir_actions_server: TODO')
        # Test: new partner linked
        self.assertIn(partner, self.test_partner.child_ids, 'ir_actions_server: TODO')
    def test_20_crud_create_link_many2many(self):
        """ 'object_create' with a many2many link_field adds the new record
        to that many2many of the active record """
        # Do: create a new record in another model
        self.action.write({
            'state': 'object_create',
            'crud_model_id': self.res_partner_category_model.id,
            'link_field_id': self.res_partner_category_field.id,
            'fields_lines': [Command.create({'col1': self.res_partner_category_name_field.id, 'value': 'record.name', 'evaluation_type': 'equation'})],
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new category created
        category = self.env['res.partner.category'].search([('name', 'ilike', 'TestingPartner')])
        self.assertEqual(len(category), 1, 'ir_actions_server: TODO')
        self.assertIn(category, self.test_partner.category_id)
    def test_30_crud_write(self):
        """ 'object_write' updates the listed fields on the active record and
        leaves the other fields untouched """
        _name = 'TestNew'
        # Do: update partner name
        self.action.write({
            'state': 'object_write',
            'fields_lines': [Command.create({'col1': self.res_partner_name_field.id, 'value': _name})],
        })
        run_res = self.action.with_context(self.context).run()
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: partner updated
        partner = self.test_partner.search([('name', 'ilike', _name)])
        self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
        # city was not listed in fields_lines, so it must keep its value
        self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
@mute_logger('odoo.addons.base.models.ir_model', 'odoo.models')
def test_40_multi(self):
# Data: 2 server actions that will be nested
action1 = self.action.create({
'name': 'Subaction1',
'sequence': 1,
'model_id': self.res_partner_model.id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
action2 = self.action.create({
'name': 'Subaction2',
'sequence': 2,
'model_id': self.res_partner_model.id,
'crud_model_id': self.res_partner_model.id,
'state': 'object_create',
'fields_lines': [Command.create({'col1': self.res_partner_name_field.id, 'value': 'RaoulettePoiluchette'}),
Command.create({'col1': self.res_partner_city_field.id, 'value': 'TestingCity'})],
})
action3 = self.action.create({
'name': 'Subaction3',
'sequence': 3,
'model_id': self.res_partner_model.id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_url"}',
})
self.action.write({
'state': 'multi',
'child_ids': [Command.set([action1.id, action2.id, action3.id])],
})
# Do: run the action
res = self.action.with_context(self.context).run()
# Test: new partner created
# currently res_partner overrides default['name'] whatever its value
partner = self.test_partner.search([('name', 'ilike', 'RaoulettePoiluchette')])
self.assertEqual(len(partner), 1)
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_url')
# Test loops
with self.assertRaises(ValidationError):
self.action.write({
'child_ids': [Command.set([self.action.id])]
})
    def test_50_groups(self):
        """ check the action is returned only for groups dedicated to user """
        Actions = self.env['ir.actions.actions']
        group0 = self.env['res.groups'].create({'name': 'country group'})
        self.context = {
            'active_model': 'res.country',
            'active_id': self.test_country.id,
        }
        # Do: update model and group
        self.action.write({
            'model_id': self.res_country_model.id,
            'binding_model_id': self.res_country_model.id,
            'groups_id': [Command.link(group0.id)],
            'code': 'record.write({"vat_label": "VatFromTest"})',
        })
        # Test: action is not returned
        bindings = Actions.get_bindings('res.country')
        self.assertFalse(bindings)
        # without the group, running the action must be denied and have no effect
        with self.assertRaises(AccessError):
            self.action.with_context(self.context).run()
        self.assertFalse(self.test_country.vat_label)
        # add group to the user, and test again
        self.env.user.write({'groups_id': [Command.link(group0.id)]})
        bindings = Actions.get_bindings('res.country')
        self.assertItemsEqual(bindings.get('action'), self.action.read(['name', 'sequence', 'binding_view_types']))
        self.action.with_context(self.context).run()
        self.assertEqual(self.test_country.vat_label, 'VatFromTest', 'vat label should be changed to VatFromTest')
    def test_60_sort(self):
        """ check the actions sorted by sequence """
        Actions = self.env['ir.actions.actions']
        # Do: update model
        self.action.write({
            'model_id': self.res_country_model.id,
            'binding_model_id': self.res_country_model.id,
        })
        # copy with a lower sequence (1 vs 5) so it must come back first
        self.action2 = self.action.copy({'name': 'TestAction2', 'sequence': 1})
        # Test: action returned by sequence
        bindings = Actions.get_bindings('res.country')
        self.assertEqual([vals.get('name') for vals in bindings['action']], ['TestAction2', 'TestAction'])
        self.assertEqual([vals.get('sequence') for vals in bindings['action']], [1, 5])
    def test_70_copy_action(self):
        """ Copying an ir.actions.todo resets its state to 'open', but
        copying a server action keeps its state (overridden copy). """
        # first check that the base case (reset state) works normally
        r = self.env['ir.actions.todo'].create({
            'action_id': self.action.id,
            'state': 'done',
        })
        self.assertEqual(r.state, 'done')
        self.assertEqual(
            r.copy().state, 'open',
            "by default state should be reset by copy"
        )
        # then check that on server action we've changed that
        self.assertEqual(
            self.action.copy().state, 'code',
            "copying a server action should not reset the state"
        )
    def test_80_permission(self):
        """ Running a server action is gated by the user's write access on
        the record it operates on. """
        self.action.write({
            'state': 'code',
            'code': """record.write({'date': datetime.date.today()})""",
        })
        user_demo = self.env.ref("base.user_demo")
        self_demo = self.action.with_user(user_demo.id)
        # can write on contact partner
        self.test_partner.type = "contact"
        self.test_partner.with_user(user_demo.id).check_access_rule("write")
        self_demo.with_context(self.context).run()
        self.assertEqual(self.test_partner.date, date.today())
        # but can not write on private address
        self.test_partner.type = "private"
        with self.assertRaises(AccessError):
            self.test_partner.with_user(user_demo.id).check_access_rule("write")
        # nor execute a server action on it
        with self.assertRaises(AccessError), mute_logger('odoo.addons.base.models.ir_actions'):
            self_demo.with_context(self.context).run()
class TestCustomFields(common.TransactionCase):
    """ Lifecycle of custom (``x_``-prefixed) fields created through
    ``ir.model.fields``: naming constraints, uniqueness, removal and
    renaming (with or without views using the field), dependency handling,
    and custom selection fields. """
    # model the custom fields are created on, and its comodel for relations
    MODEL = 'res.partner'
    COMODEL = 'res.users'
    def setUp(self):
        # check that the registry is properly reset
        fnames = set(self.registry[self.MODEL]._fields)
        @self.addCleanup
        def check_registry():
            assert set(self.registry[self.MODEL]._fields) == fnames
        self.addCleanup(self.registry.reset_changes)
        self.addCleanup(self.registry.clear_caches)
        super().setUp()
    def create_field(self, name, *, field_type='char'):
        """ create a custom field and return it """
        model = self.env['ir.model'].search([('model', '=', self.MODEL)])
        field = self.env['ir.model.fields'].create({
            'model_id': model.id,
            'name': name,
            'field_description': name,
            'ttype': field_type,
        })
        # creating the ir.model.fields record must register the field immediately
        self.assertIn(name, self.env[self.MODEL]._fields)
        return field
    def create_view(self, name):
        """ create a view with the given field name """
        return self.env['ir.ui.view'].create({
            'name': 'yet another view',
            'model': self.MODEL,
            'arch': '<tree string="X"><field name="%s"/></tree>' % name,
        })
    def test_create_custom(self):
        """ custom field names must be start with 'x_' """
        with self.assertRaises(ValidationError):
            self.create_field('foo')
    def test_rename_custom(self):
        """ custom field names must be start with 'x_' """
        field = self.create_field('x_foo')
        with self.assertRaises(ValidationError):
            field.name = 'foo'
    def test_create_valid(self):
        """ field names must be valid pg identifiers """
        with self.assertRaises(ValidationError):
            self.create_field('x_foo bar')
    def test_rename_valid(self):
        """ field names must be valid pg identifiers """
        field = self.create_field('x_foo')
        with self.assertRaises(ValidationError):
            field.name = 'x_foo bar'
    def test_create_unique(self):
        """ one cannot create two fields with the same name on a given model """
        self.create_field('x_foo')
        with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
            self.create_field('x_foo')
    def test_rename_unique(self):
        """ one cannot create two fields with the same name on a given model """
        field1 = self.create_field('x_foo')
        field2 = self.create_field('x_bar')
        with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
            field2.name = field1.name
    def test_remove_without_view(self):
        """ try removing a custom field that does not occur in views """
        field = self.create_field('x_foo')
        field.unlink()
    def test_rename_without_view(self):
        """ try renaming a custom field that does not occur in views """
        field = self.create_field('x_foo')
        field.name = 'x_bar'
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_remove_with_view(self):
        """ try removing a custom field that occurs in a view """
        field = self.create_field('x_foo')
        self.create_view('x_foo')
        # try to delete the field, this should fail but not modify the registry
        with self.assertRaises(UserError):
            field.unlink()
        self.assertIn('x_foo', self.env[self.MODEL]._fields)
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_rename_with_view(self):
        """ try renaming a custom field that occurs in a view """
        field = self.create_field('x_foo')
        self.create_view('x_foo')
        # try to delete the field, this should fail but not modify the registry
        with self.assertRaises(UserError):
            field.name = 'x_bar'
        self.assertIn('x_foo', self.env[self.MODEL]._fields)
    def test_unlink_with_inverse(self):
        """ create a custom o2m and then delete its m2o inverse """
        model = self.env['ir.model']._get(self.MODEL)
        comodel = self.env['ir.model']._get(self.COMODEL)
        m2o_field = self.env['ir.model.fields'].create({
            'model_id': comodel.id,
            'name': 'x_my_m2o',
            'field_description': 'my_m2o',
            'ttype': 'many2one',
            'relation': self.MODEL,
        })
        # the o2m uses the m2o above as its inverse (relation_field)
        o2m_field = self.env['ir.model.fields'].create({
            'model_id': model.id,
            'name': 'x_my_o2m',
            'field_description': 'my_o2m',
            'ttype': 'one2many',
            'relation': self.COMODEL,
            'relation_field': m2o_field.name,
        })
        # normal mode: you cannot break dependencies
        with self.assertRaises(UserError):
            m2o_field.unlink()
        # uninstall mode: unlink dependant fields
        m2o_field.with_context(_force_unlink=True).unlink()
        self.assertFalse(o2m_field.exists())
    def test_unlink_with_dependant(self):
        """ create a computed field, then delete its dependency """
        # Also applies to compute fields
        comodel = self.env['ir.model'].search([('model', '=', self.COMODEL)])
        field = self.create_field('x_my_char')
        # related field on the comodel pointing back through partner_id
        dependant = self.env['ir.model.fields'].create({
            'model_id': comodel.id,
            'name': 'x_oh_boy',
            'field_description': 'x_oh_boy',
            'ttype': 'char',
            'related': 'partner_id.x_my_char',
        })
        # normal mode: you cannot break dependencies
        with self.assertRaises(UserError):
            field.unlink()
        # uninstall mode: unlink dependant fields
        field.with_context(_force_unlink=True).unlink()
        self.assertFalse(dependant.exists())
    def test_create_binary(self):
        """ binary custom fields should be created as attachment=True to avoid
        bloating the DB when creating e.g. image fields via studio
        """
        self.create_field('x_image', field_type='binary')
        custom_binary = self.env[self.MODEL]._fields['x_image']
        self.assertTrue(custom_binary.attachment)
    def test_related_field(self):
        """ create a custom related field, and check filled values """
        #
        # Add a custom field equivalent to the following definition:
        #
        # class Partner(models.Model)
        # _inherit = 'res.partner'
        # x_oh_boy = fields.Char(related="country_id.code", store=True)
        #
        # pick N=100 records in comodel
        countries = self.env['res.country'].search([('code', '!=', False)], limit=100)
        self.assertEqual(len(countries), 100, "Not enough records in comodel 'res.country'")
        # create records in model, with N distinct values for the related field
        partners = self.env['res.partner'].create([
            {'name': country.code, 'country_id': country.id} for country in countries
        ])
        partners.flush()
        # determine how many queries it takes to create a non-computed field
        query_count = self.cr.sql_log_count
        self.env['ir.model.fields'].create({
            'model_id': self.env['ir.model']._get_id('res.partner'),
            'name': 'x_oh_box',
            'field_description': 'x_oh_box',
            'ttype': 'char',
        })
        query_count = self.cr.sql_log_count - query_count
        # create the related field, and assert it only takes 1 extra query
        with self.assertQueryCount(query_count + 1):
            self.env['ir.model.fields'].create({
                'model_id': self.env['ir.model']._get_id('res.partner'),
                'name': 'x_oh_boy',
                'field_description': 'x_oh_boy',
                'ttype': 'char',
                'related': 'country_id.code',
                'store': True,
            })
        # check the computed values
        for partner in partners:
            self.assertEqual(partner.x_oh_boy, partner.country_id.code)
    def test_selection(self):
        """ custom selection field """
        Model = self.env[self.MODEL]
        model = self.env['ir.model'].search([('model', '=', self.MODEL)])
        field = self.env['ir.model.fields'].create({
            'model_id': model.id,
            'name': 'x_sel',
            'field_description': "Custom Selection",
            'ttype': 'selection',
            'selection_ids': [
                Command.create({'value': 'foo', 'name': 'Foo', 'sequence': 0}),
                Command.create({'value': 'bar', 'name': 'Bar', 'sequence': 1}),
            ],
        })
        x_sel = Model._fields['x_sel']
        self.assertEqual(x_sel.type, 'selection')
        self.assertEqual(x_sel.selection, [('foo', 'Foo'), ('bar', 'Bar')])
        # add selection value 'baz'
        field.selection_ids.create({
            'field_id': field.id, 'value': 'baz', 'name': 'Baz', 'sequence': 2,
        })
        x_sel = Model._fields['x_sel']
        self.assertEqual(x_sel.type, 'selection')
        self.assertEqual(x_sel.selection, [('foo', 'Foo'), ('bar', 'Bar'), ('baz', 'Baz')])
        # assign values to records
        rec1 = Model.create({'name': 'Rec1', 'x_sel': 'foo'})
        rec2 = Model.create({'name': 'Rec2', 'x_sel': 'bar'})
        rec3 = Model.create({'name': 'Rec3', 'x_sel': 'baz'})
        self.assertEqual(rec1.x_sel, 'foo')
        self.assertEqual(rec2.x_sel, 'bar')
        self.assertEqual(rec3.x_sel, 'baz')
        # remove selection value 'foo'
        # records holding the removed value fall back to False
        field.selection_ids[0].unlink()
        x_sel = Model._fields['x_sel']
        self.assertEqual(x_sel.type, 'selection')
        self.assertEqual(x_sel.selection, [('bar', 'Bar'), ('baz', 'Baz')])
        self.assertEqual(rec1.x_sel, False)
        self.assertEqual(rec2.x_sel, 'bar')
        self.assertEqual(rec3.x_sel, 'baz')
        # update selection value 'bar'
        # records holding the renamed value are migrated to the new value
        field.selection_ids[0].value = 'quux'
        x_sel = Model._fields['x_sel']
        self.assertEqual(x_sel.type, 'selection')
        self.assertEqual(x_sel.selection, [('quux', 'Bar'), ('baz', 'Baz')])
        self.assertEqual(rec1.x_sel, False)
        self.assertEqual(rec2.x_sel, 'quux')
        self.assertEqual(rec3.x_sel, 'baz')
| 42.21134 | 24,567 |
28,561 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
import email.policy
import email.message
import re
import threading
from odoo.addons.base.models.ir_mail_server import extract_rfc2822_addresses
from odoo.tests.common import BaseCase, TransactionCase
from odoo.tools import (
is_html_empty, html_sanitize, append_content_to_html, plaintext2html,
email_split, email_domain_normalize,
misc, formataddr,
prepend_html_content,
)
from . import test_mail_examples
class TestSanitizer(BaseCase):
    """ Test the html sanitizer that filters html to remove unwanted attributes """
    def test_basic_sanitizer(self):
        """ Plain text and simple html are wrapped/kept as expected. """
        cases = [
            ("yop", "<p>yop</p>"), # simple
            ("lala<p>yop</p>xxx", "<p>lala</p><p>yop</p>xxx"), # trailing text
            ("Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci",
             u"<p>Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci</p>"), # unicode
        ]
        for content, expected in cases:
            html = html_sanitize(content)
            self.assertEqual(html, expected, 'html_sanitize is broken')
    def test_evil_malicious_code(self):
        """ Sanitizing known XSS vectors must strip the executable payload. """
        # taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#Tests
        cases = [
            ("<IMG SRC=javascript:alert('XSS')>"), # no quotes and semicolons
            ("<IMG SRC=javascript:alert('XSS')>"), # UTF-8 Unicode encoding
            ("<IMG SRC=javascript:alert('XSS')>"), # hex encoding
            ("<IMG SRC=\"jav
ascript:alert('XSS');\">"), # embedded carriage return
            ("<IMG SRC=\"jav
ascript:alert('XSS');\">"), # embedded newline
            ("<IMG SRC=\"jav ascript:alert('XSS');\">"), # embedded tab
            ("<IMG SRC=\"jav	ascript:alert('XSS');\">"), # embedded encoded tab
            ("<IMG SRC=\"  javascript:alert('XSS');\">"), # spaces and meta-characters
            ("<IMG SRC=\"javascript:alert('XSS')\""), # half-open html
            ("<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">"), # malformed tag
            ("<SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>"), # non-alpha-non-digits
            ("<SCRIPT/SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>"), # non-alpha-non-digits
            ("<<SCRIPT>alert(\"XSS\");//<</SCRIPT>"), # extraneous open brackets
            ("<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >"), # non-closing script tags
            ("<INPUT TYPE=\"IMAGE\" SRC=\"javascript:alert('XSS');\">"), # input image
            ("<BODY BACKGROUND=\"javascript:alert('XSS')\">"), # body image
            ("<IMG DYNSRC=\"javascript:alert('XSS')\">"), # img dynsrc
            ("<IMG LOWSRC=\"javascript:alert('XSS')\">"), # img lowsrc
            ("<TABLE BACKGROUND=\"javascript:alert('XSS')\">"), # table
            ("<TABLE><TD BACKGROUND=\"javascript:alert('XSS')\">"), # td
            ("<DIV STYLE=\"background-image: url(javascript:alert('XSS'))\">"), # div background
            ("<DIV STYLE=\"background-image:\0075\0072\006C\0028'\006a\0061\0076\0061\0073\0063\0072\0069\0070\0074\003a\0061\006c\0065\0072\0074\0028.1027\0058.1053\0053\0027\0029'\0029\">"), # div background with unicoded exploit
            ("<DIV STYLE=\"background-image: url(javascript:alert('XSS'))\">"), # div background + extra characters
            ("<IMG SRC='vbscript:msgbox(\"XSS\")'>"), # VBscrip in an image
            ("<BODY ONLOAD=alert('XSS')>"), # event handler
            ("<BR SIZE=\"&{alert('XSS')}\>"), # & javascript includes
            ("<LINK REL=\"stylesheet\" HREF=\"javascript:alert('XSS');\">"), # style sheet
            ("<LINK REL=\"stylesheet\" HREF=\"http://ha.ckers.org/xss.css\">"), # remote style sheet
            ("<STYLE>@import'http://ha.ckers.org/xss.css';</STYLE>"), # remote style sheet 2
            ("<META HTTP-EQUIV=\"Link\" Content=\"<http://ha.ckers.org/xss.css>; REL=stylesheet\">"), # remote style sheet 3
            ("<STYLE>BODY{-moz-binding:url(\"http://ha.ckers.org/xssmoz.xml#xss\")}</STYLE>"), # remote style sheet 4
            ("<IMG STYLE=\"xss:expr/*XSS*/ession(alert('XSS'))\">"), # style attribute using a comment to break up expression
        ]
        for content in cases:
            html = html_sanitize(content)
            self.assertNotIn('javascript', html, 'html_sanitize did not remove a malicious javascript')
            self.assertTrue('ha.ckers.org' not in html or 'http://ha.ckers.org/xss.css' in html, 'html_sanitize did not remove a malicious code in %s (%s)' % (content, html))
        content = "<!--[if gte IE 4]><SCRIPT>alert('XSS');</SCRIPT><![endif]-->" # down-level hidden block
        self.assertEqual(html_sanitize(content, silent=False), '')
    def test_html(self):
        """ Structural tags survive sanitization, javascript does not. """
        sanitized_html = html_sanitize(test_mail_examples.MISC_HTML_SOURCE)
        for tag in ['<div', '<b', '<i', '<u', '<strike', '<li', '<blockquote', '<a href']:
            self.assertIn(tag, sanitized_html, 'html_sanitize stripped too much of original html')
        for attr in ['javascript']:
            self.assertNotIn(attr, sanitized_html, 'html_sanitize did not remove enough unwanted attributes')
    def test_sanitize_unescape_emails(self):
        """ @-containing attribute values must not be mistaken for emails. """
        not_emails = [
            '<blockquote cite="mid:CAEJSRZvWvud8c6Qp=wfNG6O1+wK3i_jb33qVrF7XyrgPNjnyUA@mail.gmail.com" type="cite">cat</blockquote>',
            '<img alt="@github-login" class="avatar" src="/web/image/pi" height="36" width="36">']
        for not_email in not_emails:
            sanitized = html_sanitize(not_email)
            left_part = not_email.split('>')[0] # take only left part, as the sanitizer could add data information on node
            self.assertNotIn(misc.html_escape(not_email), sanitized, 'html_sanitize stripped emails of original html')
            self.assertIn(left_part, sanitized)
    def test_style_parsing(self):
        """ Style sanitization keeps whitelisted properties and drops the rest. """
        test_data = [
            (
                '<span style="position: fixed; top: 0px; left: 50px; width: 40%; height: 50%; background-color: red;">Coin coin </span>',
                ['background-color:red', 'Coin coin'],
                ['position', 'top', 'left']
            ), (
                """<div style='before: "Email Address; coincoin cheval: lapin";
   font-size: 30px; max-width: 100%; after: "Not sure
    this; means: anything ?#ùµ"
    ; some-property: 2px; top: 3'>youplaboum</div>""",
                ['font-size:30px', 'youplaboum'],
                ['some-property', 'top', 'cheval']
            ), (
                '<span style="width">Coincoin</span>',
                [],
                ['width']
            )
        ]
        for test, in_lst, out_lst in test_data:
            new_html = html_sanitize(test, sanitize_attributes=False, sanitize_style=True, strip_style=False, strip_classes=False)
            for text in in_lst:
                self.assertIn(text, new_html)
            for text in out_lst:
                self.assertNotIn(text, new_html)
        # style should not be sanitized if removed
        new_html = html_sanitize(test_data[0][0], sanitize_attributes=False, strip_style=True, strip_classes=False)
        self.assertEqual(new_html, u'<span>Coin coin </span>')
    def test_style_class(self):
        """ strip_classes with attribute sanitization removes class content. """
        html = html_sanitize(test_mail_examples.REMOVE_CLASS, sanitize_attributes=True, sanitize_style=True, strip_classes=True)
        for ext in test_mail_examples.REMOVE_CLASS_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.REMOVE_CLASS_OUT:
            self.assertNotIn(ext, html,)
    def test_style_class_only(self):
        """ strip_classes also works without attribute sanitization. """
        html = html_sanitize(test_mail_examples.REMOVE_CLASS, sanitize_attributes=False, sanitize_style=True, strip_classes=True)
        for ext in test_mail_examples.REMOVE_CLASS_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.REMOVE_CLASS_OUT:
            self.assertNotIn(ext, html,)
    def test_edi_source(self):
        """ EDI-style html keeps valid styling/images, loses stray closers. """
        html = html_sanitize(test_mail_examples.EDI_LIKE_HTML_SOURCE)
        self.assertIn(
            'font-family: \'Lucida Grande\', Ubuntu, Arial, Verdana, sans-serif;', html,
            'html_sanitize removed valid styling')
        self.assertIn(
            'src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"', html,
            'html_sanitize removed valid img')
        self.assertNotIn('</body></html>', html, 'html_sanitize did not remove extra closing tags')
    def test_quote_blockquote(self):
        # quoted parts are wrapped in spans flagged with data-o-mail-quote="1"
        html = html_sanitize(test_mail_examples.QUOTE_BLOCKQUOTE)
        for ext in test_mail_examples.QUOTE_BLOCKQUOTE_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_BLOCKQUOTE_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s' % misc.html_escape(ext), html)
    def test_quote_thunderbird(self):
        html = html_sanitize(test_mail_examples.QUOTE_THUNDERBIRD_1)
        for ext in test_mail_examples.QUOTE_THUNDERBIRD_1_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_THUNDERBIRD_1_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
    def test_quote_hotmail_html(self):
        html = html_sanitize(test_mail_examples.QUOTE_HOTMAIL_HTML)
        for ext in test_mail_examples.QUOTE_HOTMAIL_HTML_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_HOTMAIL_HTML_OUT:
            self.assertIn(ext, html)
        html = html_sanitize(test_mail_examples.HOTMAIL_1)
        for ext in test_mail_examples.HOTMAIL_1_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.HOTMAIL_1_OUT:
            self.assertIn(ext, html)
    def test_quote_outlook_html(self):
        html = html_sanitize(test_mail_examples.QUOTE_OUTLOOK_HTML)
        for ext in test_mail_examples.QUOTE_OUTLOOK_HTML_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_OUTLOOK_HTML_OUT:
            self.assertIn(ext, html)
    def test_quote_thunderbird_html(self):
        html = html_sanitize(test_mail_examples.QUOTE_THUNDERBIRD_HTML)
        for ext in test_mail_examples.QUOTE_THUNDERBIRD_HTML_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_THUNDERBIRD_HTML_OUT:
            self.assertIn(ext, html)
    def test_quote_yahoo_html(self):
        html = html_sanitize(test_mail_examples.QUOTE_YAHOO_HTML)
        for ext in test_mail_examples.QUOTE_YAHOO_HTML_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.QUOTE_YAHOO_HTML_OUT:
            self.assertIn(ext, html)
    def test_quote_basic_text(self):
        """ Signature ('--') and '>'-prefixed lines are flagged as quotes. """
        test_data = [
            (
                """This is Sparta!\n--\nAdministrator\n+9988776655""",
                ['This is Sparta!'],
                ['\n--\nAdministrator\n+9988776655']
            ), (
                """<p>This is Sparta!\n--\nAdministrator</p>""",
                [],
                ['\n--\nAdministrator']
            ), (
                """<p>This is Sparta!<br/>--<br>Administrator</p>""",
                ['This is Sparta!'],
                []
            ), (
                """This is Sparta!\n>Ah bon ?\nCertes\n> Chouette !\nClair""",
                ['This is Sparta!', 'Certes', 'Clair'],
                ['\n>Ah bon ?', '\n> Chouette !']
            )
        ]
        for test, in_lst, out_lst in test_data:
            new_html = html_sanitize(test)
            for text in in_lst:
                self.assertIn(text, new_html)
            for text in out_lst:
                self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(text), new_html)
    def test_quote_signature(self):
        """ Signatures in <pre> blocks are flagged as quoted content. """
        test_data = [
            (
                """<div>Hello<pre>--<br />Administrator</pre></div>""",
                ["<pre data-o-mail-quote=\"1\">--", "<br data-o-mail-quote=\"1\">"],
            )
        ]
        for test, in_lst in test_data:
            new_html = html_sanitize(test)
            for text in in_lst:
                self.assertIn(text, new_html)
    def test_quote_gmail(self):
        html = html_sanitize(test_mail_examples.GMAIL_1)
        for ext in test_mail_examples.GMAIL_1_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.GMAIL_1_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
    def test_quote_text(self):
        html = html_sanitize(test_mail_examples.TEXT_1)
        for ext in test_mail_examples.TEXT_1_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.TEXT_1_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
        html = html_sanitize(test_mail_examples.TEXT_2)
        for ext in test_mail_examples.TEXT_2_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.TEXT_2_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
    def test_quote_bugs(self):
        html = html_sanitize(test_mail_examples.BUG1)
        for ext in test_mail_examples.BUG_1_IN:
            self.assertIn(ext, html)
        for ext in test_mail_examples.BUG_1_OUT:
            self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
    def test_misc(self):
        # False / void should not crash
        html = html_sanitize('')
        self.assertEqual(html, '')
        html = html_sanitize(False)
        self.assertEqual(html, False)
        # Message with xml and doctype tags don't crash
        html = html_sanitize(u'<?xml version="1.0" encoding="iso-8859-1"?>\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\n "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n <head>\n <title>404 - Not Found</title>\n </head>\n <body>\n <h1>404 - Not Found</h1>\n </body>\n</html>\n')
        self.assertNotIn('encoding', html)
        self.assertNotIn('<title>404 - Not Found</title>', html)
        self.assertIn('<h1>404 - Not Found</h1>', html)
    def test_cid_with_at(self):
        img_tag = '<img src="@">'
        sanitized = html_sanitize(img_tag, sanitize_tags=False, strip_classes=True)
        self.assertEqual(img_tag, sanitized, "img with can have cid containing @ and shouldn't be escaped")
    # ms office is currently not supported, have to find a way to support it
    # def test_30_email_msoffice(self):
    #     new_html = html_sanitize(test_mail_examples.MSOFFICE_1, remove=True)
    #     for ext in test_mail_examples.MSOFFICE_1_IN:
    #         self.assertIn(ext, new_html)
    #     for ext in test_mail_examples.MSOFFICE_1_OUT:
    #         self.assertNotIn(ext, new_html)
class TestHtmlTools(BaseCase):
    """ Test some of our generic utility functions about html """
    def test_plaintext2html(self):
        # (text, container_tag, expected html) triples
        cases = [
            ("First \nSecond \nThird\n \nParagraph\n\r--\nSignature paragraph", 'div',
             "<div><p>First <br/>Second <br/>Third</p><p>Paragraph</p><p>--<br/>Signature paragraph</p></div>"),
            ("First<p>It should be escaped</p>\nSignature", False,
             "<p>First<p>It should be escaped</p><br/>Signature</p>")
        ]
        for content, container_tag, expected in cases:
            html = plaintext2html(content, container_tag)
            self.assertEqual(html, expected, 'plaintext2html is broken')
    def test_append_to_html(self):
        # (html, content, plaintext, preserve, container_tag, expected) tuples
        test_samples = [
            ('<!DOCTYPE...><HTML encoding="blah">some <b>content</b></HtMl>', '--\nYours truly', True, True, False,
             '<!DOCTYPE...><html encoding="blah">some <b>content</b>\n<pre>--\nYours truly</pre>\n</html>'),
            ('<!DOCTYPE...><HTML encoding="blah">some <b>content</b></HtMl>', '--\nYours truly', True, False, False,
             '<!DOCTYPE...><html encoding="blah">some <b>content</b>\n<p>--<br/>Yours truly</p>\n</html>'),
            ('<html><body>some <b>content</b></body></html>', '--\nYours & <truly>', True, True, False,
             '<html><body>some <b>content</b>\n<pre>--\nYours & <truly></pre>\n</body></html>'),
            ('<html><body>some <b>content</b></body></html>', '<!DOCTYPE...>\n<html><body>\n<p>--</p>\n<p>Yours truly</p>\n</body>\n</html>', False, False, False,
             '<html><body>some <b>content</b>\n\n\n<p>--</p>\n<p>Yours truly</p>\n\n\n</body></html>'),
        ]
        for html, content, plaintext_flag, preserve_flag, container_tag, expected in test_samples:
            self.assertEqual(append_content_to_html(html, content, plaintext_flag, preserve_flag, container_tag), expected, 'append_content_to_html is broken')
    def test_is_html_empty(self):
        """ is_html_empty: falsy values and markup-only html are empty. """
        void_strings_samples = ['', False, ' ']
        for content in void_strings_samples:
            self.assertTrue(is_html_empty(content))
        void_html_samples = [
            '<p><br></p>', '<p><br> </p>', '<p><br /></p >',
            '<p style="margin: 4px"></p>',
            '<div style="margin: 4px"></div>',
            '<p class="oe_testing"><br></p>',
            '<p><span style="font-weight: bolder;"><font style="color: rgb(255, 0, 0);" class=" "></font></span><br></p>',
        ]
        for content in void_html_samples:
            self.assertTrue(is_html_empty(content), 'Failed with %s' % content)
        valid_html_samples = [
            '<p><br>1</p>', '<p>1<br > </p>', '<p style="margin: 4px">Hello World</p>',
            '<div style="margin: 4px"><p>Hello World</p></div>',
            '<p><span style="font-weight: bolder;"><font style="color: rgb(255, 0, 0);" class=" ">W</font></span><br></p>',
        ]
        for content in valid_html_samples:
            self.assertFalse(is_html_empty(content))
    def test_prepend_html_content(self):
        """ prepend_html_content inserts content right after the opening
        body/html tag (or at the very start for tag-less fragments). """
        body = """
            <html>
                <body>
                    <div>test</div>
                </body>
            </html>
        """
        content = "<span>content</span>"
        result = prepend_html_content(body, content)
        # whitespace is irrelevant for the comparison
        result = re.sub(r'[\s\t]', '', result)
        self.assertEqual(result, "<html><body><span>content</span><div>test</div></body></html>")
        body = "<div>test</div>"
        content = "<span>content</span>"
        result = prepend_html_content(body, content)
        result = re.sub(r'[\s\t]', '', result)
        self.assertEqual(result, "<span>content</span><div>test</div>")
        body = """
            <body>
                <div>test</div>
            </body>
        """
        result = prepend_html_content(body, content)
        result = re.sub(r'[\s\t]', '', result)
        self.assertEqual(result, "<body><span>content</span><div>test</div></body>")
        body = """
            <html>
                <body>
                    <div>test</div>
                </body>
            </html>
        """
        content = """
            <html>
                <body>
                    <div>test</div>
                </body>
            </html>
        """
        result = prepend_html_content(body, content)
        result = re.sub(r'[\s\t]', '', result)
        self.assertEqual(result, "<html><body><div>test</div><div>test</div></body></html>")
class TestEmailTools(BaseCase):
    """ Test some of our generic utility functions for emails """
    def test_email_split(self):
        cases = [
            ("John <[email protected]>", ['[email protected]']), # regular form
            ("d@x; 1@2", ['d@x', '1@2']), # semi-colon + extra space
            ("'(ss)' <[email protected]>, 'foo' <foo@bar>", ['[email protected]', 'foo@bar']), # comma + single-quoting
            ('"[email protected]"<[email protected]>', ['[email protected]']), # double-quoting
            ('"<jg>" <[email protected]>', ['[email protected]']), # double-quoting with brackets
        ]
        for text, expected in cases:
            self.assertEqual(email_split(text), expected, 'email_split is broken')
    def test_email_formataddr(self):
        """ formataddr: names get quoted/encoded per charset, IDNA domains
        are punycoded only when the charset cannot carry them. """
        email = '[email protected]'
        email_idna = 'joe@examplé.com'
        cases = [
            # (name, address), charsets expected
            (('', email), ['ascii', 'utf-8'], '[email protected]'),
            (('joe', email), ['ascii', 'utf-8'], '"joe" <[email protected]>'),
            (('joe doe', email), ['ascii', 'utf-8'], '"joe doe" <[email protected]>'),
            (('joe"doe', email), ['ascii', 'utf-8'], '"joe\\"doe" <[email protected]>'),
            (('joé', email), ['ascii'], '=?utf-8?b?am/DqQ==?= <[email protected]>'),
            (('joé', email), ['utf-8'], '"joé" <[email protected]>'),
            (('', email_idna), ['ascii'], '[email protected]'),
            (('', email_idna), ['utf-8'], 'joe@examplé.com'),
            (('joé', email_idna), ['ascii'], '=?utf-8?b?am/DqQ==?= <[email protected]>'),
            (('joé', email_idna), ['utf-8'], '"joé" <joe@examplé.com>'),
            (('', 'joé@example.com'), ['ascii', 'utf-8'], 'joé@example.com'),
        ]
        for pair, charsets, expected in cases:
            for charset in charsets:
                with self.subTest(pair=pair, charset=charset):
                    self.assertEqual(formataddr(pair, charset), expected)
    def test_extract_rfc2822_addresses(self):
        # malformed entries are dropped, non-ascii domains are IDNA-encoded
        tests = [
            ('"Admin" <[email protected]>', ['[email protected]']),
            ('"Admin" <[email protected]>, Demo <[email protected]>', ['[email protected]', '[email protected]']),
            ('[email protected]', ['[email protected]']),
            ('"Admin" <[email protected]>, Demo <malformed email>', ['[email protected]']),
            ('admin@éxample.com', ['[email protected]']),
            ('"admin@éxample.com" <admin@éxample.com>', ['[email protected]']),
        ]
        for (rfc2822_email, expected) in tests:
            self.assertEqual(extract_rfc2822_addresses(rfc2822_email), expected)
    def test_email_domain_normalize(self):
        self.assertEqual(email_domain_normalize("Test.Com"), "test.com", "Should have normalized the domain")
        self.assertEqual(email_domain_normalize("[email protected]"), False, "The domain is not valid, should return False")
        self.assertEqual(email_domain_normalize(False), False, "The domain is not valid, should return False")
class EmailConfigCase(TransactionCase):
    """ The 'mail.default.from' ICP takes precedence over the 'email_from'
    configuration/CLI option when building an email's From header. """
    @patch.dict("odoo.tools.config.options", {"email_from": "[email protected]"})
    def test_default_email_from(self, *args):
        """Email from setting is respected."""
        # ICP setting is more important
        ICP = self.env["ir.config_parameter"].sudo()
        ICP.set_param("mail.catchall.domain", "example.org")
        ICP.set_param("mail.default.from", "icp")
        message = self.env["ir.mail_server"].build_email(
            False, "[email protected]", "Subject",
            "The body of an email",
        )
        self.assertEqual(message["From"], "[email protected]")
        # Without ICP, the config file/CLI setting is used
        ICP.set_param("mail.default.from", False)
        message = self.env["ir.mail_server"].build_email(
            False, "[email protected]", "Subject",
            "The body of an email",
        )
        self.assertEqual(message["From"], "[email protected]")
class _FakeSMTP:
"""SMTP stub"""
def __init__(self):
self.messages = []
self.from_filter = 'example.com'
# Python 3 before 3.7.4
def sendmail(self, smtp_from, smtp_to_list, message_str,
mail_options=(), rcpt_options=()):
self.messages.append(message_str)
# Python 3.7.4+
def send_message(self, message, smtp_from, smtp_to_list,
mail_options=(), rcpt_options=()):
self.messages.append(message.as_string())
class TestEmailMessage(TransactionCase):
    """Checks on the exact bytes an ir.mail_server puts on the wire, plus
    HTML sanitization of malformed comment constructs."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # shared SMTP stub that records outgoing messages instead of sending them
        cls._fake_smtp = _FakeSMTP()

    def build_email(self, **kwargs):
        """Build an email, filling the mandatory fields with defaults."""
        kwargs.setdefault('email_from', '[email protected]')
        kwargs.setdefault('email_to', '[email protected]')
        kwargs.setdefault('subject', 'subject')
        return self.env['ir.mail_server'].build_email(**kwargs)

    def send_email(self, msg):
        """Send ``msg`` through the fake SMTP session and return the raw
        message string as it was "put on the wire"."""
        # clear the testing flag so send_email actually goes through the
        # smtp_session instead of being short-circuited by test mode
        with patch.object(threading.current_thread(), 'testing', False):
            self.env['ir.mail_server'].send_email(msg, smtp_session=self._fake_smtp)
        return self._fake_smtp.messages.pop()

    def test_bpo_34424_35805(self):
        """Ensure all email sent are bpo-34424 and bpo-35805 free"""
        msg = email.message.EmailMessage(policy=email.policy.SMTP)
        msg['From'] = '"Joé Doe" <[email protected]>'
        msg['To'] = '"Joé Doe" <[email protected]>'
        # Message-Id & References fields longer than 77 chars (bpo-35805)
        msg['Message-Id'] = '<929227342217024.1596730490.324691772460938-example-30661-some.reference@test-123.example.com>'
        msg['References'] = '<345227342212345.1596730777.324691772483620-example-30453-other.reference@test-123.example.com>'
        msg_on_the_wire = self.send_email(msg)
        # non-ASCII display names must be RFC 2047 encoded, and the long
        # Message-Id/References headers must be emitted unfolded
        self.assertEqual(msg_on_the_wire,
            'From: =?utf-8?q?Jo=C3=A9?= Doe <[email protected]>\r\n'
            'To: =?utf-8?q?Jo=C3=A9?= Doe <[email protected]>\r\n'
            'Message-Id: <929227342217024.1596730490.324691772460938-example-30661-some.reference@test-123.example.com>\r\n'
            'References: <345227342212345.1596730777.324691772483620-example-30453-other.reference@test-123.example.com>\r\n'
            '\r\n'
        )

    def test_alternative_correct_order(self):
        """
        RFC-1521 7.2.3. The Multipart/alternative subtype
        > the alternatives appear in an order of increasing faithfulness
        > to the original content. In general, the best choice is the
        > LAST part of a type supported by the recipient system's local
        > environment.
        Also, the MIME-Version header should be present in BOTH the
        enveloppe AND the parts
        """
        msg = self.build_email(body='<p>Hello world</p>', subtype='html')
        msg_on_the_wire = self.send_email(msg)
        self.assertGreater(msg_on_the_wire.index('text/html'), msg_on_the_wire.index('text/plain'),
            "The html part should be preferred (=appear after) to the text part")
        # '=' * 15 is the MIME boundary marker; 2 parts -> 2 boundaries
        self.assertEqual(msg_on_the_wire.count('==============='), 2 + 2,  # +2 for the header and the footer
            "There should be 2 parts: one text and one html")
        self.assertEqual(msg_on_the_wire.count('MIME-Version: 1.0'), 3,
            "There should be 3 headers MIME-Version: one on the enveloppe, "
            "one on the html part, one on the text part")

    def test_comment_malformed(self):
        """A malformed comment close ('--!>') must not let a payload escape sanitization."""
        html = '''<!-- malformed-close --!> <img src='x' onerror='alert(1)'></img> --> comment <!-- normal comment --> --> out of context balise --!>'''
        html_result = html_sanitize(html)
        self.assertNotIn('alert(1)', html_result)

    def test_multiline(self):
        """Multi-line comments with a malformed close must still be neutralized."""
        payload = """
        <div> <!--
            multi line comment
            --!> </div> <script> alert(1) </script> -->
        """
        html_result = html_sanitize(payload)
        self.assertNotIn('alert(1)', html_result)

    def test_abrupt_close(self):
        """Abruptly closed comments ('<!-->', '<!--->') must not expose scripts."""
        payload = """<!--> <script> alert(1) </script> -->"""
        html_result = html_sanitize(payload)
        self.assertNotIn('alert(1)', html_result)
        payload = """<!---> <script> alert(1) </script> -->"""
        html_result = html_sanitize(payload)
        self.assertNotIn('alert(1)', html_result)

    def test_abrut_malformed(self):
        """Abrupt AND malformed comment closes must not expose scripts."""
        payload = """<!--!> <script> alert(1) </script> -->"""
        html_result = html_sanitize(payload)
        self.assertNotIn('alert(1)', html_result)
        payload = """<!---!> <script> alert(1) </script> -->"""
        html_result = html_sanitize(payload)
        self.assertNotIn('alert(1)', html_result)
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import psycopg2
from odoo.addons.base.tests.common import SavepointCaseWithUserDemo
from odoo.fields import Date
from odoo.models import BaseModel
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
from odoo.osv import expression
from odoo import Command
class TestExpression(SavepointCaseWithUserDemo):
@classmethod
def setUpClass(cls):
super(TestExpression, cls).setUpClass()
cls._load_partners_set()
cls.env['res.currency'].with_context({'active_test': False}).search([('name', 'in', ['EUR', 'USD'])]).write({'active': True})
def _search(self, obj, domain, init_domain=[]):
sql = obj.search(domain)
allobj = obj.search(init_domain)
fil = allobj.filtered_domain(domain)
self.assertEqual(sql, fil, "filtered_domain do not match SQL search for domain: "+str(domain))
return sql
    def test_00_in_not_in_m2m(self):
        """On x2many fields, 'in' means "contains any of" and 'not in' means
        "contains none of"."""
        # Create 4 partners with no category, or one or two categories (out of two categories).
        categories = self.env['res.partner.category']
        cat_a = categories.create({'name': 'test_expression_category_A'})
        cat_b = categories.create({'name': 'test_expression_category_B'})
        partners = self.env['res.partner']
        a = partners.create({'name': 'test_expression_partner_A', 'category_id': [Command.set([cat_a.id])]})
        b = partners.create({'name': 'test_expression_partner_B', 'category_id': [Command.set([cat_b.id])]})
        ab = partners.create({'name': 'test_expression_partner_AB', 'category_id': [Command.set([cat_a.id, cat_b.id])]})
        c = partners.create({'name': 'test_expression_partner_C'})
        # The tests.
        # On a one2many or many2many field, `in` should be read `contains` (and
        # `not in` should be read `doesn't contain`.
        with_a = self._search(partners, [('category_id', 'in', [cat_a.id])])
        self.assertEqual(a + ab, with_a, "Search for category_id in cat_a failed.")
        with_b = self._search(partners, [('category_id', 'in', [cat_b.id])])
        self.assertEqual(b + ab, with_b, "Search for category_id in cat_b failed.")
        # Partners with the category A or the category B.
        with_a_or_b = self._search(partners, [('category_id', 'in', [cat_a.id, cat_b.id])])
        self.assertEqual(a + b + ab, with_a_or_b, "Search for category_id contains cat_a or cat_b failed.")
        # Show that `contains list` is really `contains element or contains element`.
        with_a_or_with_b = self._search(partners, ['|', ('category_id', 'in', [cat_a.id]), ('category_id', 'in', [cat_b.id])])
        self.assertEqual(a + b + ab, with_a_or_with_b, "Search for category_id contains cat_a or contains cat_b failed.")
        # If we change the OR in AND...
        with_a_and_b = self._search(partners, [('category_id', 'in', [cat_a.id]), ('category_id', 'in', [cat_b.id])])
        self.assertEqual(ab, with_a_and_b, "Search for category_id contains cat_a and cat_b failed.")
        # Partners without category A and without category B.
        without_a_or_b = self._search(partners, [('category_id', 'not in', [cat_a.id, cat_b.id])])
        self.assertFalse(without_a_or_b & (a + b + ab), "Search for category_id doesn't contain cat_a or cat_b failed (1).")
        self.assertTrue(c in without_a_or_b, "Search for category_id doesn't contain cat_a or cat_b failed (2).")
        # Show that `doesn't contain list` is really `doesn't contain element and doesn't contain element`.
        without_a_and_without_b = self._search(partners, [('category_id', 'not in', [cat_a.id]), ('category_id', 'not in', [cat_b.id])])
        self.assertFalse(without_a_and_without_b & (a + b + ab), "Search for category_id doesn't contain cat_a and cat_b failed (1).")
        self.assertTrue(c in without_a_and_without_b, "Search for category_id doesn't contain cat_a and cat_b failed (2).")
        # We can exclude any partner containing the category A.
        without_a = self._search(partners, [('category_id', 'not in', [cat_a.id])])
        self.assertTrue(a not in without_a, "Search for category_id doesn't contain cat_a failed (1).")
        self.assertTrue(ab not in without_a, "Search for category_id doesn't contain cat_a failed (2).")
        self.assertLessEqual(b + c, without_a, "Search for category_id doesn't contain cat_a failed (3).")
        # (Obviously we can do the same for cateory B.)
        without_b = self._search(partners, [('category_id', 'not in', [cat_b.id])])
        self.assertTrue(b not in without_b, "Search for category_id doesn't contain cat_b failed (1).")
        self.assertTrue(ab not in without_b, "Search for category_id doesn't contain cat_b failed (2).")
        self.assertLessEqual(a + c, without_b, "Search for category_id doesn't contain cat_b failed (3).")
    def test_05_not_str_m2m(self):
        """String operands on a many2many are matched against the co-model's
        name, for positive and negative operators alike."""
        partners = self.env['res.partner']
        categories = self.env['res.partner.category']
        # categories 'A', 'B' and 'AB' ('AB' matches 'like' for both letters)
        cids = {}
        for name in 'A B AB'.split():
            cids[name] = categories.create({'name': name}).id
        # partner name -> categories it belongs to
        partners_config = {
            '0': [],
            'a': [cids['A']],
            'b': [cids['B']],
            'ab': [cids['AB']],
            'a b': [cids['A'], cids['B']],
            'b ab': [cids['B'], cids['AB']],
        }
        pids = {}
        for name, cat_ids in partners_config.items():
            pids[name] = partners.create({'name': name, 'category_id': [Command.set(cat_ids)]}).id
        # restrict all searches to the partners created above
        base_domain = [('id', 'in', list(pids.values()))]
        def test(op, value, expected):
            # `expected` lists the partner names that must match the domain
            found_ids = self._search(partners, base_domain + [('category_id', op, value)]).ids
            expected_ids = [pids[name] for name in expected]
            self.assertItemsEqual(found_ids, expected_ids, '%s %r should return %r' % (op, value, expected))
        test('=', 'A', ['a', 'a b'])
        test('!=', 'B', ['0', 'a', 'ab'])
        test('like', 'A', ['a', 'ab', 'a b', 'b ab'])
        test('not ilike', 'B', ['0', 'a'])
        test('not like', 'AB', ['0', 'a', 'b', 'a b'])
def test_09_hierarchy_filtered_domain(self):
Partner = self.env['res.partner']
p = Partner.create({'name': 'dummy'})
# hierarchy without parent
self.assertFalse(p.parent_id)
p2 = self._search(Partner, [('parent_id', 'child_of', p.id)], [('id', '=', p.id)])
self.assertEqual(p2, p)
p3 = self._search(Partner, [('parent_id', 'parent_of', p.id)], [('id', '=', p.id)])
self.assertEqual(p3, p)
    def test_10_hierarchy_in_m2m(self):
        """child_of/parent_of accept a single id, a list of ids, an empty list
        or False, and traverse hierarchies reached through many2many fields."""
        Partner = self.env['res.partner']
        Category = self.env['res.partner.category']
        # search through m2m relation
        partners = self._search(Partner, [('category_id', 'child_of', self.partner_category.id)])
        self.assertTrue(partners)
        # setup test partner categories: root -> parent -> child
        categ_root = Category.create({'name': 'Root category'})
        categ_0 = Category.create({'name': 'Parent category', 'parent_id': categ_root.id})
        categ_1 = Category.create({'name': 'Child1', 'parent_id': categ_0.id})
        # test hierarchical search in m2m with child id (list of ids)
        cats = self._search(Category, [('id', 'child_of', categ_root.ids)])
        self.assertEqual(len(cats), 3)
        # test hierarchical search in m2m with child id (single id)
        cats = self._search(Category, [('id', 'child_of', categ_root.id)])
        self.assertEqual(len(cats), 3)
        # test hierarchical search in m2m with child ids
        cats = self._search(Category, [('id', 'child_of', (categ_0 + categ_1).ids)])
        self.assertEqual(len(cats), 2)
        # test hierarchical search in m2m with child ids
        cats = self._search(Category, [('id', 'child_of', categ_0.ids)])
        self.assertEqual(len(cats), 2)
        # test hierarchical search in m2m with child ids
        cats = self._search(Category, [('id', 'child_of', categ_1.ids)])
        self.assertEqual(len(cats), 1)
        # test hierarchical search in m2m with an empty list
        cats = self._search(Category, [('id', 'child_of', [])])
        self.assertEqual(len(cats), 0)
        # test hierarchical search in m2m with 'False' value: matches nothing
        # but must emit a warning in the logs
        with self.assertLogs('odoo.osv.expression'):
            cats = self._search(Category, [('id', 'child_of', False)])
        self.assertEqual(len(cats), 0)
        # test hierarchical search in m2m with parent id (list of ids)
        cats = self._search(Category, [('id', 'parent_of', categ_1.ids)])
        self.assertEqual(len(cats), 3)
        # test hierarchical search in m2m with parent id (single id)
        cats = self._search(Category, [('id', 'parent_of', categ_1.id)])
        self.assertEqual(len(cats), 3)
        # test hierarchical search in m2m with parent ids
        cats = self._search(Category, [('id', 'parent_of', (categ_root + categ_0).ids)])
        self.assertEqual(len(cats), 2)
        # test hierarchical search in m2m with parent ids
        cats = self._search(Category, [('id', 'parent_of', categ_0.ids)])
        self.assertEqual(len(cats), 2)
        # test hierarchical search in m2m with parent ids
        cats = self._search(Category, [('id', 'parent_of', categ_root.ids)])
        self.assertEqual(len(cats), 1)
        # test hierarchical search in m2m with an empty list
        cats = self._search(Category, [('id', 'parent_of', [])])
        self.assertEqual(len(cats), 0)
        # test hierarchical search in m2m with 'False' value: matches nothing
        # but must emit a warning in the logs
        with self.assertLogs('odoo.osv.expression'):
            cats = self._search(Category, [('id', 'parent_of', False)])
        self.assertEqual(len(cats), 0)
    @mute_logger('odoo.models.unlink')
    def test_10_hierarchy_access(self):
        """child_of/parent_of only return records the user is allowed to read,
        on the model itself and through a many2one from another model."""
        Partner = self.env['res.partner'].with_user(self.user_demo)
        top = Partner.create({'name': 'Top'})
        med = Partner.create({'name': 'Medium', 'parent_id': top.id})
        bot = Partner.create({'name': 'Bottom', 'parent_id': med.id})
        # restrict access of user Demo to partners Top and Bottom
        accessible = top + bot
        self.env['ir.rule'].search([]).unlink()
        self.env['ir.rule'].create({
            'name': 'partners rule',
            'model_id': self.env['ir.model']._get('res.partner').id,
            'domain_force': str([('id', 'in', accessible.ids)]),
        })
        # these searches should return the subset of accessible nodes that are
        # in the given hierarchy
        self.assertEqual(Partner.search([]), accessible)
        self.assertEqual(Partner.search([('id', 'child_of', top.ids)]), accessible)
        self.assertEqual(Partner.search([('id', 'parent_of', bot.ids)]), accessible)
        # same kind of search from another model: the bank account attached to
        # the inaccessible middle partner must be filtered out as well
        Bank = self.env['res.partner.bank'].with_user(self.user_demo)
        bank_top, _bank_med, bank_bot = Bank.create([
            {'acc_number': '1', 'partner_id': top.id},
            {'acc_number': '2', 'partner_id': med.id},
            {'acc_number': '3', 'partner_id': bot.id},
        ])
        self.assertEqual(Bank.search([('partner_id', 'in', accessible.ids)]), bank_top + bank_bot)
        self.assertEqual(Bank.search([('partner_id', 'child_of', top.ids)]), bank_top + bank_bot)
        self.assertEqual(Bank.search([('partner_id', 'parent_of', bot.ids)]), bank_top + bank_bot)
def test_10_eq_lt_gt_lte_gte(self):
# test if less/greater than or equal operators work
currency = self.env['res.currency'].search([], limit=1)
# test equal
res = self._search(currency, [('rounding', '=', currency.rounding)])
self.assertTrue(currency in res)
# test not equal
res = self._search(currency, [('rounding', '!=', currency.rounding)])
self.assertTrue(currency not in res)
# test greater than
res = self._search(currency, [('rounding', '>', currency.rounding)])
self.assertTrue(currency not in res)
# test greater than or equal
res = self._search(currency, [('rounding', '>=', currency.rounding)])
self.assertTrue(currency in res)
# test less than
res = self._search(currency, [('rounding', '<', currency.rounding)])
self.assertTrue(currency not in res)
# test less than or equal
res = self._search(currency, [('rounding', '<=', currency.rounding)])
self.assertTrue(currency in res)
def test_10_equivalent_id(self):
# equivalent queries
Currency = self.env['res.currency']
non_currency_id = max(Currency.search([]).ids) + 1003
res_0 = self._search(Currency, [])
res_1 = self._search(Currency, [('name', 'not like', 'probably_unexisting_name')])
self.assertEqual(res_0, res_1)
res_2 = self._search(Currency, [('id', 'not in', [non_currency_id])])
self.assertEqual(res_0, res_2)
res_3 = self._search(Currency, [('id', 'not in', [])])
self.assertEqual(res_0, res_3)
res_4 = self._search(Currency, [('id', '!=', False)])
self.assertEqual(res_0, res_4)
# equivalent queries, integer and string
Partner = self.env['res.partner']
all_partners = self._search(Partner, [])
self.assertTrue(len(all_partners) > 1)
one = self.env.ref('base.main_partner')
others = all_partners - one
res_1 = self._search(Partner, [('id', '=', one.id)])
self.assertEqual(one, res_1)
# Partner.search([('id', '!=', others)]) # not permitted
res_2 = self._search(Partner, [('id', 'not in', others.ids)])
self.assertEqual(one, res_2)
res_3 = self._search(Partner, ['!', ('id', '!=', one.id)])
self.assertEqual(one, res_3)
res_4 = self._search(Partner, ['!', ('id', 'in', others.ids)])
self.assertEqual(one, res_4)
# res_5 = Partner.search([('id', 'in', one)]) # TODO make it permitted, just like for child_of
# self.assertEqual(one, res_5)
res_6 = self._search(Partner, [('id', 'in', [one.id])])
self.assertEqual(one, res_6)
res_7 = self._search(Partner, [('name', '=', one.name)])
self.assertEqual(one, res_7)
res_8 = self._search(Partner, [('name', 'in', [one.name])])
# res_9 = Partner.search([('name', 'in', one.name)]) # TODO
    def test_15_m2o(self):
        """Search behavior on many2one fields: matching by name, IN/NOT IN
        with ids, empty lists, False/null values and dotted sub-field paths."""
        Partner = self.env['res.partner']
        # testing equality with name
        partners = self._search(Partner, [('parent_id', '=', 'Pepper Street')])
        self.assertTrue(partners)
        # testing the in operator with name
        partners = self._search(Partner, [('parent_id', 'in', 'Pepper Street')])
        self.assertTrue(partners)
        # testing the in operator with a list of names
        partners = self._search(Partner, [('parent_id', 'in', ['Pepper Street', 'Inner Works'])])
        self.assertTrue(partners)
        # check if many2one works with empty search list
        partners = self._search(Partner, [('company_id', 'in', [])])
        self.assertFalse(partners)
        # create new company with partners, and partners with no company
        company2 = self.env['res.company'].create({'name': 'Acme 2'})
        for i in range(4):
            Partner.create({'name': 'P of Acme %s' % i, 'company_id': company2.id})
            Partner.create({'name': 'P of All %s' % i, 'company_id': False})
        # check if many2one works with negative empty list
        all_partners = Partner.search([])
        res_partners = self._search(Partner, ['|', ('company_id', 'not in', []), ('company_id', '=', False)])
        self.assertEqual(all_partners, res_partners, "not in [] fails")
        # check that many2one will pick the correct records with a list
        partners = self._search(Partner, [('company_id', 'in', [False])])
        self.assertTrue(len(partners) >= 4, "We should have at least 4 partners with no company")
        # check that many2one will exclude the correct records with a list
        partners = self._search(Partner, [('company_id', 'not in', [1])])
        self.assertTrue(len(partners) >= 4, "We should have at least 4 partners not related to company #1")
        # check that many2one will exclude the correct records with a list and False
        partners = self._search(Partner, ['|', ('company_id', 'not in', [1]),
                                              ('company_id', '=', False)])
        self.assertTrue(len(partners) >= 8, "We should have at least 8 partners not related to company #1")
        # check that multi-level expressions also work
        partners = self._search(Partner, [('company_id.partner_id', 'in', [])])
        self.assertFalse(partners)
        # check multi-level expressions with magic columns
        partners = self._search(Partner, [('create_uid.active', '=', True)])
        # check that multi-level expressions with negative op work
        all_partners = self._search(Partner, [('company_id', '!=', False)])
        # FP Note: filtered_domain differs
        res_partners = Partner.search([('company_id.partner_id', 'not in', [])])
        self.assertEqual(all_partners, res_partners, "not in [] fails")
        # Test the '(not) like/in' behavior. res.partner and its parent_id
        # column are used because parent_id is a many2one, allowing to test the
        # Null value, and there are actually some null and non-null values in
        # the demo data.
        all_partners = self._search(Partner, [])
        non_partner_id = max(all_partners.ids) + 1
        with_parent = all_partners.filtered(lambda p: p.parent_id)
        without_parent = all_partners.filtered(lambda p: not p.parent_id)
        with_website = all_partners.filtered(lambda p: p.website)
        # We treat null values differently than in SQL. For instance in SQL:
        #   SELECT id FROM res_partner WHERE parent_id NOT IN (0)
        # will return only the records with non-null parent_id.
        #   SELECT id FROM res_partner WHERE parent_id IN (0)
        # will return expectedly nothing (our ids always begin at 1).
        # This means the union of those two results will give only some
        # records, but not all present in database.
        #
        # When using domains and the ORM's search method, we think it is
        # more intuitive that the union returns all the records, and that
        # a domain like ('parent_id', 'not in', [0]) will return all
        # the records. For instance, if you perform a search for the companies
        # that don't have OpenERP has a parent company, you expect to find,
        # among others, the companies that don't have parent company.
        #
        # existing values be treated similarly if we simply check that some
        # existing value belongs to them.
        res_0 = self._search(Partner, [('parent_id', 'not like', 'probably_unexisting_name')]) # get all rows, included null parent_id
        self.assertEqual(res_0, all_partners)
        res_1 = self._search(Partner, [('parent_id', 'not in', [non_partner_id])]) # get all rows, included null parent_id
        self.assertEqual(res_1, all_partners)
        res_2 = self._search(Partner, [('parent_id', '!=', False)]) # get rows with not null parent_id, deprecated syntax
        self.assertEqual(res_2, with_parent)
        res_3 = self._search(Partner, [('parent_id', 'not in', [])]) # get all rows, included null parent_id
        self.assertEqual(res_3, all_partners)
        res_4 = self._search(Partner, [('parent_id', 'not in', [False])]) # get rows with not null parent_id
        self.assertEqual(res_4, with_parent)
        res_4b = self._search(Partner, [('parent_id', 'not ilike', '')]) # get only rows without parent
        self.assertEqual(res_4b, without_parent)
        # The results of these queries, when combined with queries 0..4 must
        # give the whole set of ids.
        res_5 = self._search(Partner, [('parent_id', 'like', 'probably_unexisting_name')])
        self.assertFalse(res_5)
        res_6 = self._search(Partner, [('parent_id', 'in', [non_partner_id])])
        self.assertFalse(res_6)
        res_7 = self._search(Partner, [('parent_id', '=', False)])
        self.assertEqual(res_7, without_parent)
        res_8 = self._search(Partner, [('parent_id', 'in', [])])
        self.assertFalse(res_8)
        res_9 = self._search(Partner, [('parent_id', 'in', [False])])
        self.assertEqual(res_9, without_parent)
        res_9b = self._search(Partner, [('parent_id', 'ilike', '')]) # get those with a parent
        self.assertEqual(res_9b, with_parent)
        # These queries must return exactly the results than the queries 0..4,
        # i.e. not ... in ... must be the same as ... not in ... .
        res_10 = self._search(Partner, ['!', ('parent_id', 'like', 'probably_unexisting_name')])
        self.assertEqual(res_0, res_10)
        res_11 = self._search(Partner, ['!', ('parent_id', 'in', [non_partner_id])])
        self.assertEqual(res_1, res_11)
        res_12 = self._search(Partner, ['!', ('parent_id', '=', False)])
        self.assertEqual(res_2, res_12)
        res_13 = self._search(Partner, ['!', ('parent_id', 'in', [])])
        self.assertEqual(res_3, res_13)
        res_14 = self._search(Partner, ['!', ('parent_id', 'in', [False])])
        self.assertEqual(res_4, res_14)
        # Testing many2one field is not enough, a regular char field is tested
        res_15 = self._search(Partner, [('website', 'in', [])])
        self.assertFalse(res_15)
        res_16 = self._search(Partner, [('website', 'not in', [])])
        self.assertEqual(res_16, all_partners)
        res_17 = self._search(Partner, [('website', '!=', False)])
        self.assertEqual(res_17, with_website)
        # check behavior for required many2one fields: currency_id is required
        companies = self.env['res.company'].search([])
        res_101 = self._search(companies, [('currency_id', 'not ilike', '')]) # get no companies
        self.assertFalse(res_101)
        res_102 = self._search(companies, [('currency_id', 'ilike', '')]) # get all companies
        self.assertEqual(res_102, companies)
def test_in_operator(self):
""" check that we can use the 'in' operator for plain fields """
menu = self.env['ir.ui.menu']
menus = self._search(menu, [('sequence', 'in', [1, 2, 10, 20])])
self.assertTrue(menus)
def test_in_boolean(self):
""" Check the 'in' operator for boolean fields. """
Partner = self.env['res.partner']
self.assertIn('active', Partner._fields, "I need a model with field 'active'")
count_true = Partner.search_count([('active', '=', True)])
self.assertTrue(count_true, "I need an active partner")
count_false = Partner.search_count([('active', '=', False)])
self.assertTrue(count_false, "I need an inactive partner")
count = Partner.search_count([('active', 'in', [True])])
self.assertEqual(count, count_true)
count = Partner.search_count([('active', 'in', [False])])
self.assertEqual(count, count_false)
count = Partner.search_count([('active', 'in', [True, False])])
self.assertEqual(count, count_true + count_false)
    def test_15_o2m(self):
        """x2many fields searched with empty lists, False and dotted paths."""
        Partner = self.env['res.partner']
        # test one2many operator with empty search list
        partners = self._search(Partner, [('child_ids', 'in', [])])
        self.assertFalse(partners)
        # test one2many operator with False: matches records with no children
        partners = self._search(Partner, [('child_ids', '=', False)])
        for partner in partners:
            self.assertFalse(partner.child_ids)
        # verify domain evaluation for one2many != False and one2many == False
        categories = self.env['res.partner.category'].search([])
        parents = self._search(categories, [('child_ids', '!=', False)])
        self.assertEqual(parents, categories.filtered(lambda c: c.child_ids))
        leafs = self._search(categories, [('child_ids', '=', False)])
        self.assertEqual(leafs, categories.filtered(lambda c: not c.child_ids))
        # test many2many operator with empty search list
        partners = self._search(Partner, [('category_id', 'in', [])])
        self.assertFalse(partners)
        # test many2many operator with False: splits the partners in two
        # complementary sets, without then with categories
        partners = self._search(Partner, [('category_id', '=', False)])
        self.assertTrue(partners)
        for partner in partners:
            self.assertFalse(partner.category_id)
        partners = self._search(Partner, [('category_id', '!=', False)])
        self.assertTrue(partners)
        for partner in partners:
            self.assertTrue(partner.category_id)
        # filtering on nonexistent value across x2many should return nothing
        partners = self._search(Partner, [('child_ids.city', '=', 'foo')])
        self.assertFalse(partners)
def test_15_equivalent_one2many_1(self):
Company = self.env['res.company']
company3 = Company.create({'name': 'Acme 3'})
company4 = Company.create({'name': 'Acme 4', 'parent_id': company3.id})
# one2many towards same model
res_1 = self._search(Company, [('child_ids', 'in', company3.child_ids.ids)]) # any company having a child of company3 as child
self.assertEqual(res_1, company3)
res_2 = self._search(Company, [('child_ids', 'in', company3.child_ids[0].ids)]) # any company having the first child of company3 as child
self.assertEqual(res_2, company3)
# child_of x returns x and its children (direct or not).
expected = company3 + company4
res_1 = self._search(Company, [('id', 'child_of', [company3.id])])
self.assertEqual(res_1, expected)
res_2 = self._search(Company, [('id', 'child_of', company3.id)])
self.assertEqual(res_2, expected)
res_3 = self._search(Company, [('id', 'child_of', [company3.name])])
self.assertEqual(res_3, expected)
res_4 = self._search(Company, [('id', 'child_of', company3.name)])
self.assertEqual(res_4, expected)
# parent_of x returns x and its parents (direct or not).
expected = company3 + company4
res_1 = self._search(Company, [('id', 'parent_of', [company4.id])])
self.assertEqual(res_1, expected)
res_2 = self._search(Company, [('id', 'parent_of', company4.id)])
self.assertEqual(res_2, expected)
res_3 = self._search(Company, [('id', 'parent_of', [company4.name])])
self.assertEqual(res_3, expected)
res_4 = self._search(Company, [('id', 'parent_of', company4.name)])
self.assertEqual(res_4, expected)
# try testing real subsets with IN/NOT IN
Partner = self.env['res.partner']
Users = self.env['res.users']
p1, _ = Partner.name_create("Dédé Boitaclou")
p2, _ = Partner.name_create("Raoulette Pizza O'poil")
u1a = Users.create({'login': 'dbo', 'partner_id': p1}).id
u1b = Users.create({'login': 'dbo2', 'partner_id': p1}).id
u2 = Users.create({'login': 'rpo', 'partner_id': p2}).id
res = self._search(Partner, [('user_ids', 'in', u1a)])
self.assertEqual([p1], res.ids, "o2m IN accept single int on right side")
res = self._search(Partner, [('user_ids', '=', 'Dédé Boitaclou')])
self.assertEqual([p1], res.ids, "o2m NOT IN matches none on the right side")
res = self._search(Partner, [('user_ids', 'in', [10000])])
self.assertEqual([], res.ids, "o2m NOT IN matches none on the right side")
res = self._search(Partner, [('user_ids', 'in', [u1a,u2])])
self.assertEqual([p1,p2], res.ids, "o2m IN matches any on the right side")
all_ids = self._search(Partner, []).ids
res = self._search(Partner, [('user_ids', 'not in', u1a)])
self.assertEqual(set(all_ids) - set([p1]), set(res.ids), "o2m NOT IN matches none on the right side")
res = self._search(Partner, [('user_ids', '!=', 'Dédé Boitaclou')])
self.assertEqual(set(all_ids) - set([p1]), set(res.ids), "o2m NOT IN matches none on the right side")
res = self._search(Partner, [('user_ids', 'not in', [u1b, u2])])
self.assertEqual(set(all_ids) - set([p1,p2]), set(res.ids), "o2m NOT IN matches none on the right side")
    def test_15_equivalent_one2many_2(self):
        """Equivalent negative domains through a one2many (currency -> rates)
        must return the same records however "nothing matches" is written."""
        Currency = self.env['res.currency']
        CurrencyRate = self.env['res.currency.rate']
        CurrencyRate.create([
            {
                'currency_id': self.env.ref('base.EUR').id,
                'name': '2000-01-01',
                'rate': 1.0,
            }, {
                'currency_id': self.env.ref('base.USD').id,
                'name': '2000-01-01',
                'rate': 1.2834,
            }, {
                'currency_id': self.env.ref('base.USD').id,
                'name': '2000-01-02',
                'rate': 1.5289,
            }
        ])
        # create a currency and a currency rate
        currency = Currency.create({'name': 'ZZZ', 'symbol': 'ZZZ', 'rounding': 1.0})
        currency_rate = CurrencyRate.create({'name': '2010-01-01', 'currency_id': currency.id, 'rate': 1.0})
        non_currency_id = currency_rate.id + 1000
        default_currency = Currency.browse(1)
        # search the currency via its rates one2many (the one2many must point back at the currency)
        currency_rate1 = self._search(CurrencyRate, [('currency_id', 'not like', 'probably_unexisting_name')])
        currency_rate2 = self._search(CurrencyRate, [('id', 'not in', [non_currency_id])])
        self.assertEqual(currency_rate1, currency_rate2)
        currency_rate3 = self._search(CurrencyRate, [('id', 'not in', [])])
        self.assertEqual(currency_rate1, currency_rate3)
        # one2many towards another model
        res_3 = self._search(Currency, [('rate_ids', 'in', default_currency.rate_ids.ids)]) # currencies having a rate of main currency
        self.assertEqual(res_3, default_currency)
        res_4 = self._search(Currency, [('rate_ids', 'in', default_currency.rate_ids[0].ids)]) # currencies having first rate of main currency
        self.assertEqual(res_4, default_currency)
        res_5 = self._search(Currency, [('rate_ids', 'in', default_currency.rate_ids[0].id)]) # currencies having first rate of main currency
        self.assertEqual(res_5, default_currency)
        # res_6 = Currency.search([('rate_ids', 'in', [default_currency.rate_ids[0].name])])
        # res_7 = Currency.search([('rate_ids', '=', default_currency.rate_ids[0].name)])
        # res_8 = Currency.search([('rate_ids', 'like', default_currency.rate_ids[0].name)])
        res_9 = self._search(Currency, [('rate_ids', 'like', 'probably_unexisting_name')])
        self.assertFalse(res_9)
        # Currency.search([('rate_ids', 'unexisting_op', 'probably_unexisting_name')]) # TODO expected exception
        # get the currencies referenced by some currency rates using a weird negative domain
        res_10 = self._search(Currency, [('rate_ids', 'not like', 'probably_unexisting_name')])
        res_11 = self._search(Currency, [('rate_ids', 'not in', [non_currency_id])])
        self.assertEqual(res_10, res_11)
        res_12 = self._search(Currency, [('rate_ids', '!=', False)])
        self.assertEqual(res_10, res_12)
        res_13 = self._search(Currency, [('rate_ids', 'not in', [])])
        self.assertEqual(res_10, res_13)
    def test_20_expression_parse(self):
        """Coverage for expression.parse(): inherited (delegated) fields,
        relational sub-paths, and the special '=?' operator."""
        # TDE note: those tests have been added when refactoring the expression.parse() method.
        # They come in addition to the already existing tests; maybe some tests
        # will be a bit redundant
        Users = self.env['res.users']
        # Create users
        a = Users.create({'name': 'test_A', 'login': 'test_A'})
        b1 = Users.create({'name': 'test_B', 'login': 'test_B'})
        b2 = Users.create({'name': 'test_B2', 'login': 'test_B2', 'parent_id': b1.partner_id.id})
        # Test1: simple inheritance ('name' lives on the delegated res.partner)
        users = self._search(Users, [('name', 'like', 'test')])
        self.assertEqual(users, a + b1 + b2, 'searching through inheritance failed')
        users = self._search(Users, [('name', '=', 'test_B')])
        self.assertEqual(users, b1, 'searching through inheritance failed')
        # Test2: inheritance + relational fields
        users = self._search(Users, [('child_ids.name', 'like', 'test_B')])
        self.assertEqual(users, b1, 'searching through inheritance failed')
        # Special =? operator mean "is equal if right is set, otherwise always True"
        users = self._search(Users, [('name', 'like', 'test'), ('parent_id', '=?', False)])
        self.assertEqual(users, a + b1 + b2, '(x =? False) failed')
        users = self._search(Users, [('name', 'like', 'test'), ('parent_id', '=?', b1.partner_id.id)])
        self.assertEqual(users, b2, '(x =? id) failed')
def test_30_normalize_domain(self):
    """normalize_domain() makes every implicit '&' explicit."""
    # An already-normalized domain must come back unchanged.
    already_normal = ['&', (1, '=', 1), ('a', '=', 'b')]
    self.assertEqual(already_normal, expression.normalize_domain(already_normal),
                     "Normalized domains should be left untouched")
    # Four AND-ed terms require three explicit '&' operators prepended.
    raw = [('x', 'in', ['y', 'z']), ('a.v', '=', 'e'), '|', '|', ('a', '=', 'b'), '!', ('c', '>', 'd'), ('e', '!=', 'f'), ('g', '=', 'h')]
    expected = ['&', '&', '&'] + raw
    self.assertEqual(expected, expression.normalize_domain(raw),
                     "Non-normalized domains should be properly normalized")
def test_35_negating_thruty_leafs(self):
    """distribute_not() collapses '!' chains over constant leaves.

    NOTE(review): "thruty" in the method name is a typo for "truthy";
    kept as-is since the test id is the method name.
    """
    true_leaf = expression.TRUE_LEAF
    false_leaf = expression.FALSE_LEAF
    # An even number of negations is the identity on either constant.
    for bangs in (2, 4):
        for leaf in (true_leaf, false_leaf):
            self.assertEqual(expression.distribute_not(['!'] * bangs + [leaf]),
                             [leaf], "distribute_not applied wrongly")
    # An odd number of negations flips the constant.
    for bangs in (1, 3):
        self.assertEqual(expression.distribute_not(['!'] * bangs + [true_leaf]),
                         [false_leaf], "distribute_not applied wrongly")
        self.assertEqual(expression.distribute_not(['!'] * bangs + [false_leaf]),
                         [true_leaf], "distribute_not applied wrongly")
def test_40_negating_long_expression(self):
    """distribute_not() pushes '!' down into the leaves (De Morgan)."""
    negated = ['!', '&', ('user_id', '=', 4), ('partner_id', 'in', [1, 2])]
    expected = ['|', ('user_id', '!=', 4), ('partner_id', 'not in', [1, 2])]
    self.assertEqual(expression.distribute_not(negated), expected,
                     "distribute_not on expression applied wrongly")

    pos_leaves = [[('a', 'in', [])], [('d', '!=', 3)]]
    neg_leaves = [[('a', 'not in', [])], [('d', '=', 3)]]

    # A large expression with no '!' must come through unchanged...
    big = expression.OR([expression.AND(pos_leaves)] * 1000)
    self.assertEqual(expression.distribute_not(big), big,
                     "distribute_not on long expression without negation operator should not alter it")

    # ...while a single leading '!' dualizes the whole expression.
    self.assertEqual(expression.distribute_not(['!'] + big),
                     expression.AND([expression.OR(neg_leaves)] * 1000),
                     "distribute_not on long expression applied wrongly")
def test_accent(self):
    """Diacritics are ignored by (i)like when the database has unaccent.

    Does nothing when the registry reports no unaccent support.
    """
    if not self.registry.has_unaccent:
        return
    Company = self.env['res.company']
    helene = Company.create({'name': u'Hélène'})
    # Accented and unaccented needles both match the accented record...
    for needle in ('Helene', 'hélène'):
        self.assertEqual(helene, Company.search([('name', 'ilike', needle)]))
    # ...and both exclude it from the negated search.
    for needle in ('Helene', 'hélène'):
        self.assertNotIn(helene, Company.search([('name', 'not ilike', needle)]))
def test_pure_function(self):
    """AND/OR/normalize_domain must be pure: mutating a returned list
    must not corrupt the result of a later identical call."""
    orig_false = expression.FALSE_DOMAIN.copy()
    orig_true = expression.TRUE_DOMAIN.copy()
    false = orig_false.copy()
    true = orig_true.copy()

    # (builder, result expected from a *fresh* call)
    cases = [
        (lambda: expression.AND([]), orig_true),
        (lambda: expression.AND([false]), orig_false),
        (lambda: expression.OR([]), orig_false),
        (lambda: expression.OR([true]), orig_true),
        (lambda: expression.normalize_domain([]), orig_true),
    ]
    for build, expected in cases:
        first = build()
        first += [('id', '=', 1)]  # mutate the first result in place
        # A second call must be unaffected by the mutation above.
        self.assertEqual(build(), expected)
def test_like_wildcards(self):
    """'=like' / '=ilike' honour the _ and % wildcards literally,
    without the implicit surrounding % that plain (i)like adds."""
    # On an untranslated field (res.partner.name).
    Partner = self.env['res.partner']
    matches = self._search(Partner, [('name', '=like', 'I_ner_W_rk_')])
    self.assertTrue(all(rec.name == 'Inner Works' for rec in matches), "Must match only 'Inner Works'")
    matches = self._search(Partner, [('name', '=ilike', 'G%')])
    self.assertTrue(len(matches) >= 1, "Must match one partner (Gemini Furniture)")
    # On a translated field (res.country.name).
    Country = self.env['res.country']
    hits = self._search(Country, [('name', '=like', 'Ind__')])
    self.assertTrue(len(hits) == 1, "Must match India only")
    hits = self._search(Country, [('name', '=ilike', 'z%')])
    self.assertTrue(len(hits) == 2, "Must match only countries with names starting with Z (currently 2)")
def test_translate_search(self):
    """Positive and negative operators work on a translated field."""
    Country = self.env['res.country']
    belgium = self.env.ref('base.be')
    # Each positive domain must select exactly Belgium.
    for dom in ([('name', '=', 'Belgium')],
                [('name', 'ilike', 'Belgi')],
                [('name', 'in', ['Belgium', 'Care Bears'])]):
        self.assertEqual(self._search(Country, dom), belgium)
    # 'not in' against a non-existing name must exclude nothing.
    self.assertEqual(
        self._search(Country, [('name', 'not in', ['No country'])]),
        self._search(Country, []),
    )
@mute_logger('odoo.sql_db')
def test_invalid(self):
    """ verify that invalid expressions are refused, even for magic fields """
    Country = self.env['res.country']
    # Unknown field name: rejected by search() (ValueError) and by
    # filtered_domain() (KeyError, raised by the in-memory evaluation).
    with self.assertRaises(ValueError):
        Country.search([('does_not_exist', '=', 'foo')])
    with self.assertRaises(KeyError):
        Country.search([]).filtered_domain([('does_not_exist', '=', 'foo')])
    # Unknown operator: rejected by both code paths with ValueError.
    with self.assertRaises(ValueError):
        Country.search([('create_date', '>>', 'foo')])
    with self.assertRaises(ValueError):
        Country.search([]).filtered_domain([('create_date', '>>', 'foo')])
    # SQL-injection attempt: the value reaches PostgreSQL only as a bound
    # parameter, so the malformed date fails with a DataError (logged
    # errors silenced by the decorator above).
    with self.assertRaises(psycopg2.DataError):
        Country.search([('create_date', '=', "1970-01-01'); --")])
def test_active(self):
    """Domains combining relational traversals with active=False reach
    archived records (many2many and one2many cases)."""
    Partner = self.env['res.partner']
    # Archived partner carrying an m2m category and an o2m child in Belgium.
    Partner.create({
        'name': 'OpenERP Test',
        'active': False,
        'category_id': [Command.set([self.partner_category.id])],
        'child_ids': [Command.create({'name': 'address of OpenERP Test', 'country_id': self.ref("base.be")})],
    })
    # many2many traversal on the archived record
    found = self._search(Partner, [('category_id', 'ilike', 'sellers'), ('active', '=', False)], [('active', '=', False)])
    self.assertTrue(found, "Record not Found with category sellers and active False.")
    # one2many traversal on the archived record
    found = self._search(Partner, [('child_ids.country_id', '=', 'Belgium'), ('active', '=', False)], [('active', '=', False)])
    self.assertTrue(found, "Record not Found with country Belgium and active False.")
def test_lp1071710(self):
    """ Check that we can exclude translated fields (bug lp:1071710) """
    # first install french language
    self.env['res.lang']._activate_lang('fr_FR')
    # give 'Pepper Street' a Belgian country for the indirect test below
    self.env['res.partner'].search([('name', '=', 'Pepper Street')]).country_id = self.env.ref('base.be')
    # actual test
    Country = self.env['res.country'].with_context(lang='fr_FR')
    be = self.env.ref('base.be')
    # write the French translation; `be` itself was fetched without a
    # lang context, so its name must still be the source term
    be.with_context(lang='fr_FR').name = "Belgique"
    self.assertNotEqual(be.name, "Belgique", "Setting a translation should not impact other languages")
    # direct exclusion on the translated field, under the French context
    not_be = self._search(Country, [('name', '!=', 'Belgique')])
    self.assertNotIn(be, not_be)
    # indirect search via m2o
    Partner = self.env['res.partner']
    deco_addict = self._search(Partner, [('name', '=', 'Pepper Street')])
    # exclusion through the m2o with the English term (no lang context)...
    not_be = self._search(Partner, [('country_id', '!=', 'Belgium')])
    self.assertNotIn(deco_addict, not_be)
    # ...and with the French term under the French context
    Partner = Partner.with_context(lang='fr_FR')
    not_be = self._search(Partner, [('country_id', '!=', 'Belgique')])
    self.assertNotIn(deco_addict, not_be)
def test_or_with_implicit_and(self):
    """expression.OR must normalize operands using an implicit '&'
    before combining them (regression test for #24038)."""
    implicit_and = [('foo', '=', 1), ('bar', '=', 1)]
    explicit_and = ['&', ('foo', '=', 2), ('bar', '=', 2)]
    combined = expression.OR([implicit_and, explicit_and])
    self.assertEqual(combined, [
        '|',
        '&', ('foo', '=', 1), ('bar', '=', 1),
        '&', ('foo', '=', 2), ('bar', '=', 2),
    ])
def test_proper_combine_unit_leaves(self):
    """AND()/OR() simplify the unit leaves TRUE_LEAF / FALSE_LEAF."""
    false = expression.FALSE_DOMAIN
    true = expression.TRUE_DOMAIN
    normal = [('foo', '=', 'bar')]
    # (combined expression, expected simplification)
    cases = [
        (expression.OR([false]), false),                            # OR of a single FALSE
        (expression.OR([false, false]), false),                     # OR of several FALSE
        (expression.OR([false, normal]), normal),                   # FALSE is neutral for OR
        (expression.OR([expression.AND([true]), normal]), true),    # TRUE absorbs the OR
        (expression.AND([true]), true),                             # AND of a single TRUE
        (expression.AND([true, true]), true),                       # AND of several TRUE
        (expression.AND([true, normal]), normal),                   # TRUE is neutral for AND
        (expression.AND([expression.OR([false]), normal]), false),  # FALSE absorbs the AND
    ]
    for combined, expected in cases:
        self.assertEqual(combined, expected)
def test_filtered_domain_order(self):
    """filtered_domain() must keep the recordset's own ordering."""
    domain = [('name', 'ilike', 'a')]
    countries = self.env['res.country'].search(domain)
    self.assertGreater(len(countries), 1)
    # Every record matches the domain, so ids come back unchanged...
    self.assertEqual(countries.filtered_domain(domain)._ids, countries._ids)
    # ...regardless of the initial ordering of the recordset.
    countries = countries.browse(reversed(countries._ids))
    self.assertEqual(countries.filtered_domain(domain)._ids, countries._ids)
class TestExpression2(TransactionCase):

    def test_long_table_alias(self):
        """Generated table aliases must stay within PostgreSQL's
        64-character identifier limit, even with long order-by join chains."""
        # Force _order values that make the ORM join res_partner repeatedly.
        for model_name, order in [
            ('res.users', 'partner_id'),
            ('res.partner', 'commercial_partner_id,company_id,name'),
        ]:
            self.patch(self.registry[model_name], '_order', order)
        # Must not raise despite the very long generated aliases.
        self.env['res.users'].search([('name', '=', 'test')])
class TestAutoJoin(TransactionCase):
    """Search behaviour across relational fields with and without the
    ``auto_join`` field attribute: results must be identical either way."""

    def test_auto_join(self):
        # Get models
        partner_obj = self.env['res.partner']
        state_obj = self.env['res.country.state']
        bank_obj = self.env['res.partner.bank']

        # Get test columns
        def patch_auto_join(model, fname, value):
            # toggle auto_join on a field for the duration of this test
            self.patch(model._fields[fname], 'auto_join', value)

        def patch_domain(model, fname, value):
            # override a field's domain attribute for the duration of this test
            self.patch(model._fields[fname], 'domain', value)

        # Get country/state data
        Country = self.env['res.country']
        country_us = Country.search([('code', 'like', 'US')], limit=1)
        State = self.env['res.country.state']
        states = State.search([('country_id', '=', country_us.id)], limit=2)

        # Create demo data: partners and bank object
        # naming: p_X is a root partner, p_XY a child of p_X, b_XY the bank
        # account of p_XY
        p_a = partner_obj.create({'name': 'test__A', 'state_id': states[0].id})
        p_b = partner_obj.create({'name': 'test__B', 'state_id': states[1].id})
        p_c = partner_obj.create({'name': 'test__C', 'state_id': False})
        p_aa = partner_obj.create({'name': 'test__AA', 'parent_id': p_a.id, 'state_id': states[0].id})
        p_ab = partner_obj.create({'name': 'test__AB', 'parent_id': p_a.id, 'state_id': states[1].id})
        p_ba = partner_obj.create({'name': 'test__BA', 'parent_id': p_b.id, 'state_id': states[0].id})
        b_aa = bank_obj.create({'acc_number': '123', 'acc_type': 'bank', 'partner_id': p_aa.id})
        b_ab = bank_obj.create({'acc_number': '456', 'acc_type': 'bank', 'partner_id': p_ab.id})
        b_ba = bank_obj.create({'acc_number': '789', 'acc_type': 'bank', 'partner_id': p_ba.id})

        # --------------------------------------------------
        # Test1: basics about the attribute
        # --------------------------------------------------
        # auto_join on a many2many is not supported
        patch_auto_join(partner_obj, 'category_id', True)
        with self.assertRaises(NotImplementedError):
            partner_obj.search([('category_id.name', '=', 'foo')])

        # --------------------------------------------------
        # Test2: one2many
        # --------------------------------------------------
        name_test = '12'

        # Do: one2many without _auto_join
        partners = partner_obj.search([('bank_ids.sanitized_acc_number', 'like', name_test)])
        self.assertEqual(partners, p_aa,
            "_auto_join off: ('bank_ids.sanitized_acc_number', 'like', '..'): incorrect result")
        partners = partner_obj.search(['|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', name_test)])
        self.assertIn(p_aa, partners,
            "_auto_join off: '|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', '..'): incorrect result")
        self.assertIn(p_c, partners,
            "_auto_join off: '|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', '..'): incorrect result")

        # Do: cascaded one2many without _auto_join
        partners = partner_obj.search([('child_ids.bank_ids.id', 'in', [b_aa.id, b_ba.id])])
        self.assertEqual(partners, p_a + p_b,
            "_auto_join off: ('child_ids.bank_ids.id', 'in', [..]): incorrect result")

        # Do: one2many with _auto_join
        patch_auto_join(partner_obj, 'bank_ids', True)
        partners = partner_obj.search([('bank_ids.sanitized_acc_number', 'like', name_test)])
        self.assertEqual(partners, p_aa,
            "_auto_join on: ('bank_ids.sanitized_acc_number', 'like', '..') incorrect result")
        partners = partner_obj.search(['|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', name_test)])
        self.assertIn(p_aa, partners,
            "_auto_join on: '|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', '..'): incorrect result")
        self.assertIn(p_c, partners,
            "_auto_join on: '|', ('name', 'like', 'C'), ('bank_ids.sanitized_acc_number', 'like', '..'): incorrect result")

        # Do: one2many with _auto_join, test final leaf is an id
        bank_ids = [b_aa.id, b_ab.id]
        partners = partner_obj.search([('bank_ids.id', 'in', bank_ids)])
        self.assertEqual(partners, p_aa + p_ab,
            "_auto_join on: ('bank_ids.id', 'in', [..]) incorrect result")

        # Do: 2 cascaded one2many with _auto_join, test final leaf is an id
        patch_auto_join(partner_obj, 'child_ids', True)
        bank_ids = [b_aa.id, b_ba.id]
        partners = partner_obj.search([('child_ids.bank_ids.id', 'in', bank_ids)])
        self.assertEqual(partners, p_a + p_b,
            "_auto_join on: ('child_ids.bank_ids.id', 'not in', [..]): incorrect result")

        # --------------------------------------------------
        # Test3: many2one
        # --------------------------------------------------
        name_test = 'US'

        # Do: many2one without _auto_join
        partners = partner_obj.search([('state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b + p_aa + p_ab + p_ba, partners,
            "_auto_join off: ('state_id.country_id.code', 'like', '..') incorrect result")
        partners = partner_obj.search(['|', ('state_id.code', '=', states[0].code), ('name', 'like', 'C')])
        self.assertIn(p_a, partners, '_auto_join off: disjunction incorrect result')
        self.assertIn(p_c, partners, '_auto_join off: disjunction incorrect result')

        # Do: many2one with 1 _auto_join on the first many2one
        patch_auto_join(partner_obj, 'state_id', True)
        partners = partner_obj.search([('state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b + p_aa + p_ab + p_ba, partners,
            "_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') incorrect result")
        partners = partner_obj.search(['|', ('state_id.code', '=', states[0].code), ('name', 'like', 'C')])
        self.assertIn(p_a, partners, '_auto_join: disjunction incorrect result')
        self.assertIn(p_c, partners, '_auto_join: disjunction incorrect result')

        # Do: many2one with 1 _auto_join on the second many2one
        patch_auto_join(partner_obj, 'state_id', False)
        patch_auto_join(state_obj, 'country_id', True)
        partners = partner_obj.search([('state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b + p_aa + p_ab + p_ba, partners,
            "_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') incorrect result")

        # Do: many2one with 2 _auto_join
        patch_auto_join(partner_obj, 'state_id', True)
        patch_auto_join(state_obj, 'country_id', True)
        partners = partner_obj.search([('state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b + p_aa + p_ab + p_ba, partners,
            "_auto_join on: ('state_id.country_id.code', 'like', '..') incorrect result")

        # --------------------------------------------------
        # Test4: domain attribute on one2many fields
        # --------------------------------------------------
        patch_auto_join(partner_obj, 'child_ids', True)
        patch_auto_join(partner_obj, 'bank_ids', True)
        # the o2m field domains must be applied on top of the search domain
        patch_domain(partner_obj, 'child_ids', lambda self: ['!', ('name', '=', self._name)])
        patch_domain(partner_obj, 'bank_ids', [('sanitized_acc_number', 'like', '2')])

        # Do: 2 cascaded one2many with _auto_join, test final leaf is an id
        partners = partner_obj.search(['&', (1, '=', 1), ('child_ids.bank_ids.id', 'in', [b_aa.id, b_ba.id])])
        self.assertLessEqual(p_a, partners,
            "_auto_join on one2many with domains incorrect result")
        self.assertFalse((p_ab + p_ba) & partners,
            "_auto_join on one2many with domains incorrect result")

        # with a never-matching child_ids domain, the search finds nothing
        patch_domain(partner_obj, 'child_ids', lambda self: [('name', '=', '__%s' % self._name)])
        partners = partner_obj.search(['&', (1, '=', 1), ('child_ids.bank_ids.id', 'in', [b_aa.id, b_ba.id])])
        self.assertFalse(partners,
            "_auto_join on one2many with domains incorrect result")

        # ----------------------------------------
        # Test5: result-based tests
        # ----------------------------------------
        # reset all patched attributes before the final comparison
        patch_auto_join(partner_obj, 'bank_ids', False)
        patch_auto_join(partner_obj, 'child_ids', False)
        patch_auto_join(partner_obj, 'state_id', False)
        patch_auto_join(partner_obj, 'parent_id', False)
        patch_auto_join(state_obj, 'country_id', False)
        patch_domain(partner_obj, 'child_ids', [])
        patch_domain(partner_obj, 'bank_ids', [])

        # Do: ('child_ids.state_id.country_id.code', 'like', '..') without _auto_join
        partners = partner_obj.search([('child_ids.state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b, partners,
            "_auto_join off: ('child_ids.state_id.country_id.code', 'like', '..') incorrect result")

        # Do: ('child_ids.state_id.country_id.code', 'like', '..') with _auto_join
        patch_auto_join(partner_obj, 'child_ids', True)
        patch_auto_join(partner_obj, 'state_id', True)
        patch_auto_join(state_obj, 'country_id', True)
        partners = partner_obj.search([('child_ids.state_id.country_id.code', 'like', name_test)])
        self.assertLessEqual(p_a + p_b, partners,
            "_auto_join on: ('child_ids.state_id.country_id.code', 'like', '..') incorrect result")

    def test_nullfields(self):
        """Negative operators must also select records whose field is NULL."""
        obj1 = self.env['res.bank'].create({'name': 'c0'})  # no city (NULL)
        obj2 = self.env['res.bank'].create({'name': 'c1', 'city': 'Ljósálfaheimr'})
        obj3 = self.env['res.bank'].create({'name': 'c2', 'city': 'York'})
        obj4 = self.env['res.bank'].create({'name': 'c3', 'city': 'Springfield'})

        # '!=' keeps the NULL-city record
        self.assertEqual(
            self.env['res.bank'].search([
                ('id', 'in', (obj1 | obj2 | obj3 | obj4).ids),
                ('city', '!=', 'York'),
            ]),
            (obj1 | obj2 | obj4),
            "Should have returned all banks whose city is not York"
        )

        # 'not ilike' keeps the NULL-city record as well
        self.assertEqual(
            self.env['res.bank'].search([
                ('id', 'in', (obj1 | obj2 | obj3 | obj4).ids),
                ('city', 'not ilike', 'field'),
            ]),
            (obj1 | obj2 | obj3),
            "Should have returned all banks whose city doesn't contain field"
        )
class TestQueries(TransactionCase):
    """Assert the exact SQL generated by search()/search_count().

    Each test first runs the search once outside the assertQueries block —
    presumably to warm ORM caches so only the final SELECT is asserted;
    TODO confirm against assertQueries' implementation.
    """

    def test_logic(self):
        # nested &/|/! operators compile to a single flat WHERE clause;
        # note the negated '=' also matches NULL (ref != %s OR ref IS NULL)
        Model = self.env['res.partner']
        domain = [
            '&', ('name', 'like', 'foo'),
            '|', ('title', '=', 1), '!', ('ref', '=', '42'),
        ]
        Model.search(domain)
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE (("res_partner"."active" = %s) AND (
                ("res_partner"."name"::text LIKE %s) AND (
                    ("res_partner"."title" = %s) OR (
                        ("res_partner"."ref" != %s) OR
                        "res_partner"."ref" IS NULL
                    )
                )
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            Model.search(domain)

    def test_order(self):
        # default order comes from the model; an explicit order='id' overrides it
        Model = self.env['res.partner']
        Model.search([('name', 'like', 'foo')])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE (("res_partner"."active" = %s) AND ("res_partner"."name"::text LIKE %s))
            ORDER BY "res_partner"."display_name"
        ''']):
            Model.search([('name', 'like', 'foo')])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE (("res_partner"."active" = %s) AND ("res_partner"."name"::text LIKE %s))
            ORDER BY "res_partner"."id"
        ''']):
            Model.search([('name', 'like', 'foo')], order='id')

    def test_count(self):
        # search_count() issues a count(1) without any ORDER BY
        Model = self.env['res.partner']
        Model.search([('name', 'like', 'foo')])
        with self.assertQueries(['''
            SELECT count(1)
            FROM "res_partner"
            WHERE (("res_partner"."active" = %s) AND ("res_partner"."name"::text LIKE %s))
        ''']):
            Model.search_count([('name', 'like', 'foo')])

    def test_translated_field(self):
        # searching a translated field joins ir_translation and falls back
        # on the source value via COALESCE, both in WHERE and ORDER BY
        self.env['res.lang']._activate_lang('fr_FR')
        Model = self.env['res.partner.title'].with_context(lang='fr_FR')
        Model.search([('name', 'ilike', 'foo')])
        with self.assertQueries(['''
            SELECT "res_partner_title".id
            FROM "res_partner_title"
            LEFT JOIN "ir_translation" AS "res_partner_title__name" ON
                ("res_partner_title"."id" = "res_partner_title__name"."res_id"
                AND "res_partner_title__name"."type" = 'model'
                AND "res_partner_title__name"."name" = %s
                AND "res_partner_title__name"."lang" = %s
                AND "res_partner_title__name"."value" != %s)
            WHERE COALESCE("res_partner_title__name"."value", "res_partner_title"."name") LIKE %s
            ORDER BY COALESCE("res_partner_title__name"."value", "res_partner_title"."name")
        ''']):
            Model.search([('name', 'like', 'foo')])
        # searching on 'id' only needs no translation join
        with self.assertQueries(['''
            SELECT COUNT(1)
            FROM "res_partner_title"
            WHERE ("res_partner_title"."id" = %s)
        ''']):
            Model.search_count([('id', '=', 1)])

    @mute_logger('odoo.models.unlink')
    def test_access_rules(self):
        # record rules on the model AND on the inherited partner are both
        # injected into the WHERE clause
        Model = self.env['res.users'].with_user(self.env.ref('base.user_admin'))
        self.env['ir.rule'].search([]).unlink()
        self.env['ir.rule'].create([{
            'name': 'users rule',
            'model_id': self.env['ir.model']._get('res.users').id,
            'domain_force': str([('id', '=', 1)]),
        }, {
            'name': 'partners rule',
            'model_id': self.env['ir.model']._get('res.partner').id,
            'domain_force': str([('id', '=', 1)]),
        }])
        Model.search([])
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            LEFT JOIN "res_partner" AS "res_users__partner_id" ON
                ("res_users"."partner_id" = "res_users__partner_id"."id")
            WHERE ("res_users"."active" = %s)
                AND ("res_users"."id" = %s)
                AND ("res_users__partner_id"."id" = %s)
            ORDER BY "res_users__partner_id"."name", "res_users"."login"
        ''']):
            Model.search([])
class TestMany2one(TransactionCase):
    """SQL generated by searches traversing many2one fields: subqueries
    without auto_join, LEFT JOINs with it, name_search fallback."""

    def setUp(self):
        super().setUp()
        # active_test=False keeps the generated SQL free of 'active' clauses
        self.Partner = self.env['res.partner'].with_context(active_test=False)
        self.User = self.env['res.users'].with_context(active_test=False)
        self.company = self.env['res.company'].browse(1)

    def test_inherited(self):
        # searching an inherited field joins the parent table directly
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            LEFT JOIN "res_partner" AS "res_users__partner_id" ON
                ("res_users"."partner_id" = "res_users__partner_id"."id")
            WHERE ("res_users__partner_id"."name"::text LIKE %s)
            ORDER BY "res_users__partner_id"."name", "res_users"."login"
        ''']):
            self.User.search([('name', 'like', 'foo')])

        # the field supporting the inheritance should be auto_join, too
        # TODO: use another model, since 'res.users' has explicit auto_join
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            LEFT JOIN "res_partner" AS "res_users__partner_id" ON
                ("res_users"."partner_id" = "res_users__partner_id"."id")
            WHERE ("res_users__partner_id"."name"::text LIKE %s)
            ORDER BY "res_users__partner_id"."name", "res_users"."login"
        ''']):
            self.User.search([('partner_id.name', 'like', 'foo')])

    def test_regular(self):
        # without auto_join, each dotted step becomes a nested IN subquery
        self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])
        self.Partner.search([('country_id.code', 'like', 'BE')])

        # direct comparison on the m2o column itself
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" = %s)
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id', '=', self.company.id)])

        # one dotted step: one subquery
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."name"::text like %s)
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.name', 'like', self.company.name)])

        # two dotted steps: nested subqueries
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."partner_id" IN (
                    SELECT "res_partner".id
                    FROM "res_partner"
                    WHERE ("res_partner"."name"::text LIKE %s)
                ))
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])

        # disjunction of two traversals: two subqueries OR-ed together
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE (("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."name"::text LIKE %s)
            )) OR ("res_partner"."country_id" IN (
                SELECT "res_country".id
                FROM "res_country"
                WHERE ("res_country"."code"::text LIKE %s)
            )))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([
                '|',
                ('company_id.name', 'like', self.company.name),
                ('country_id.code', 'like', 'BE'),
            ])

    def test_explicit_subquery(self):
        # passing a Query object (from _search) inlines it as a subquery
        self.Partner.search([('company_id.name', 'like', self.company.name)])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."name"::text like %s)
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            company_ids = self.company._search([('name', 'like', self.company.name)], order='id')
            self.Partner.search([('company_id', 'in', company_ids)])

        # special case, with a LIMIT to make ORDER BY necessary
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."name"::text like %s)
                ORDER BY "res_company"."id"
                LIMIT 1
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            company_ids = self.company._search([('name', 'like', self.company.name)], order='id', limit=1)
            self.Partner.search([('company_id', 'in', company_ids)])

    def test_autojoin(self):
        # auto_join on the first many2one: LEFT JOIN replaces the outer subquery
        self.patch(self.Partner._fields['company_id'], 'auto_join', True)
        self.patch(self.company._fields['partner_id'], 'auto_join', False)
        self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            LEFT JOIN "res_company" AS "res_partner__company_id" ON
                ("res_partner"."company_id" = "res_partner__company_id"."id")
            WHERE ("res_partner__company_id"."name"::text LIKE %s)
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.name', 'like', self.company.name)])

        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            LEFT JOIN "res_company" AS "res_partner__company_id" ON
                ("res_partner"."company_id" = "res_partner__company_id"."id")
            WHERE ("res_partner__company_id"."partner_id" IN (
                SELECT "res_partner".id
                FROM "res_partner"
                WHERE ("res_partner"."name"::text LIKE %s)
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])

        # auto_join on the second many2one: the JOIN moves inside the subquery
        self.patch(self.Partner._fields['company_id'], 'auto_join', False)
        self.patch(self.company._fields['partner_id'], 'auto_join', True)
        self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                LEFT JOIN "res_partner" AS "res_company__partner_id" ON
                    ("res_company"."partner_id" = "res_company__partner_id"."id")
                WHERE ("res_company__partner_id"."name"::text LIKE %s)
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])

        # auto_join on both many2one: a single chain of LEFT JOINs
        self.patch(self.Partner._fields['company_id'], 'auto_join', True)
        self.patch(self.company._fields['partner_id'], 'auto_join', True)
        self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            LEFT JOIN "res_company" AS "res_partner__company_id" ON
                ("res_partner"."company_id" = "res_partner__company_id"."id")
            LEFT JOIN "res_partner" AS "res_partner__company_id__partner_id" ON
                ("res_partner__company_id"."partner_id" = "res_partner__company_id__partner_id"."id")
            WHERE ("res_partner__company_id__partner_id"."name"::text LIKE %s)
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id.partner_id.name', 'like', self.company.name)])

        # union with two auto_join
        self.patch(self.Partner._fields['company_id'], 'auto_join', True)
        self.patch(self.Partner._fields['country_id'], 'auto_join', True)
        self.Partner.search([
            '|',
            ('company_id.name', 'like', self.company.name),
            ('country_id.code', 'like', 'BE'),
        ])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            LEFT JOIN "res_country" AS "res_partner__country_id" ON
                ("res_partner"."country_id" = "res_partner__country_id"."id")
            LEFT JOIN "res_company" AS "res_partner__company_id" ON
                ("res_partner"."company_id" = "res_partner__company_id"."id")
            WHERE (("res_partner__company_id"."name"::text LIKE %s)
                OR ("res_partner__country_id"."code"::text LIKE %s))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([
                '|',
                ('company_id.name', 'like', self.company.name),
                ('country_id.code', 'like', 'BE'),
            ])

    def test_name_search(self):
        # 'like' on the m2o itself searches on the comodel's name
        self.Partner.search([('company_id', 'like', self.company.name)])
        with self.assertQueries(['''
            SELECT "res_partner".id
            FROM "res_partner"
            WHERE ("res_partner"."company_id" IN (
                SELECT "res_company".id
                FROM "res_company"
                WHERE ("res_company"."name"::text LIKE %s)
            ))
            ORDER BY "res_partner"."display_name"
        ''']):
            self.Partner.search([('company_id', 'like', self.company.name)])
class TestOne2many(TransactionCase):
def setUp(self):
    super().setUp()
    # active_test=False keeps the generated SQL free of 'active' clauses
    self.Partner = self.env['res.partner'].with_context(active_test=False)
    # One partner holding three bank accounts.
    account_commands = [
        Command.create({'acc_number': number, 'acc_type': 'bank'})
        for number in ('123', '456', '789')
    ]
    self.partner = self.Partner.create({
        'name': 'Foo',
        'bank_ids': account_commands,
    })
def test_regular(self):
    """SQL generated by one2many traversals without auto_join: each step
    becomes an IN subquery over the inverse many2one column."""
    # warm-up runs so only the asserted queries remain — TODO confirm
    self.Partner.search([('bank_ids', 'in', self.partner.bank_ids.ids)])
    self.Partner.search([('bank_ids.sanitized_acc_number', 'like', '12')])
    self.Partner.search([('child_ids.bank_ids.sanitized_acc_number', 'like', '12')])

    # 'in' on the o2m itself: select the inverse column for the given ids
    with self.assertQueries(['''
        SELECT "res_partner".id
        FROM "res_partner"
        WHERE ("res_partner"."id" IN (
            SELECT "partner_id" FROM "res_partner_bank" WHERE "id" IN %s
        ))
        ORDER BY "res_partner"."display_name"
    ''']):
        self.Partner.search([('bank_ids', 'in', self.partner.bank_ids.ids)])

    # one dotted step: one subquery on the comodel
    with self.assertQueries(['''
        SELECT "res_partner".id
        FROM "res_partner"
        WHERE ("res_partner"."id" IN (
            SELECT "res_partner_bank"."partner_id"
            FROM "res_partner_bank"
            WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
        ))
        ORDER BY "res_partner"."display_name"
    ''']):
        self.Partner.search([('bank_ids.sanitized_acc_number', 'like', '12')])

    # two dotted steps: nested subqueries, NULL parents filtered out
    with self.assertQueries(['''
        SELECT "res_partner".id
        FROM "res_partner"
        WHERE ("res_partner"."id" IN (
            SELECT "res_partner"."parent_id"
            FROM "res_partner"
            WHERE ("res_partner"."id" IN (
                SELECT "res_partner_bank"."partner_id"
                FROM "res_partner_bank"
                WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
            )) AND "res_partner"."parent_id" IS NOT NULL
        ))
        ORDER BY "res_partner"."display_name"
    ''']):
        self.Partner.search([('child_ids.bank_ids.sanitized_acc_number', 'like', '12')])
def test_autojoin(self):
self.patch(self.Partner._fields['bank_ids'], 'auto_join', True)
self.patch(self.Partner._fields['child_ids'], 'auto_join', True)
self.Partner.search([('bank_ids', 'in', self.partner.bank_ids.ids)])
self.Partner.search([('bank_ids.sanitized_acc_number', 'like', '12')])
self.Partner.search([('child_ids.bank_ids.sanitized_acc_number', 'like', '12')])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "partner_id" FROM "res_partner_bank" WHERE "id" IN %s
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('bank_ids', 'in', self.partner.bank_ids.ids)])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('bank_ids.sanitized_acc_number', 'like', '12')])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE (("res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
)) AND ("res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
)))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([
('bank_ids.sanitized_acc_number', 'like', '12'),
('bank_ids.sanitized_acc_number', 'like', '45'),
])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "res_partner"."parent_id"
FROM "res_partner"
WHERE (("res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
)) AND ("res_partner"."active" = %s))
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('child_ids.bank_ids.sanitized_acc_number', 'like', '12')])
# check domains on one2many fields
self.patch(self.Partner._fields['bank_ids'], 'domain',
[('sanitized_acc_number', 'like', '2')])
self.patch(self.Partner._fields['child_ids'], 'domain',
lambda self: ['!', ('name', '=', self._name)])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "res_partner"."parent_id"
FROM "res_partner"
WHERE ((
"res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ((
"res_partner_bank"."id" IN (%s,%s,%s)
) AND (
"res_partner_bank"."sanitized_acc_number"::text LIKE %s
))
)
) AND (
("res_partner"."name" != %s) OR "res_partner"."name" IS NULL
))
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('child_ids.bank_ids.id', 'in', self.partner.bank_ids.ids)])
def test_autojoin_mixed(self):
self.patch(self.Partner._fields['child_ids'], 'auto_join', True)
self.patch(self.Partner._fields['state_id'], 'auto_join', True)
self.patch(self.Partner.state_id._fields['country_id'], 'auto_join', True)
self.Partner.search([('child_ids.state_id.country_id.code', 'like', 'US')])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "res_partner"."parent_id"
FROM "res_partner"
LEFT JOIN "res_country_state" AS "res_partner__state_id"
ON ("res_partner"."state_id" = "res_partner__state_id"."id")
LEFT JOIN "res_country" AS "res_partner__state_id__country_id"
ON ("res_partner__state_id"."country_id" = "res_partner__state_id__country_id"."id")
WHERE ((
"res_partner__state_id__country_id"."code"::text LIKE %s
) AND (
"res_partner"."active" = %s
))
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('child_ids.state_id.country_id.code', 'like', 'US')])
def test_name_search(self):
self.Partner.search([('bank_ids', 'like', '12')])
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "res_partner_bank"."partner_id"
FROM "res_partner_bank"
WHERE ("res_partner_bank"."sanitized_acc_number"::text LIKE %s)
))
ORDER BY "res_partner"."display_name"
''']):
self.Partner.search([('bank_ids', 'like', '12')])
def test_empty(self):
self.Partner.search([('bank_ids', '!=', False)], order='id')
self.Partner.search([('bank_ids', '=', False)], order='id')
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" IN (
SELECT "partner_id" FROM "res_partner_bank" WHERE "partner_id" IS NOT NULL
))
ORDER BY "res_partner"."id"
''']):
self.Partner.search([('bank_ids', '!=', False)], order='id')
with self.assertQueries(['''
SELECT "res_partner".id
FROM "res_partner"
WHERE ("res_partner"."id" NOT IN (
SELECT "partner_id" FROM "res_partner_bank" WHERE "partner_id" IS NOT NULL
))
ORDER BY "res_partner"."id"
''']):
self.Partner.search([('bank_ids', '=', False)], order='id')
class TestMany2many(TransactionCase):
    """ Check the exact SQL generated when searching through a many2many field
    (``res.users.groups_id`` / ``company_ids``): conditions translate to
    (NOT) EXISTS subqueries on the relation table.
    """
    def setUp(self):
        super().setUp()
        # active_test=False: searches on User must not implicitly filter on 'active'
        self.User = self.env['res.users'].with_context(active_test=False)
        self.company = self.env['res.company'].browse(1)
    def test_regular(self):
        group = self.env.ref('base.group_user')
        rule = group.rule_groups[0]
        # each search runs once before its assertQueries block, presumably to
        # warm up metadata so only the final SQL is counted -- TODO confirm
        self.User.search([('groups_id', 'in', group.ids)], order='id')
        self.User.search([('groups_id.name', 'like', group.name)], order='id')
        self.User.search([('groups_id.rule_groups.name', 'like', rule.name)], order='id')
        # 'in': EXISTS on the relation table
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
                AND "res_users__groups_id"."gid" IN %s
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id', 'in', group.ids)], order='id')
        # 'not in': same shape with NOT EXISTS
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE NOT EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
                AND "res_users__groups_id"."gid" IN %s
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id', 'not in', group.ids)], order='id')
        # dotted field: subquery on the comodel inside the EXISTS
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
                AND "res_users__groups_id"."gid" IN (
                    SELECT "res_groups".id
                    FROM "res_groups"
                    WHERE ("res_groups"."color" = %s)
                )
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id.color', '=', group.color)], order='id')
        # two many2many hops: nested EXISTS subqueries
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
                AND "res_users__groups_id"."gid" IN (
                    SELECT "res_groups".id
                    FROM "res_groups"
                    WHERE EXISTS (
                        SELECT 1 FROM "rule_group_rel" AS "res_groups__rule_groups"
                        WHERE "res_groups__rule_groups"."group_id" = "res_groups".id
                        AND "res_groups__rule_groups"."rule_group_id" IN (
                            SELECT "ir_rule".id
                            FROM "ir_rule"
                            WHERE ("ir_rule"."name"::text LIKE %s)
                        )
                    )
                )
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id.rule_groups.name', 'like', rule.name)], order='id')
    def test_autojoin(self):
        # auto_join is not supported on many2many fields: searching through
        # one must raise NotImplementedError
        self.patch(self.User._fields['groups_id'], 'auto_join', True)
        with self.assertRaises(NotImplementedError):
            self.User.search([('groups_id.name', '=', 'foo')])
    def test_name_search(self):
        # bare 'like' on the many2many: the condition lands on the comodel's
        # name column inside the EXISTS
        self.User.search([('company_ids', 'like', self.company.name)], order='id')
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE EXISTS (
                SELECT 1 FROM "res_company_users_rel" AS "res_users__company_ids"
                WHERE "res_users__company_ids"."user_id" = "res_users".id
                AND "res_users__company_ids"."cid" IN (
                    SELECT "res_company".id
                    FROM "res_company"
                    WHERE ("res_company"."name"::text LIKE %s)
                )
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('company_ids', 'like', self.company.name)], order='id')
    def test_empty(self):
        # (groups_id != False) / (= False): any / no row in the relation table
        self.User.search([('groups_id', '!=', False)], order='id')
        self.User.search([('groups_id', '=', False)], order='id')
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id', '!=', False)], order='id')
        with self.assertQueries(['''
            SELECT "res_users".id
            FROM "res_users"
            WHERE NOT EXISTS (
                SELECT 1 FROM "res_groups_users_rel" AS "res_users__groups_id"
                WHERE "res_users__groups_id"."uid" = "res_users".id
            )
            ORDER BY "res_users"."id"
        ''']):
            self.User.search([('groups_id', '=', False)], order='id')
| 47.587329 | 83,373 |
7,447 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
import psycopg2
import psycopg2.errorcodes
import odoo
from odoo.tests import common
from odoo.tests.common import BaseCase
ADMIN_USER_ID = common.ADMIN_USER_ID
@contextmanager
def environment():
    """ Yield a fresh admin Environment backed by a new cursor on the current
    test database; the cursor is committed and closed when the block exits.
    """
    db_registry = odoo.registry(common.get_db_name())
    with db_registry.cursor() as cursor:
        env = odoo.api.Environment(cursor, ADMIN_USER_ID, {})
        yield env
def drop_sequence(code):
    """ Delete every ir.sequence record matching ``code``, on its own cursor. """
    with environment() as env:
        env['ir.sequence'].search([('code', '=', code)]).unlink()
class TestIrSequenceStandard(BaseCase):
    """ A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """
    def test_ir_sequence_create(self):
        """ Creating a standard sequence must succeed. """
        with environment() as env:
            created = env['ir.sequence'].create({
                'code': 'test_sequence_type',
                'name': 'Test sequence',
            })
            self.assertTrue(created)
    def test_ir_sequence_search(self):
        """ A plain search on ir.sequence must return records. """
        with environment() as env:
            found = env['ir.sequence'].search([])
            self.assertTrue(found)
    def test_ir_sequence_draw(self):
        """ Drawing a number from the sequence must succeed. """
        with environment() as env:
            number = env['ir.sequence'].next_by_code('test_sequence_type')
            self.assertTrue(number)
    def test_ir_sequence_draw_twice(self):
        """ Drawing from two concurrent transactions must work for a
        standard sequence.
        """
        with environment() as env0, environment() as env1:
            first = env0['ir.sequence'].next_by_code('test_sequence_type')
            self.assertTrue(first)
            second = env1['ir.sequence'].next_by_code('test_sequence_type')
            self.assertTrue(second)
    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type')
class TestIrSequenceNoGap(BaseCase):
    """ Copy of the previous tests for a 'No gap' sequence. """
    def test_ir_sequence_create_no_gap(self):
        """ Try to create a sequence object. """
        with environment() as env:
            seq = env['ir.sequence'].create({
                'code': 'test_sequence_type_2',
                'name': 'Test sequence',
                'implementation': 'no_gap',
            })
            self.assertTrue(seq)
    def test_ir_sequence_draw_no_gap(self):
        """ Try to draw a number. """
        with environment() as env:
            n = env['ir.sequence'].next_by_code('test_sequence_type_2')
            self.assertTrue(n)
    def test_ir_sequence_draw_twice_no_gap(self):
        """ Try to draw a number from two transactions.
        This is expected to not work.
        """
        with environment() as env0:
            with environment() as env1:
                env1.cr._default_log_exceptions = False # Prevent logging a traceback
                # NOTE: The error has to be an OperationalError
                # s.t. the automatic request retry (service/model.py) works.
                with self.assertRaises(psycopg2.OperationalError) as e:
                    # env0 draws first and keeps its transaction open; the
                    # concurrent draw from env1 is then expected to raise
                    n0 = env0['ir.sequence'].next_by_code('test_sequence_type_2')
                    self.assertTrue(n0)
                    n1 = env1['ir.sequence'].next_by_code('test_sequence_type_2')
                # the error must be 'lock not available', i.e. the second
                # transaction failed fast instead of waiting on the first
                self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_2')
class TestIrSequenceChangeImplementation(BaseCase):
    """ Create sequence objects and change their ``implementation`` field. """
    def test_ir_sequence_1_create(self):
        """ Create one 'standard' and one 'no_gap' sequence. """
        with environment() as env:
            standard = env['ir.sequence'].create({
                'code': 'test_sequence_type_3',
                'name': 'Test sequence',
            })
            self.assertTrue(standard)
            no_gap = env['ir.sequence'].create({
                'code': 'test_sequence_type_4',
                'name': 'Test sequence',
                'implementation': 'no_gap',
            })
            self.assertTrue(no_gap)
    def test_ir_sequence_2_write(self):
        """ Flip both sequences through each implementation via write(). """
        with environment() as env:
            records = env['ir.sequence'].search(
                [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])])
            for implementation in ('standard', 'no_gap'):
                records.write({'implementation': implementation})
    def test_ir_sequence_3_unlink(self):
        """ Deleting both sequences must succeed. """
        with environment() as env:
            env['ir.sequence'].search(
                [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])]).unlink()
    @classmethod
    def tearDownClass(cls):
        for code in ('test_sequence_type_3', 'test_sequence_type_4'):
            drop_sequence(code)
class TestIrSequenceGenerate(BaseCase):
    """ Create sequence objects and generate some values. """
    def test_ir_sequence_create(self):
        """ A standard sequence must yield "1", "2", ... in order. """
        with environment() as env:
            record = env['ir.sequence'].create({
                'code': 'test_sequence_type_5',
                'name': 'Test sequence',
            })
            self.assertTrue(record)
        with environment() as env:
            # draw nine numbers and check they come out consecutively
            for expected in range(1, 10):
                drawn = env['ir.sequence'].next_by_code('test_sequence_type_5')
                self.assertEqual(drawn, str(expected))
    def test_ir_sequence_create_no_gap(self):
        """ A 'no_gap' sequence must yield "1", "2", ... in order as well. """
        with environment() as env:
            record = env['ir.sequence'].create({
                'code': 'test_sequence_type_6',
                'name': 'Test sequence',
                'implementation': 'no_gap',
            })
            self.assertTrue(record)
        with environment() as env:
            for expected in range(1, 10):
                drawn = env['ir.sequence'].next_by_code('test_sequence_type_6')
                self.assertEqual(drawn, str(expected))
    @classmethod
    def tearDownClass(cls):
        for code in ('test_sequence_type_5', 'test_sequence_type_6'):
            drop_sequence(code)
class TestIrSequenceInit(common.TransactionCase):
    def test_00(self):
        """ Check that drawing reflects the number_next value held by the
        PostgreSQL sequence (not a stale ir_sequence column value).
        """
        # create a plain 'standard' sequence starting at 1, padded to 4 digits
        sequence = self.env['ir.sequence'].create({
            'number_next': 1,
            'company_id': 1,
            'padding': 4,
            'number_increment': 1,
            'implementation': 'standard',
            'name': 'test-sequence-00',
        })
        # draw four numbers; only the last one matters for the check
        for _ in range(3):
            sequence.next_by_id()
        n = sequence.next_by_id()
        self.assertEqual(n, "0004", 'The actual sequence value must be 4. reading : %s' % n)
        # reset the sequence to 1 through write(), then draw again
        sequence.write({'number_next': 1})
        n = sequence.next_by_id()
        self.assertEqual(n, "0001", 'The actual sequence value must be 1. reading : %s' % n)
| 35.802885 | 7,447 |
11,514 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
from odoo import Command
class test_search(TransactionCase):
    """ Tests of search() ordering: explicit `order` arguments, ordering on
    inherits'd and many2one fields (including recursive _order definitions),
    and the handling of the standard ``active`` / custom ``x_active`` fields.
    """
    def patch_order(self, model, order):
        # temporarily override the model's default _order for this test
        self.patch(self.registry[model], '_order', order)
    def test_00_search_order(self):
        # Create 6 partners with a given name, and a given creation order to
        # ensure the order of their ID. Some are set as inactive to verify they
        # are by default excluded from the searches and to provide a second
        # `order` argument.
        Partner = self.env['res.partner']
        c = Partner.create({'name': 'test_search_order_C'})
        d = Partner.create({'name': 'test_search_order_D', 'active': False})
        a = Partner.create({'name': 'test_search_order_A'})
        b = Partner.create({'name': 'test_search_order_B'})
        ab = Partner.create({'name': 'test_search_order_AB'})
        e = Partner.create({'name': 'test_search_order_E', 'active': False})
        # The tests.
        # The basic searches should exclude records that have active = False.
        # The order of the returned ids should be given by the `order`
        # parameter of search().
        name_asc = Partner.search([('name', 'like', 'test_search_order%')], order="name asc")
        self.assertEqual([a, ab, b, c], list(name_asc), "Search with 'NAME ASC' order failed.")
        name_desc = Partner.search([('name', 'like', 'test_search_order%')], order="name desc")
        self.assertEqual([c, b, ab, a], list(name_desc), "Search with 'NAME DESC' order failed.")
        id_asc = Partner.search([('name', 'like', 'test_search_order%')], order="id asc")
        self.assertEqual([c, a, b, ab], list(id_asc), "Search with 'ID ASC' order failed.")
        id_desc = Partner.search([('name', 'like', 'test_search_order%')], order="id desc")
        self.assertEqual([ab, b, a, c], list(id_desc), "Search with 'ID DESC' order failed.")
        # The inactive records shouldn't be excluded as soon as a condition on
        # that field is present in the domain. The `order` parameter of
        # search() should support any legal coma-separated values.
        active_asc_id_asc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id asc")
        self.assertEqual([d, e, c, a, b, ab], list(active_asc_id_asc), "Search with 'ACTIVE ASC, ID ASC' order failed.")
        active_desc_id_asc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id asc")
        self.assertEqual([c, a, b, ab, d, e], list(active_desc_id_asc), "Search with 'ACTIVE DESC, ID ASC' order failed.")
        active_asc_id_desc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id desc")
        self.assertEqual([e, d, ab, b, a, c], list(active_asc_id_desc), "Search with 'ACTIVE ASC, ID DESC' order failed.")
        active_desc_id_desc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id desc")
        self.assertEqual([ab, b, a, c, e, d], list(active_desc_id_desc), "Search with 'ACTIVE DESC, ID DESC' order failed.")
        id_asc_active_asc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active asc")
        self.assertEqual([c, d, a, b, ab, e], list(id_asc_active_asc), "Search with 'ID ASC, ACTIVE ASC' order failed.")
        id_asc_active_desc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active desc")
        self.assertEqual([c, d, a, b, ab, e], list(id_asc_active_desc), "Search with 'ID ASC, ACTIVE DESC' order failed.")
        id_desc_active_asc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active asc")
        self.assertEqual([e, ab, b, a, d, c], list(id_desc_active_asc), "Search with 'ID DESC, ACTIVE ASC' order failed.")
        id_desc_active_desc = Partner.search([('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active desc")
        self.assertEqual([e, ab, b, a, d, c], list(id_desc_active_desc), "Search with 'ID DESC, ACTIVE DESC' order failed.")
    def test_10_inherits_m2order(self):
        Users = self.env['res.users']
        # Find Employee group
        group_employee = self.env.ref('base.group_user')
        # Get country/state data
        country_be = self.env.ref('base.be')
        country_us = self.env.ref('base.us')
        states_us = country_us.state_ids[:2]
        # Create test users
        u = Users.create({'name': '__search', 'login': '__search', 'groups_id': [Command.set([group_employee.id])]})
        # NOTE(review): 'state_id' is assigned a *country* id below —
        # presumably only the relative ordering of the raw m2o values
        # matters for this test; confirm before reusing this pattern.
        a = Users.create({'name': '__test_A', 'login': '__test_A', 'country_id': country_be.id, 'state_id': country_be.id})
        b = Users.create({'name': '__test_B', 'login': '__a_test_B', 'country_id': country_us.id, 'state_id': states_us[1].id})
        c = Users.create({'name': '__test_B', 'login': '__z_test_B', 'country_id': country_us.id, 'state_id': states_us[0].id})
        # Search as search user
        Users = Users.with_user(u)
        # Do: search on res.users, order on a field on res.partner to try inherits'd fields, then res.users
        expected_ids = [u.id, a.id, c.id, b.id]
        user_ids = Users.search([('id', 'in', expected_ids)], order='name asc, login desc').ids
        self.assertEqual(user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
        # Do: order on many2one and inherits'd fields
        expected_ids = [c.id, b.id, a.id, u.id]
        user_ids = Users.search([('id', 'in', expected_ids)], order='state_id asc, country_id desc, name asc, login desc').ids
        self.assertEqual(user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
        # Do: order on many2one and inherits'd fields
        expected_ids = [u.id, b.id, c.id, a.id]
        user_ids = Users.search([('id', 'in', expected_ids)], order='country_id desc, state_id desc, name asc, login desc').ids
        self.assertEqual(user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
        # Do: order on many2one, but not by specifying in order parameter of search, but by overriding _order of res_users
        self.patch_order('res.users', 'country_id desc, name asc, login desc')
        expected_ids = [u.id, c.id, b.id, a.id]
        user_ids = Users.search([('id', 'in', expected_ids)]).ids
        self.assertEqual(user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
    def test_11_indirect_inherits_m2o_order(self):
        Cron = self.env['ir.cron']
        Users = self.env['res.users']
        user_ids = {}
        cron_ids = {}
        # create users in non-alphabetical order so that id order differs
        # from name order
        for u in 'BAC':
            user_ids[u] = Users.create({'name': u, 'login': u}).id
            cron_ids[u] = Cron.create({'name': u, 'model_id': self.env.ref('base.model_res_partner').id, 'user_id': user_ids[u]}).id
        # ordering crons on user_id must follow the users' order (A, B, C)
        ids = Cron.search([('id', 'in', list(cron_ids.values()))], order='user_id').ids
        expected_ids = [cron_ids[l] for l in 'ABC']
        self.assertEqual(ids, expected_ids)
    def test_12_m2o_order_loop_self(self):
        Cats = self.env['ir.module.category']
        cat_ids = {}
        def create(name, **kw):
            # helper: create a category and remember its id by name
            cat_ids[name] = Cats.create(dict(kw, name=name)).id
        # _order refers to the model itself through parent_id
        self.patch_order('ir.module.category', 'parent_id desc, name')
        create('A')
        create('B', parent_id=cat_ids['A'])
        create('C', parent_id=cat_ids['A'])
        create('D')
        create('E', parent_id=cat_ids['D'])
        create('F', parent_id=cat_ids['D'])
        expected_ids = [cat_ids[x] for x in 'ADEFBC']
        found_ids = Cats.search([('id', 'in', list(cat_ids.values()))]).ids
        self.assertEqual(found_ids, expected_ids)
    def test_13_m2o_order_loop_multi(self):
        Users = self.env['res.users']
        # will sort by login desc of the creator, then by name
        self.patch_order('res.partner', 'create_uid, name')
        self.patch_order('res.users', 'partner_id, login desc')
        kw = dict(groups_id=[Command.set([self.ref('base.group_system'),
                                          self.ref('base.group_partner_manager')])])
        u1 = Users.create(dict(name='Q', login='m', **kw)).id
        u2 = Users.with_user(u1).create(dict(name='B', login='f', **kw)).id
        u3 = Users.create(dict(name='C', login='c', **kw)).id
        u4 = Users.with_user(u2).create(dict(name='D', login='z', **kw)).id
        expected_ids = [u2, u4, u3, u1]
        found_ids = Users.search([('id', 'in', expected_ids)]).ids
        self.assertEqual(found_ids, expected_ids)
    def test_20_x_active(self):
        """Check the behaviour of the x_active field."""
        # test that a custom field x_active filters like active
        # we take the model res.country as a test model as it is included in base and does
        # not have an active field
        model_country = self.env['res.country']
        self.assertNotIn('active', model_country._fields) # just in case someone adds the active field in the model
        self.env['ir.model.fields'].create({
            'name': 'x_active',
            'model_id': self.env.ref('base.model_res_country').id,
            'ttype': 'boolean',
        })
        self.assertEqual('x_active', model_country._active_name)
        country_ussr = model_country.create({'name': 'USSR', 'x_active': False})
        ussr_search = model_country.search([('name', '=', 'USSR')])
        self.assertFalse(ussr_search)
        ussr_search = model_country.with_context(active_test=False).search([('name', '=', 'USSR')])
        self.assertIn(country_ussr, ussr_search, "Search with active_test on a custom x_active field failed")
        ussr_search = model_country.search([('name', '=', 'USSR'), ('x_active', '=', False)])
        self.assertIn(country_ussr, ussr_search, "Search with active_test on a custom x_active field failed")
        # test that a custom field x_active on a model with the standard active
        # field does not interfere with the standard behaviour
        # use res.bank since it has an active field and is simple to use
        model_bank = self.env['res.bank']
        self.env['ir.model.fields'].create({
            'name': 'x_active',
            'model_id': self.env.ref('base.model_res_bank').id,
            'ttype': 'boolean',
        })
        self.assertEqual('active', model_bank._active_name)
        bank_credit_communal = model_bank.create({'name': 'Crédit Communal', 'x_active': False, 'active': True})
        cc_search = model_bank.search([('name', '=', 'Crédit Communal')])
        self.assertIn(bank_credit_communal, cc_search, "Search for active record with x_active set to False has failed")
        bank_credit_communal.write({
            'active': False,
            'x_active': True,
        })
        cc_search = model_bank.search([('name', '=', 'Crédit Communal')])
        self.assertNotIn(bank_credit_communal, cc_search, "Search for inactive record with x_active set to True has failed")
| 59.335052 | 11,511 |
24,487 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Miscellaneous HTML soup: legacy <font> markup, inline styles, (ordered)
# lists, nested blockquotes and a javascript: link.  Test fixture only —
# the markup (including the unsafe link) is intentionally left as-is.
MISC_HTML_SOURCE = u"""
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = u"""<div style="font-family: 'Lucida Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello {{ object.partner_id.name }},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>{{ object.number }}</strong><br />
Invoice total: <strong>{{ object.amount_total }} {{ object.currency_id.name }}</strong><br />
Invoice date: {{ object.invoice_date }}<br />
Order reference: {{ object.origin }}<br />
Your contact: <a href="mailto:{{ object.user_id.email or '' }}?subject=Invoice%20{{ object.number }}">{{ object.user_id.name }}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="{{ object.paypal_url }}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing {{ object.company_id.name or 'us' }}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">{{ object.company_id.name }}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
{{ object.company_id.street }}<br/>
{{ object.company_id.street2 }}<br/>
{{ object.company_id.zip }} {{ object.company_id.city }}<br/>
{{ object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or '' }} {{ object.company_id.country_id.name or '' }}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: {{ object.company_id.phone }}
</div>
<div>
Web : <a href="{{ object.company_id.website }}">{{ object.company_id.website }}</a>
</div>
</div>
</div></body></html>"""
# QUOTES
QUOTE_BLOCKQUOTE = u"""<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
</head>
<body text="#000000" bgcolor="#FFFFFF">
<div class="moz-cite-prefix">On 05-01-16 05:52, Andreas Becker
wrote:<br>
</div>
<blockquote
cite="mid:CAEJSRZvWvud8c6Qp=wfNG6O1+wK3i_jb33qVrF7XyrgPNjnyUA@mail.gmail.com"
type="cite"><base href="https://www.odoo.com">
<div dir="ltr">Yep Dominique that is true, as Postgres was the
base of all same as Odoo and MySQL etc came much later.Â
<div><br>
</div>
<div>Unfortunately many customers who ask for and ERP are with
hosters which still don't provide Postgres and MySQL is
available everywhere. Additionally Postgres seems for many
like a big black box while MySQL is very well documented and
understandable and it has PHPmyAdmin which is far ahead of any
tool managing postgres DBs.</div>
<br>
</div>
</blockquote>
<br>
I don't care how much you are highlighting the advantages of Erpnext
on this Odoo mailinglist, but when you start implying that Postgres
is not well documented it really hurts.<br>
<br>
<pre class="moz-signature" cols="72">--
Opener B.V. - Business solutions driven by open source collaboration
Stefan Rijnhart - Consultant/developer
mail: <a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a>
tel: +31 (0) 20 3090 139
web: <a class="moz-txt-link-freetext" href="https://opener.am">https://opener.am</a></pre>
</body>
</html>"""
QUOTE_BLOCKQUOTE_IN = [u"""<blockquote cite="mid:CAEJSRZvWvud8c6Qp=wfNG6O1+wK3i_jb33qVrF7XyrgPNjnyUA@mail.gmail.com" type="cite" data-o-mail-quote-node="1" data-o-mail-quote="1">"""]
QUOTE_BLOCKQUOTE_OUT = [u"""--
Opener B.V. - Business solutions driven by open source collaboration
Stefan Rijnhart - Consultant/developer"""]
QUOTE_THUNDERBIRD_HTML = u"""<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
</head>
<body text="#000000" bgcolor="#FFFFFF">
<div class="moz-cite-prefix">On 01/05/2016 10:24 AM, Raoul
Poilvache wrote:<br>
</div>
<blockquote
cite="mid:CAP76m_WWFH2KVrbjOxbaozvkmbzZYLWJnQ0n0sy9XpGaCWRf1g@mail.gmail.com"
type="cite">
<div dir="ltr"><b><i>Test reply. The suite.</i></b><br clear="all">
<div><br>
</div>
-- <br>
<div class="gmail_signature">Raoul Poilvache</div>
</div>
</blockquote>
Top cool !!!<br>
<br>
<pre class="moz-signature" cols="72">--
Raoul Poilvache
</pre>
</body>
</html>"""
QUOTE_THUNDERBIRD_HTML_IN = [u"""<blockquote cite="mid:CAP76m_WWFH2KVrbjOxbaozvkmbzZYLWJnQ0n0sy9XpGaCWRf1g@mail.gmail.com" type="cite" data-o-mail-quote-node="1" data-o-mail-quote="1">"""]
QUOTE_THUNDERBIRD_HTML_OUT = [u"""<pre class="moz-signature" cols="72"><span data-o-mail-quote="1">--
Raoul Poilvache
</span></pre>"""]
# Hotmail reply in quoted-printable form (=3B, =2C soft-encoded chars kept
# on purpose): the reply text must survive, the forwarded part after the
# <hr id="stopSpelling"> must be tagged as quote.
QUOTE_HOTMAIL_HTML = u"""
<html>
<head>
<style><!--
.hmmessage P
{
margin:0px=3B
padding:0px
}
body.hmmessage
{
font-size: 12pt=3B
font-family:Calibri
}
--></style></head>
<body class='hmmessage'>
<div dir='ltr'>I don't like that.<br><br>
<div><hr id="stopSpelling">
Date: Tue=2C 5 Jan 2016 10:24:48 +0100<br>
Subject: Test from gmail<br>
From: [email protected]<br>
To: [email protected] [email protected]<br><br>
<div dir="ltr"><b><i>Test reply. The suite.</i></b>
<br clear="all"><div><br>
</div>-- <br><div class="ecxgmail_signature">
Raoul Poilvache</div>
</div></div></div></body></html>"""
# the new reply body must be preserved untouched
QUOTE_HOTMAIL_HTML_IN = [u"""I don't like that.<br><br>"""]
# the separator and the quoted original must be tagged
QUOTE_HOTMAIL_HTML_OUT = [
    u"""<hr id="stopSpelling" data-o-mail-quote="1">""",
    u"""<div dir="ltr" data-o-mail-quote="1"><b data-o-mail-quote="1"><i data-o-mail-quote="1">Test reply. The suite.</i></b>"""]
QUOTE_OUTLOOK_HTML = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=3Diso-8859-=
1">
<style type="text/css" style="display:none;"> P {margin-top:0;margin-bo=
ttom:0;}
</style>
</head>
<body dir="ltr">
<div id="mail_body">
Reply from outlook
</div>
<div style="font-family: Calibri, Helvetica, sans-serif; font-size: 12pt;=
color: rgb(0, 0, 0);">
<br>
</div>
<div id="testing_id">
<div id="appendonsend"></div>
<div style="font-family:Calibri,Helvetica,sans-serif; font-size:12pt; col=
or:rgb(0,0,0)">
<br>
</div>
<hr tabindex="-1" style="display:inline-block; width:98%">
<div id="divRplyFwdMsg" dir="ltr">
<font face="Calibri, sans-serif" color="#000000" style="font-size:11pt"><b>De :</b> [email protected]<br>
<b>=C0 :</b> [email protected] <[email protected]><br>
<b>Objet :</b> Parent message</font>
<div> </div>
</div>
<div>
<div dir="ltr">Parent email body</div>
</div>
</div>
</body>
</html>
"""
QUOTE_OUTLOOK_HTML_IN = [
"""Reply from outlook""",
"""<div id="mail_body">""",
]
QUOTE_OUTLOOK_HTML_OUT = [
"""<div id="testing_id" data-o-mail-quote-container="1">""",
"""<div id="divRplyFwdMsg" dir="ltr" data-o-mail-quote="1">""",
]
QUOTE_THUNDERBIRD_1 = u"""<div>On 11/08/2012 05:29 PM,
<a href="mailto:[email protected]">[email protected]</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
QUOTE_THUNDERBIRD_1_IN = [
u'<a href="mailto:[email protected]">[email protected]</a> ',
u'<blockquote data-o-mail-quote-node="1" data-o-mail-quote="1">',
u'Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
QUOTE_THUNDERBIRD_1_OUT = [u"""--
Raoul Grosbedonnée
"""]
QUOTE_YAHOO_HTML = """
<html>
<head></head>
<body>
<div class="ydpf6e951dcyahoo-style-wrap">
<div></div>
<div dir="ltr" data-setdir="false">Reply from Yahoo</div>
</div>
<div id="yahoo_quoted_8820595126" class="yahoo_quoted">
<div style="font-family:'Helvetica Neue', Helvetica, Arial, sans-serif;font-size:13px;color:#26282a;">
=20
<div>
Bob a dit:
</div>
<div><br></div>
<div><br></div>
<div>
<div id="yiv3215395356">
<div dir="ltr">Parent email body</div>
</div>
</div>
</div>
</div>
</body>
</html>
"""
QUOTE_YAHOO_HTML_IN = [
"""Reply from Yahoo""",
"""<div dir="ltr" data-setdir="false">""",
"""<div class="ydpf6e951dcyahoo-style-wrap">""",
]
QUOTE_YAHOO_HTML_OUT = [
"""<div id="yahoo_quoted_8820595126" class="yahoo_quoted" data-o-mail-quote="1">""",
]
TEXT_1 = u"""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = [u"""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = [u"""
--
MySignature"""]
TEXT_2 = u"""Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature
--
Bert TARTOPOILS
[email protected]
"""
TEXT_2_IN = [u"Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = [u"""
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)""",
"""
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature"""]
# MISC
GMAIL_1 = u"""Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:[email protected]">[email protected]</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>-- <br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = [u'Ok for me. I am replying directly in gmail, without signature.', '<blockquote data-o-mail-quote-node="1" data-o-mail-quote="1">']
GMAIL_1_OUT = []
HOTMAIL_1 = u"""<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br><br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: [email protected]<br>To: [email protected]<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = [u"""<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br><br>Kindest regards,<br>xxx<br>"""]
HOTMAIL_1_OUT = [
u"""<hr id="stopSpelling" data-o-mail-quote="1">""",
u"""<pre data-o-mail-quote="1">
<a href="http://openerp.com" target="_blank" data-o-mail-quote="1">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>"""]
MSOFFICE_1 = u"""
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:[email protected]]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = [u'Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = [u'I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
BUG1 = u"""<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
u'Hi Migration Team',
u'Paragraph 1'
]
BUG_1_OUT = [u"""
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com"""]
REMOVE_CLASS = u"""
<div style="FONT-SIZE: 12pt; FONT-FAMILY: 'Times New Roman'; COLOR: #000000">
<div>Hello</div>
<div>I have just installed Odoo 9 and I've got the following error:</div>
<div> </div>
<div class="openerp openerp_webclient_container oe_webclient">
<div class="oe_loading" style="DISPLAY: none"> </div>
</div>
<div class="modal-backdrop in"></div>
<div role="dialog" tabindex="-1" aria-hidden="false" class="modal in" style="DISPLAY: block" data-backdrop="static">
<div class="modal-dialog modal-lg">
<div class="modal-content openerp">
<div class="modal-header">
<h4 class="modal-title">Odoo Error<span class="o_subtitle text-muted"></span></h4>
</div>
<div class="o_error_detail modal-body">
<pre>An error occurred in a modal and I will send you back the html to try opening one on your end</pre>
</div>
</div>
</div>
</div>
</div>
"""
REMOVE_CLASS_IN = [
u'<div style="font-size:12pt; font-family:\'Times New Roman\'; color:#000000">',
u'An error occurred in a modal and I will send you back the html to try opening one on your end']
REMOVE_CLASS_OUT = [
u'<div class="modal-backdrop in">',
u'<div class="modal-content openerp">',
u'<div class="modal-header">']
| 40.59204 | 24,477 |
2,064 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
from odoo.addons.base.tests.common import SavepointCaseWithUserDemo
class TestResPartnerBank(SavepointCaseWithUserDemo):
    """Tests acc_number
    """

    def test_sanitized_acc_number(self):
        """Creating a bank account with a "dirty" number must make it
        searchable through both the raw and the sanitized spelling, with
        both the '=' and 'in' operators, case-insensitively.
        """
        Bank = self.env['res.partner.bank']
        raw_number = " BE-001 2518823 03 "
        clean_number = 'BE001251882303'

        # No account with that number exists yet.
        self.assertEqual(0, len(Bank.search([('acc_number', '=', raw_number)])))

        record = Bank.create({
            'acc_number': raw_number,
            'partner_id': self.env['res.partner'].create({'name': 'Pepper Test'}).id,
            'acc_type': 'bank',
        })

        # Both spellings, combined with both operators, resolve to the record.
        for needle in (raw_number, clean_number):
            for domain in ([('acc_number', '=', needle)],
                           [('acc_number', 'in', [needle])]):
                found = Bank.search(domain)
                self.assertEqual(1, len(found))
                self.assertEqual(record, found[0])

        # The stored value keeps the raw spelling, while the sanitized
        # field holds the cleaned-up one.
        self.assertEqual(record.acc_number, raw_number)
        self.assertEqual(record.sanitized_acc_number, clean_number)

        # The search is case insensitive.
        self.assertEqual(1, len(Bank.search([('acc_number', '=', clean_number.lower())])))
        self.assertEqual(1, len(Bank.search([('acc_number', '=', raw_number.lower())])))
| 40.470588 | 2,064 |
2,711 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from base64 import b64decode
from odoo.tests.common import TransactionCase
class TestAvatarMixin(TransactionCase):
    """ tests the avatar mixin """

    def setUp(self):
        super().setUp()
        Users = self.env['res.users']
        # User with a name but no uploaded picture: gets a generated avatar.
        self.user_without_image = Users.create({
            'name': 'Marc Demo',
            'email': '[email protected]',
            'image_1920': False,
            'create_date': '2015-11-12 00:00:00',
            'login': 'demo_1',
            'password': 'demo_1'
        })
        # create_date pinned on the partner as well — presumably the
        # generated avatar color is derived from it (keeps tests stable).
        self.user_without_image.partner_id.create_date = '2015-11-12 00:00:00'
        # User without a name: no initial can be derived for the avatar.
        self.user_without_name = Users.create({
            'name': '',
            'email': '[email protected]',
            'image_1920': False,
            'login': 'marc_1',
            'password': 'marc_1',
        })
        # Partner that is not linked to any user.
        self.external_partner = self.env['res.partner'].create({
            'name': 'Josh Demo',
            'email': '[email protected]',
            'image_1920': False
        })

    def test_partner_has_avatar_even_if_it_has_no_image(self):
        partner = self.user_without_image.partner_id
        # Every avatar size must be populated despite the missing image.
        for field_name in ('avatar_128', 'avatar_256', 'avatar_512',
                           'avatar_1024', 'avatar_1920'):
            self.assertTrue(partner[field_name])

    def test_content_of_generated_partner_avatar(self):
        expected_svg = (
            "<?xml version='1.0' encoding='UTF-8' ?>"
            "<svg height='180' width='180' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>"
            "<rect fill='hsl(184, 40%, 45%)' height='180' width='180'/>"
            "<text fill='#ffffff' font-size='96' text-anchor='middle' x='90' y='125' font-family='sans-serif'>M</text>"
            "</svg>"
        )
        generated = b64decode(self.user_without_image.partner_id.avatar_1920).decode('utf-8')
        self.assertEqual(expected_svg, generated)

    def test_partner_without_name_has_default_placeholder_image_as_avatar(self):
        partner = self.user_without_name.partner_id
        self.assertEqual(partner._avatar_get_placeholder(), partner.avatar_1920)

    def test_external_partner_has_default_placeholder_image_as_avatar(self):
        self.assertEqual(self.external_partner._avatar_get_placeholder(),
                         self.external_partner.avatar_1920)

    def test_partner_and_user_have_the_same_avatar(self):
        user = self.user_without_image
        self.assertEqual(user.partner_id.avatar_1920, user.avatar_1920)
| 45.183333 | 2,711 |
2,120 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class test_res_lang(TransactionCase):

    def test_00_intersperse(self):
        """Check ``res_lang.intersperse``, which inserts a separator in the
        trailing digit run of ``value`` following the group sizes given in
        ``counts`` and returns ``(formatted_string, separators_added)``.

        Rewritten data-driven with ``assertEqual`` because bare ``assert``
        statements are stripped under ``python -O`` (silently disabling the
        test) and give no context on failure; a duplicated case was dropped.
        """
        from odoo.addons.base.models.res_lang import intersperse

        # Cases relying on the default separator argument: with no (or
        # ineffective) grouping, the input comes back untouched.
        default_sep_cases = [
            ("", [], ("", 0)),
            ("0", [], ("0", 0)),
            ("012", [], ("012", 0)),
            ("1", [], ("1", 0)),
            ("12", [], ("12", 0)),
            ("123", [], ("123", 0)),
            ("1234", [], ("1234", 0)),
            ("123456789", [], ("123456789", 0)),
            ("&ab%#@1", [], ("&ab%#@1", 0)),
            ("0", [1], ("0", 0)),
            ("0", [2], ("0", 0)),
            ("0", [200], ("0", 0)),
        ]
        for value, counts, expected in default_sep_cases:
            self.assertEqual(intersperse(value, counts), expected,
                             "intersperse(%r, %r)" % (value, counts))

        # Cases with an explicit '.' separator, covering repeated groups,
        # zero/negative group sizes and non-digit prefixes.
        dotted_cases = [
            ("12345678", [1], ('1234567.8', 1)),
            ("12345678", [2], ('123456.78', 1)),
            ("12345678", [2, 1], ('12345.6.78', 2)),
            ("12345678", [2, 0], ('12.34.56.78', 3)),
            ("12345678", [-1, 2], ('12345678', 0)),
            ("12345678", [2, -1], ('123456.78', 1)),
            ("12345678", [2, 0, 1], ('12.34.56.78', 3)),
            ("12345678", [2, 0, 0], ('12.34.56.78', 3)),
            ("12345678", [2, 0, -1], ('12.34.56.78', 3)),
            ("12345678", [3, 3, 3, 3], ('12.345.678', 2)),
            ("abc1234567xy", [2], ('abc1234567.xy', 1)),
            # only the trailing digit run is grouped, hence the 'x.y8' split
            ("abc1234567xy8", [2], ('abc1234567x.y8', 1)),
            ("abc12", [3], ('abc12', 0)),
            ("abc12", [2], ('abc12', 0)),
            ("abc12", [1], ('abc1.2', 1)),
        ]
        for value, counts, expected in dotted_cases:
            self.assertEqual(intersperse(value, counts, '.'), expected,
                             "intersperse(%r, %r, '.')" % (value, counts))
| 50.47619 | 2,120 |
21,079 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import binascii
from PIL import Image, ImageDraw, PngImagePlugin
from odoo import tools
from odoo.exceptions import UserError
from odoo.tests.common import TransactionCase
class TestImage(TransactionCase):
"""Tests for the different image tools helpers."""
    def setUp(self):
        """Build the base64 image fixtures shared by all image-tool tests."""
        super(TestImage, self).setUp()
        # Palette used by the crop fixtures below: borders use ``bg_color``,
        # the centered square uses ``fill_color``.
        self.bg_color = (135, 90, 123)
        self.fill_color = (0, 160, 157)
        # Known-good base64 of a 1x1 RGB PNG, used as a round-trip reference.
        self.base64_1x1_png = b'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGNgYGAAAAAEAAH2FzhVAAAAAElFTkSuQmCC'
        self.base64_svg = base64.b64encode(b'<svg></svg>')
        self.base64_1920x1080_jpeg = tools.image_to_base64(Image.new('RGB', (1920, 1080)), 'JPEG')
        # The following image contains a tag `Lens Info` with a value of `3.99mm f/1.8`
        # This particular tag 0xa432 makes the `exif_transpose` method fail in 5.4.1 < Pillow < 7.2.0
        self.base64_exif_jpg = b"""/9j/4AAQSkZJRgABAQAAAQABAAD/4QDQRXhpZgAATU0AKgAAAAgABgESAAMAAAABAAYAAAEaAAUA
AAABAAAAVgEbAAUAAAABAAAAXgEoAAMAAAABAAEAAAITAAMAAAABAAEAAIdpAAQAAAABAAAAZgAA
AAAAAAABAAAAAQAAAAEAAAABAAWQAAAHAAAABDAyMzGRAQAHAAAABAECAwCgAAAHAAAABDAxMDCg
AQADAAAAAf//AACkMgAFAAAABAAAAKgAAAAAAAABjwAAAGQAAAGPAAAAZAAAAAkAAAAFAAAACQAA
AAX/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAx
NDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy
MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAADAAYDASIAAhEBAxEB/8QAHwAAAQUBAQEB
AQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1Fh
ByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZ
WmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXG
x8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAEC
AwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHB
CSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0
dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX
2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigD//2Q=="""
        # Draw a ``fill_color`` square in the middle of the image, this will
        # be used to verify crop is working. The border is going to be
        # `self.bg_color` and the middle is going to be `self.fill_color`.
        # horizontal image (border is left/right)
        image = Image.new('RGB', (1920, 1080), color=self.bg_color)
        offset = (image.size[0] - image.size[1]) / 2
        draw = ImageDraw.Draw(image)
        draw.rectangle(xy=[
            (offset, 0),
            (image.size[0] - offset, image.size[1])
        ], fill=self.fill_color)
        self.base64_1920x1080_png = tools.image_to_base64(image, 'PNG')
        # vertical image (border is top/bottom)
        image = Image.new('RGB', (1080, 1920), color=self.bg_color)
        offset = (image.size[1] - image.size[0]) / 2
        draw = ImageDraw.Draw(image)
        draw.rectangle(xy=[
            (0, offset),
            (image.size[0], image.size[1] - offset)
        ], fill=self.fill_color)
        self.base64_1080x1920_png = tools.image_to_base64(image, 'PNG')
    def test_00_base64_to_image(self):
        """Test that base64 is correctly opened as a PIL image."""
        # Decoding accepts both bytes and str input.
        image = tools.base64_to_image(self.base64_1x1_png)
        self.assertEqual(type(image), PngImagePlugin.PngImageFile, "base64 as bytes, correct format")
        self.assertEqual(image.size, (1, 1), "base64 as bytes, correct size")
        image = tools.base64_to_image(self.base64_1x1_png.decode('ASCII'))
        self.assertEqual(type(image), PngImagePlugin.PngImageFile, "base64 as string, correct format")
        self.assertEqual(image.size, (1, 1), "base64 as string, correct size")
        # Invalid payloads must raise UserError. The two payloads differ by
        # one character (15 vs 16 chars) — presumably to exercise both
        # base64-padding code paths; TODO confirm against base64_to_image.
        with self.assertRaises(UserError, msg="This file could not be decoded as an image file. Please try with a different file."):
            image = tools.base64_to_image(b'oazdazpodazdpok')
        with self.assertRaises(UserError, msg="This file could not be decoded as an image file. Please try with a different file."):
            image = tools.base64_to_image(b'oazdazpodazdpokd')
def test_01_image_to_base64(self):
"""Test that a PIL image is correctly saved as base64."""
image = Image.new('RGB', (1, 1))
image_base64 = tools.image_to_base64(image, 'PNG')
self.assertEqual(image_base64, self.base64_1x1_png)
    def test_02_image_fix_orientation(self):
        """Test that the orientation of images is correct.

        Delegates to ``self._orientation_test`` (defined elsewhere in this
        class), called once per EXIF orientation value 1..8.
        """
        # Colors that can be distinguished among themselves even with jpeg loss.
        blue = (0, 0, 255)
        yellow = (255, 255, 0)
        green = (0, 255, 0)
        pink = (255, 0, 255)
        # Image large enough so jpeg loss is not a huge factor in the corners.
        size = 50
        expected = (blue, yellow, green, pink)
        # They are all supposed to be same image: (blue, yellow, green, pink) in
        # that order, but each encoded with a different orientation.
        self._orientation_test(1, (blue, yellow, green, pink), size, expected) # top/left
        self._orientation_test(2, (yellow, blue, pink, green), size, expected) # top/right
        self._orientation_test(3, (pink, green, yellow, blue), size, expected) # bottom/right
        self._orientation_test(4, (green, pink, blue, yellow), size, expected) # bottom/left
        self._orientation_test(5, (blue, green, yellow, pink), size, expected) # left/top
        self._orientation_test(6, (yellow, pink, blue, green), size, expected) # right/top
        self._orientation_test(7, (pink, yellow, green, blue), size, expected) # right/bottom
        self._orientation_test(8, (green, blue, pink, yellow), size, expected) # left/bottom
def test_03_image_fix_orientation_exif(self):
"""Test that a jpg image with exif orientation tag gets rotated"""
image = tools.base64_to_image(self.base64_exif_jpg)
self.assertEqual(image.size, (6,3))
image = tools.image_fix_orientation(image)
self.assertEqual(image.size, (3,6))
    def test_10_image_process_base64_source(self):
        """Test the base64_source parameter of image_process."""
        wrong_base64 = b'oazdazpodazdpok'
        # Falsy input and SVG input are passed through without processing.
        self.assertFalse(tools.image_process(False), "return False if base64_source is falsy")
        self.assertEqual(tools.image_process(self.base64_svg), self.base64_svg, "return base64_source if format is SVG")
        # in the following tests, pass `quality` to force the processing
        with self.assertRaises(UserError, msg="This file could not be decoded as an image file. Please try with a different file."):
            tools.image_process(wrong_base64, quality=95)
        with self.assertRaises(UserError, msg="This file could not be decoded as an image file. Please try with a different file."):
            tools.image_process(b'oazdazpodazdpokd', quality=95)
        # A valid source with a forced operation returns the processed image.
        image = tools.base64_to_image(tools.image_process(self.base64_1920x1080_jpeg, quality=95))
        self.assertEqual(image.size, (1920, 1080), "OK return the image")
        # test that nothing happens if no operation has been requested
        # (otherwise those would raise because of wrong base64)
        self.assertEqual(tools.image_process(wrong_base64), wrong_base64)
        self.assertEqual(tools.image_process(wrong_base64, size=False), wrong_base64)
def test_11_image_process_size(self):
"""Test the size parameter of image_process."""
# Format of `tests`: (original base64 image, size parameter, expected result, text)
tests = [
(self.base64_1920x1080_jpeg, (192, 108), (192, 108), "resize to given size"),
(self.base64_1920x1080_jpeg, (1920, 1080), (1920, 1080), "same size, no change"),
(self.base64_1920x1080_jpeg, (192, None), (192, 108), "set height from ratio"),
(self.base64_1920x1080_jpeg, (0, 108), (192, 108), "set width from ratio"),
(self.base64_1920x1080_jpeg, (192, 200), (192, 108), "adapt to width"),
(self.base64_1920x1080_jpeg, (400, 108), (192, 108), "adapt to height"),
(self.base64_1920x1080_jpeg, (3000, 2000), (1920, 1080), "don't resize above original, both set"),
(self.base64_1920x1080_jpeg, (3000, False), (1920, 1080), "don't resize above original, width set"),
(self.base64_1920x1080_jpeg, (None, 2000), (1920, 1080), "don't resize above original, height set"),
(self.base64_1080x1920_png, (3000, 192), (108, 192), "vertical image, resize if below"),
]
count = 0
for test in tests:
image = tools.base64_to_image(tools.image_process(test[0], size=test[1]))
self.assertEqual(image.size, test[2], test[3])
count = count + 1
self.assertEqual(count, 10, "ensure the loop is ran")
def test_12_image_process_verify_resolution(self):
"""Test the verify_resolution parameter of image_process."""
res = tools.image_process(self.base64_1920x1080_jpeg, verify_resolution=True)
self.assertNotEqual(res, False, "size ok")
base64_image_excessive = tools.image_to_base64(Image.new('RGB', (50001, 1000)), 'PNG')
with self.assertRaises(UserError, msg="size excessive"):
tools.image_process(base64_image_excessive, verify_resolution=True)
    def test_13_image_process_quality(self):
        """Test the quality parameter of image_process.

        All assertions are "no larger than" checks: optimization may be a
        no-op but must never grow the payload.
        """
        # CASE: PNG RGBA doesn't apply quality, just optimize
        image = tools.image_to_base64(Image.new('RGBA', (1080, 1920)), 'PNG')
        res = tools.image_process(image)
        self.assertLessEqual(len(res), len(image))
        # CASE: palette-mode ('P') PNG doesn't apply quality, just optimize
        image = tools.image_to_base64(Image.new('P', (1080, 1920)), 'PNG')
        res = tools.image_process(image)
        self.assertLessEqual(len(res), len(image))
        # CASE: JPEG optimize + reduced quality
        res = tools.image_process(self.base64_1920x1080_jpeg)
        self.assertLessEqual(len(res), len(self.base64_1920x1080_jpeg))
        # CASE: GIF doesn't apply quality, just optimize
        image = tools.image_to_base64(Image.new('RGB', (1080, 1920)), 'GIF')
        res = tools.image_process(image)
        self.assertLessEqual(len(res), len(image))
    def test_14_image_process_crop(self):
        """Test the crop parameter of image_process.

        Uses the setUp fixtures: ``bg_color`` borders around a centered
        ``fill_color`` square, so the crop window can be verified by probing
        the midpoint of each edge.
        """
        # Optimized PNG use palette, getpixel below will return palette value.
        fill = 0
        bg = 1
        # Format of `tests`: (original base64 image, size parameter, crop parameter, res size, res color (top, bottom, left, right), text)
        tests = [
            (self.base64_1920x1080_png, None, None, (1920, 1080), (fill, fill, bg, bg), "horizontal, verify initial"),
            (self.base64_1920x1080_png, (2000, 2000), 'center', (1080, 1080), (fill, fill, fill, fill), "horizontal, crop biggest possible"),
            (self.base64_1920x1080_png, (2000, 4000), 'center', (540, 1080), (fill, fill, fill, fill), "horizontal, size vertical, limit height"),
            (self.base64_1920x1080_png, (4000, 2000), 'center', (1920, 960), (fill, fill, bg, bg), "horizontal, size horizontal, limit width"),
            (self.base64_1920x1080_png, (512, 512), 'center', (512, 512), (fill, fill, fill, fill), "horizontal, type center"),
            (self.base64_1920x1080_png, (512, 512), 'top', (512, 512), (fill, fill, fill, fill), "horizontal, type top"),
            (self.base64_1920x1080_png, (512, 512), 'bottom', (512, 512), (fill, fill, fill, fill), "horizontal, type bottom"),
            (self.base64_1920x1080_png, (512, 512), 'wrong', (512, 512), (fill, fill, fill, fill), "horizontal, wrong crop value, use center"),
            (self.base64_1920x1080_png, (192, 0), None, (192, 108), (fill, fill, bg, bg), "horizontal, not cropped, just do resize"),
            (self.base64_1080x1920_png, None, None, (1080, 1920), (bg, bg, fill, fill), "vertical, verify initial"),
            (self.base64_1080x1920_png, (2000, 2000), 'center', (1080, 1080), (fill, fill, fill, fill), "vertical, crop biggest possible"),
            (self.base64_1080x1920_png, (2000, 4000), 'center', (960, 1920), (bg, bg, fill, fill), "vertical, size vertical, limit height"),
            (self.base64_1080x1920_png, (4000, 2000), 'center', (1080, 540), (fill, fill, fill, fill), "vertical, size horizontal, limit width"),
            (self.base64_1080x1920_png, (512, 512), 'center', (512, 512), (fill, fill, fill, fill), "vertical, type center"),
            (self.base64_1080x1920_png, (512, 512), 'top', (512, 512), (bg, fill, fill, fill), "vertical, type top"),
            (self.base64_1080x1920_png, (512, 512), 'bottom', (512, 512), (fill, bg, fill, fill), "vertical, type bottom"),
            (self.base64_1080x1920_png, (512, 512), 'wrong', (512, 512), (fill, fill, fill, fill), "vertical, wrong crop value, use center"),
            (self.base64_1080x1920_png, (108, 0), None, (108, 192), (bg, bg, fill, fill), "vertical, not cropped, just do resize"),
        ]
        count = 0
        for test in tests:
            count = count + 1
            # process the image, pass quality to make sure the result is palette
            image = tools.base64_to_image(tools.image_process(test[0], size=test[1], crop=test[2], quality=95))
            # verify size
            self.assertEqual(image.size, test[3], "%s - correct size" % test[5])
            half_width, half_height = image.size[0] / 2, image.size[1] / 2
            top, bottom, left, right = 0, image.size[1] - 1, 0, image.size[0] - 1
            # verify top
            px = (half_width, top)
            self.assertEqual(image.getpixel(px), test[4][0], "%s - color top (%s, %s)" % (test[5], px[0], px[1]))
            # verify bottom
            px = (half_width, bottom)
            self.assertEqual(image.getpixel(px), test[4][1], "%s - color bottom (%s, %s)" % (test[5], px[0], px[1]))
            # verify left
            px = (left, half_height)
            self.assertEqual(image.getpixel(px), test[4][2], "%s - color left (%s, %s)" % (test[5], px[0], px[1]))
            # verify right
            px = (right, half_height)
            self.assertEqual(image.getpixel(px), test[4][3], "%s - color right (%s, %s)" % (test[5], px[0], px[1]))
        self.assertEqual(count, 2 * 9, "ensure the loop is ran")
def test_15_image_process_colorize(self):
"""Test the colorize parameter of image_process."""
# verify initial condition
image_rgba = Image.new('RGBA', (1, 1))
self.assertEqual(image_rgba.mode, 'RGBA')
self.assertEqual(image_rgba.getpixel((0, 0)), (0, 0, 0, 0))
base64_rgba = tools.image_to_base64(image_rgba, 'PNG')
# CASE: color random, color has changed
image = tools.base64_to_image(tools.image_process(base64_rgba, colorize=True))
self.assertEqual(image.mode, 'RGB')
self.assertNotEqual(image.getpixel((0, 0)), (0, 0, 0))
    def test_16_image_process_format(self):
        """Test the format parameter of image_process."""
        # JPEG -> PNG conversion
        image = tools.base64_to_image(tools.image_process(self.base64_1920x1080_jpeg, output_format='PNG'))
        self.assertEqual(image.format, 'PNG', "change format to PNG")
        # the output_format value is case-insensitive
        image = tools.base64_to_image(tools.image_process(self.base64_1x1_png, output_format='JpEg'))
        self.assertEqual(image.format, 'JPEG', "change format to JPEG (case insensitive)")
        # BMP is not a supported output: the test asserts it falls back to PNG
        image = tools.base64_to_image(tools.image_process(self.base64_1920x1080_jpeg, output_format='BMP'))
        self.assertEqual(image.format, 'PNG', "change format to BMP converted to PNG")
        # RGBA source saved as JPEG (JPEG has no alpha channel)
        self.base64_image_1080_1920_rgba = tools.image_to_base64(Image.new('RGBA', (108, 192)), 'PNG')
        image = tools.base64_to_image(tools.image_process(self.base64_image_1080_1920_rgba, output_format='jpeg'))
        self.assertEqual(image.format, 'JPEG', "change format PNG with RGBA to JPEG")
        # pass quality to force the image to be processed
        self.base64_image_1080_1920_tiff = tools.image_to_base64(Image.new('RGB', (108, 192)), 'TIFF')
        image = tools.base64_to_image(tools.image_process(self.base64_image_1080_1920_tiff, quality=95))
        self.assertEqual(image.format, 'JPEG', "unsupported format to JPEG")
def test_20_image_data_uri(self):
"""Test that image_data_uri is working as expected."""
self.assertEqual(tools.image_data_uri(self.base64_1x1_png), 'data:image/png;base64,' + self.base64_1x1_png.decode('ascii'))
def test_21_image_guess_size_from_field_name(self):
f = tools.image_guess_size_from_field_name
# Test case: empty field_name input
self.assertEqual(f(''), (0, 0))
# Test case: custom field_name input
self.assertEqual(f('custom_field'), (0, 0))
# Test case: field_name input that starts with 'x_'
self.assertEqual(f('x_field'), (0, 0))
# Test case: field_name input that starts with 'x_' and ends with a number less than 16
self.assertEqual(f('x_studio_image_1'), (0, 0))
# Test case: field_name input that starts with 'x_' and ends with a number greater than 16
self.assertEqual(f('x_studio_image_32'), (0, 0))
# Test case: field_name input that has a suffix less than 16
self.assertEqual(f('image_15'), (0, 0))
# Test case: field_name input that has a suffix equal to 16
self.assertEqual(f('image_16'), (16, 16))
# Test case: field_name input that has a suffix greater than 16
self.assertEqual(f('image_32'), (32, 32))
# Test case: field_name input that has a suffix with 2 numbers
self.assertEqual(f('image_1920_1080'), (1080, 1080))
# Test case: field_name input that has a float as suffix
self.assertEqual(f('image_32.5'), (0, 0))
# Test case: field_name input that has a suffix greater than 16 but no underscore
self.assertEqual(f('image32'), (0, 0))
def _assertAlmostEqualSequence(self, rgb1, rgb2, delta=10):
self.assertEqual(len(rgb1), len(rgb2))
for index, t in enumerate(zip(rgb1, rgb2)):
self.assertAlmostEqual(t[0], t[1], delta=delta, msg="%s vs %s at %d" % (rgb1, rgb2, index))
    def _get_exif_colored_square_b64(self, orientation, colors, size):
        """Build a base64 JPEG square with one color per corner and a forced
        EXIF orientation tag.

        :param orientation: EXIF orientation value (1-8) written into the tag
        :param colors: 4 fill colors, in order top/left, top/right,
            bottom/left, bottom/right
        :param size: side length of the square, in pixels
        :return: the image encoded as base64 bytes (JPEG with EXIF)
        """
        image = Image.new('RGB', (size, size), color=self.bg_color)
        draw = ImageDraw.Draw(image)
        # Paint the colors on the 4 corners, to be able to test which colors
        # move on which corners.
        draw.rectangle(xy=[(0, 0), (size // 2, size // 2)], fill=colors[0]) # top/left
        draw.rectangle(xy=[(size // 2, 0), (size, size // 2)], fill=colors[1]) # top/right
        draw.rectangle(xy=[(0, size // 2), (size // 2, size)], fill=colors[2]) # bottom/left
        draw.rectangle(xy=[(size // 2, size // 2), (size, size)], fill=colors[3]) # bottom/right
        # Set the proper exif tag based on orientation params.
        # Hand-crafted minimal EXIF block (little-endian TIFF header, a single
        # 0x0112 Orientation entry); the orientation byte is spliced in.
        exif = b'Exif\x00\x00II*\x00\x08\x00\x00\x00\x01\x00\x12\x01\x03\x00\x01\x00\x00\x00' + bytes([orientation]) + b'\x00\x00\x00\x00\x00\x00\x00'
        # The image image is saved with the exif tag.
        return tools.image_to_base64(image, 'JPEG', exif=exif)
    def _orientation_test(self, orientation, colors, size, expected):
        """Verify that ``image_fix_orientation`` undoes an EXIF orientation.

        :param orientation: EXIF orientation value baked into the test image
        :param colors: 4 corner colors painted before orientation is applied
        :param size: side length of the square test image
        :param expected: 4 corner colors expected after the fix, in order
            top/left, top/right, bottom/left, bottom/right
        """
        # Generate the test image based on orientation and order of colors.
        b64_image = self._get_exif_colored_square_b64(orientation, colors, size)
        # The image is read again now that it has orientation added.
        fixed_image = tools.image_fix_orientation(tools.base64_to_image(b64_image))
        # Corner colors are compared approximately: JPEG compression makes
        # exact pixel equality unreliable.
        self._assertAlmostEqualSequence(fixed_image.getpixel((0, 0)), expected[0]) # top/left
        self._assertAlmostEqualSequence(fixed_image.getpixel((size - 1, 0)), expected[1]) # top/right
        self._assertAlmostEqualSequence(fixed_image.getpixel((0, size - 1)), expected[2]) # bottom/left
        self._assertAlmostEqualSequence(fixed_image.getpixel((size - 1, size - 1)), expected[3]) # bottom/right
def test_ptype_image_to_jpeg(self):
"""converts to RGB when saving as JPEG"""
image1 = Image.new('P', (1, 1), color='red')
image2 = Image.new('RGB', (1, 1), color='red')
self.assertEqual(tools.image.image_apply_opt(image1, 'JPEG'), tools.image.image_apply_opt(image2, 'JPEG'))
| 60.746398 | 21,079 |
6,242 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from odoo.tests.common import SingleTransactionCase
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
class TestIrSequenceDateRangeStandard(SingleTransactionCase):
    """ A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """
    def test_ir_sequence_date_range_1_create(self):
        """ Try to create a sequence object with date ranges enabled. """
        seq = self.env['ir.sequence'].create({
            'code': 'test_sequence_date_range',
            'name': 'Test sequence',
            'use_date_range': True,
        })
        self.assertTrue(seq)
    def test_ir_sequence_date_range_2_change_dates(self):
        """ Draw numbers to create a first subsequence then change its date range. Then, try to draw a new number and check a new subsequence was correctly created. """
        # use last year to avoid clashing with ranges created by other tests
        year = date.today().year - 1
        january = lambda d: date(year, 1, d)
        seq16 = self.env['ir.sequence'].with_context(ir_sequence_date=january(16))
        n = seq16.next_by_code('test_sequence_date_range')
        self.assertEqual(n, '1')
        n = seq16.next_by_code('test_sequence_date_range')
        self.assertEqual(n, '2')
        # modify the range of date created
        domain = [('sequence_id.code', '=', 'test_sequence_date_range'), ('date_from', '=', january(1))]
        seq_date_range = self.env['ir.sequence.date_range'].search(domain)
        seq_date_range.write({'date_from': january(18)})
        # Jan 16 no longer falls in the existing range: a fresh subsequence
        # must be created and restart numbering at 1
        n = seq16.next_by_code('test_sequence_date_range')
        self.assertEqual(n, '1')
        # check the newly created sequence stops at the 17th of January
        domain = [('sequence_id.code', '=', 'test_sequence_date_range'), ('date_from', '=', january(1))]
        seq_date_range = self.env['ir.sequence.date_range'].search(domain)
        self.assertEqual(seq_date_range.date_to, january(17))
    def test_ir_sequence_date_range_3_unlink(self):
        """ Clean up the sequence created by the previous tests. """
        seq = self.env['ir.sequence'].search([('code', '=', 'test_sequence_date_range')])
        seq.unlink()
class TestIrSequenceDateRangeNoGap(SingleTransactionCase):
    """ Copy of the previous tests for a 'No gap' sequence. """
    def test_ir_sequence_date_range_1_create_no_gap(self):
        """ Try to create a sequence object. """
        seq = self.env['ir.sequence'].create({
            'code': 'test_sequence_date_range_2',
            'name': 'Test sequence',
            'use_date_range': True,
            'implementation': 'no_gap',
        })
        self.assertTrue(seq)
    def test_ir_sequence_date_range_2_change_dates(self):
        """ Draw numbers to create a first subsequence then change its date range. Then, try to draw a new number and check a new subsequence was correctly created. """
        # use last year to avoid clashing with ranges created by other tests
        year = date.today().year - 1
        january = lambda d: date(year, 1, d)
        # use the keyword form of with_context: a positional dict would
        # replace the whole context instead of updating it (and is the form
        # used by the 'Standard' twin test above)
        seq16 = self.env['ir.sequence'].with_context(ir_sequence_date=january(16))
        n = seq16.next_by_code('test_sequence_date_range_2')
        self.assertEqual(n, '1')
        n = seq16.next_by_code('test_sequence_date_range_2')
        self.assertEqual(n, '2')
        # modify the range of date created
        domain = [('sequence_id.code', '=', 'test_sequence_date_range_2'), ('date_from', '=', january(1))]
        seq_date_range = self.env['ir.sequence.date_range'].search(domain)
        seq_date_range.write({'date_from': january(18)})
        # Jan 16 no longer falls in the existing range: a fresh subsequence
        # must be created and restart numbering at 1
        n = seq16.next_by_code('test_sequence_date_range_2')
        self.assertEqual(n, '1')
        # check the newly created sequence stops at the 17th of January
        domain = [('sequence_id.code', '=', 'test_sequence_date_range_2'), ('date_from', '=', january(1))]
        seq_date_range = self.env['ir.sequence.date_range'].search(domain)
        self.assertEqual(seq_date_range.date_to, january(17))
    def test_ir_sequence_date_range_3_unlink(self):
        """ Clean up the sequence created by the previous tests. """
        seq = self.env['ir.sequence'].search([('code', '=', 'test_sequence_date_range_2')])
        seq.unlink()
class TestIrSequenceDateRangeChangeImplementation(SingleTransactionCase):
    """ Create sequence objects and change their ``implementation`` field. """
    def test_ir_sequence_date_range_1_create(self):
        """ Try to create a sequence object. """
        seq = self.env['ir.sequence'].create({
            'code': 'test_sequence_date_range_3',
            'name': 'Test sequence',
            'use_date_range': True,
        })
        self.assertTrue(seq)
        seq = self.env['ir.sequence'].create({
            'code': 'test_sequence_date_range_4',
            'name': 'Test sequence',
            'use_date_range': True,
            'implementation': 'no_gap',
        })
        self.assertTrue(seq)
    def test_ir_sequence_date_range_2_use(self):
        """ Make some use of the sequences to create some subsequences """
        # use last year to avoid clashing with ranges created by other tests
        year = date.today().year - 1
        january = lambda d: date(year, 1, d)
        seq = self.env['ir.sequence']
        # use the keyword form of with_context: a positional dict would
        # replace the whole context instead of updating it (consistent with
        # the 'Standard' test class above)
        seq16 = self.env['ir.sequence'].with_context(ir_sequence_date=january(16))
        # each (sequence, date range) pair numbers independently from 1
        for i in range(1, 5):
            n = seq.next_by_code('test_sequence_date_range_3')
            self.assertEqual(n, str(i))
        for i in range(1, 5):
            n = seq16.next_by_code('test_sequence_date_range_3')
            self.assertEqual(n, str(i))
        for i in range(1, 5):
            n = seq.next_by_code('test_sequence_date_range_4')
            self.assertEqual(n, str(i))
        for i in range(1, 5):
            n = seq16.next_by_code('test_sequence_date_range_4')
            self.assertEqual(n, str(i))
    def test_ir_sequence_date_range_3_write(self):
        """swap the implementation method on both"""
        domain = [('code', 'in', ['test_sequence_date_range_3', 'test_sequence_date_range_4'])]
        seqs = self.env['ir.sequence'].search(domain)
        seqs.write({'implementation': 'standard'})
        seqs.write({'implementation': 'no_gap'})
    def test_ir_sequence_date_range_4_unlink(self):
        """ Clean up the sequences created by the previous tests. """
        domain = [('code', 'in', ['test_sequence_date_range_3', 'test_sequence_date_range_4'])]
        seqs = self.env['ir.sequence'].search(domain)
        seqs.unlink()
| 43.957746 | 6,242 |
15,119 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from psycopg2 import IntegrityError
from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase, tagged
from odoo.tools import mute_logger
from odoo import Command
class TestXMLID(TransactionCase):
    """ Tests for ``_load_records`` and the external identifiers (XML ids)
    it creates in ``ir.model.data``, including the ``noupdate`` flag. """
    def get_data(self, xml_id):
        """ Return the 'ir.model.data' record corresponding to ``xml_id``. """
        module, suffix = xml_id.split('.', 1)
        domain = [('module', '=', module), ('name', '=', suffix)]
        return self.env['ir.model.data'].search(domain)
    def test_create(self):
        """ Records created without noupdate can be updated by later loads. """
        model = self.env['res.partner.category']
        xml_id = 'test_convert.category_foo'
        # create category (flag 'noupdate' should be False by default)
        data = dict(xml_id=xml_id, values={'name': 'Foo'})
        category = model._load_records([data])
        self.assertEqual(category, self.env.ref(xml_id, raise_if_not_found=False))
        self.assertEqual(category.name, 'Foo')
        self.assertEqual(self.get_data(xml_id).noupdate, False)
        # update category
        data = dict(xml_id=xml_id, values={'name': 'Bar'})
        category1 = model._load_records([data], update=True)
        self.assertEqual(category, category1)
        self.assertEqual(category.name, 'Bar')
        self.assertEqual(self.get_data(xml_id).noupdate, False)
        # update category; the noupdate flag of existing data is never changed
        # by an update
        data = dict(xml_id=xml_id, values={'name': 'Baz'}, noupdate=True)
        category2 = model._load_records([data], update=True)
        self.assertEqual(category, category2)
        self.assertEqual(category.name, 'Baz')
        self.assertEqual(self.get_data(xml_id).noupdate, False)
    def test_create_noupdate(self):
        """ Records created with noupdate=True are never modified by updates. """
        model = self.env['res.partner.category']
        xml_id = 'test_convert.category_foo'
        # create category
        data = dict(xml_id=xml_id, values={'name': 'Foo'}, noupdate=True)
        category = model._load_records([data])
        self.assertEqual(category, self.env.ref(xml_id, raise_if_not_found=False))
        self.assertEqual(category.name, 'Foo')
        self.assertEqual(self.get_data(xml_id).noupdate, True)
        # update category: values must be ignored, name stays 'Foo'
        data = dict(xml_id=xml_id, values={'name': 'Bar'}, noupdate=False)
        category1 = model._load_records([data], update=True)
        self.assertEqual(category, category1)
        self.assertEqual(category.name, 'Foo')
        self.assertEqual(self.get_data(xml_id).noupdate, True)
        # update category: same, even when the load itself asks noupdate=True
        data = dict(xml_id=xml_id, values={'name': 'Baz'}, noupdate=True)
        category2 = model._load_records([data], update=True)
        self.assertEqual(category, category2)
        self.assertEqual(category.name, 'Foo')
        self.assertEqual(self.get_data(xml_id).noupdate, True)
    def test_create_noupdate_multi(self):
        """ The noupdate flag is applied to every record of a batch load. """
        model = self.env['res.partner.category']
        data_list = [
            dict(xml_id='test_convert.category_foo', values={'name': 'Foo'}, noupdate=True),
            dict(xml_id='test_convert.category_bar', values={'name': 'Bar'}, noupdate=True),
        ]
        # create category
        categories = model._load_records(data_list)
        foo = self.env.ref('test_convert.category_foo')
        bar = self.env.ref('test_convert.category_bar')
        self.assertEqual(categories, foo + bar)
        self.assertEqual(foo.name, 'Foo')
        self.assertEqual(bar.name, 'Bar')
        # check data
        self.assertEqual(self.get_data('test_convert.category_foo').noupdate, True)
        self.assertEqual(self.get_data('test_convert.category_bar').noupdate, True)
    def test_create_order(self):
        """ A batch update returns records in the order of the data list,
        including noupdate records that were left untouched. """
        model = self.env['res.partner.category']
        data_list = [
            dict(xml_id='test_convert.category_foo', values={'name': 'Foo'}),
            dict(xml_id='test_convert.category_bar', values={'name': 'Bar'}, noupdate=True),
            dict(xml_id='test_convert.category_baz', values={'name': 'Baz'}),
        ]
        # create categories
        foo = model._load_records([data_list[0]])
        bar = model._load_records([data_list[1]])
        baz = model._load_records([data_list[2]])
        self.assertEqual(foo.name, 'Foo')
        self.assertEqual(bar.name, 'Bar')
        self.assertEqual(baz.name, 'Baz')
        # update them, and check the order of result
        for data in data_list:
            data['values']['name'] += 'X'
        cats = model._load_records(data_list, update=True)
        self.assertEqual(list(cats), [foo, bar, baz])
        self.assertEqual(foo.name, 'FooX')
        self.assertEqual(bar.name, 'Bar')
        self.assertEqual(baz.name, 'BazX')
    def test_create_inherits(self):
        """ Loading a delegated-inheritance record (res.users) also creates an
        XML id for the parent record (res.partner). """
        model = self.env['res.users']
        xml_id = 'test_convert.user_foo'
        par_xml_id = xml_id + '_res_partner'
        # create user
        user = model._load_records([dict(xml_id=xml_id, values={'name': 'Foo', 'login': 'foo'})])
        self.assertEqual(user, self.env.ref(xml_id, raise_if_not_found=False))
        self.assertEqual(user.partner_id, self.env.ref(par_xml_id, raise_if_not_found=False))
        self.assertEqual(user.name, 'Foo')
        self.assertEqual(user.login, 'foo')
    def test_recreate(self):
        """ An update load recreates a record whose XML id points nowhere. """
        model = self.env['res.partner.category']
        xml_id = 'test_convert.category_foo'
        data = dict(xml_id=xml_id, values={'name': 'Foo'})
        # create category
        category = model._load_records([data])
        self.assertEqual(category, self.env.ref(xml_id, raise_if_not_found=False))
        self.assertEqual(category.name, 'Foo')
        # suppress category
        category.unlink()
        self.assertFalse(self.env.ref(xml_id, raise_if_not_found=False))
        # update category, this should recreate it
        category = model._load_records([data], update=True)
        self.assertEqual(category, self.env.ref(xml_id, raise_if_not_found=False))
        self.assertEqual(category.name, 'Foo')
    def test_create_xmlids(self):
        """ Batch-create inheriting records and check all XML ids exist. """
        # create users and assign them xml ids
        foo, bar = self.env['res.users']._load_records([{
            'xml_id': 'test_convert.foo',
            'values': {'name': 'Foo', 'login': 'foo'},
            'noupdate': True,
        }, {
            'xml_id': 'test_convert.bar',
            'values': {'name': 'Bar', 'login': 'bar'},
            'noupdate': True,
        }])
        self.assertEqual(foo, self.env.ref('test_convert.foo', raise_if_not_found=False))
        self.assertEqual(bar, self.env.ref('test_convert.bar', raise_if_not_found=False))
        self.assertEqual(foo.partner_id, self.env.ref('test_convert.foo_res_partner', raise_if_not_found=False))
        self.assertEqual(bar.partner_id, self.env.ref('test_convert.bar_res_partner', raise_if_not_found=False))
        self.assertEqual(self.get_data('test_convert.foo').noupdate, True)
        self.assertEqual(self.get_data('test_convert.bar').noupdate, True)
    @mute_logger('odoo.sql_db', 'odoo.addons.base.models.ir_model')
    def test_create_external_id_with_space(self):
        """ XML ids containing spaces are rejected by a SQL constraint. """
        model = self.env['res.partner.category']
        data_list = [{
            'xml_id': 'test_convert.category_with space',
            'values': {'name': 'Bar'},
        }]
        with self.assertRaisesRegex(IntegrityError, 'ir_model_data_name_nospaces'):
            model._load_records(data_list)
class TestIrModel(TransactionCase):
    """ Tests for custom (``x_``) models created through ``ir.model``:
    ordering constraints, custom ``_order``, group expansion and
    ``_rec_name`` cleanup. """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # The test mode is necessary in this case. After each test, we call
        # registry.reset_changes(), which opens a new cursor to retrieve custom
        # models and fields. A regular cursor would correspond to the state of
        # the database before setUpClass(), which is not correct. Instead, a
        # test cursor will correspond to the state of the database of cls.cr at
        # that point, i.e., before the call to setUp().
        cls.registry.enter_test_mode(cls.cr)
        cls.addClassCleanup(cls.registry.leave_test_mode)
        # model and records for banana stages
        cls.env['ir.model'].create({
            'name': 'Banana Ripeness',
            'model': 'x_banana_ripeness',
            'field_id': [
                Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
            ]
        })
        # stage values are pairs (id, display_name)
        cls.ripeness_green = cls.env['x_banana_ripeness'].name_create('Green')
        cls.ripeness_okay = cls.env['x_banana_ripeness'].name_create('Okay, I guess?')
        cls.ripeness_gone = cls.env['x_banana_ripeness'].name_create('Walked away on its own')
        # model and records for bananas
        cls.bananas_model = cls.env['ir.model'].create({
            'name': 'Bananas',
            'model': 'x_bananas',
            'field_id': [
                Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
                Command.create({'name': 'x_length', 'ttype': 'float', 'field_description': 'Length'}),
                Command.create({'name': 'x_color', 'ttype': 'integer', 'field_description': 'Color'}),
                Command.create({'name': 'x_ripeness_id', 'ttype': 'many2one',
                                'field_description': 'Ripeness','relation': 'x_banana_ripeness',
                                'group_expand': True})
            ]
        })
        # add non-stored field that is not valid in order
        cls.env['ir.model.fields'].create({
            'name': 'x_is_yellow',
            'field_description': 'Is the banana yellow?',
            'ttype': 'boolean',
            'model_id': cls.bananas_model.id,
            'store': False,
            'depends': 'x_color',
            'compute': "for banana in self:\n banana['x_is_yellow'] = banana.x_color == 9"
        })
        # default stage is ripeness_green
        cls.env['ir.default'].set('x_bananas', 'x_ripeness_id', cls.ripeness_green[0])
        cls.env['x_bananas'].create([{
            'x_name': 'Banana #1',
            'x_length': 3.14159,
            'x_color': 9,
        }, {
            'x_name': 'Banana #2',
            'x_length': 0,
            'x_color': 6,
        }, {
            'x_name': 'Banana #3',
            'x_length': 10,
            'x_color': 6,
        }])
    def setUp(self):
        # this cleanup is necessary after each test, and must be done last
        self.addCleanup(self.registry.reset_changes)
        super().setUp()
    def test_model_order_constraint(self):
        """Check that the order constraint is properly enforced."""
        VALID_ORDERS = ['id', 'id desc', 'id asc, x_length', 'x_color, x_length, create_uid']
        for order in VALID_ORDERS:
            self.bananas_model.order = order
        # invalid: empty, unknown field, bad direction keyword, trailing
        # comma, and a non-stored field
        INVALID_ORDERS = ['', 'x_wat', 'id esc', 'create_uid,', 'id, x_is_yellow']
        for order in INVALID_ORDERS:
            with self.assertRaises(ValidationError), self.cr.savepoint():
                self.bananas_model.order = order
        # check that the constraint is checked at model creation
        fields_value = [
            Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
            Command.create({'name': 'x_length', 'ttype': 'float', 'field_description': 'Length'}),
            Command.create({'name': 'x_color', 'ttype': 'integer', 'field_description': 'Color'}),
        ]
        self.env['ir.model'].create({
            'name': 'MegaBananas',
            'model': 'x_mega_bananas',
            'order': 'x_name asc, id desc', # valid order
            'field_id': fields_value,
        })
        with self.assertRaises(ValidationError):
            self.env['ir.model'].create({
                'name': 'GigaBananas',
                'model': 'x_giga_bananas',
                'order': 'x_name asc, x_wat', # invalid order
                'field_id': fields_value,
            })
    def test_model_order_search(self):
        """Check that custom orders are applied when querying a model."""
        ORDERS = {
            'id asc': ['Banana #1', 'Banana #2', 'Banana #3'],
            'id desc': ['Banana #3', 'Banana #2', 'Banana #1'],
            'x_color asc, id asc': ['Banana #2', 'Banana #3', 'Banana #1'],
            'x_color asc, id desc': ['Banana #3', 'Banana #2', 'Banana #1'],
            'x_length asc, id': ['Banana #2', 'Banana #1', 'Banana #3'],
        }
        for order, names in ORDERS.items():
            self.bananas_model.order = order
            self.assertEqual(self.env['x_bananas']._order, order)
            bananas = self.env['x_bananas'].search([])
            self.assertEqual(bananas.mapped('x_name'), names, 'failed to order by %s' % order)
    def test_group_expansion(self):
        """Check that the basic custom group expansion works."""
        groups = self.env['x_bananas'].read_group(domain=[],
                                                  fields=['x_ripeness_id'],
                                                  groupby=['x_ripeness_id'])
        # group_expand on x_ripeness_id must yield every stage, even empty ones
        expected = [{
            'x_ripeness_id': self.ripeness_green,
            'x_ripeness_id_count': 3,
            '__domain': [('x_ripeness_id', '=', self.ripeness_green[0])],
        }, {
            'x_ripeness_id': self.ripeness_okay,
            'x_ripeness_id_count': 0,
            '__domain': [('x_ripeness_id', '=', self.ripeness_okay[0])],
        }, {
            'x_ripeness_id': self.ripeness_gone,
            'x_ripeness_id_count': 0,
            '__domain': [('x_ripeness_id', '=', self.ripeness_gone[0])],
        }]
        self.assertEqual(groups, expected, 'should include 2 empty ripeness stages')
    def test_rec_name_deletion(self):
        """Check that deleting 'x_name' does not crash."""
        record = self.env['x_bananas'].create({'x_name': "Ifan Ben-Mezd"})
        self.assertEqual(record._rec_name, 'x_name')
        self.assertEqual(self.registry.field_depends[type(record).display_name], ('x_name',))
        self.assertEqual(record.display_name, "Ifan Ben-Mezd")
        # unlinking x_name should fixup _rec_name and display_name
        self.env['ir.model.fields']._get('x_bananas', 'x_name').unlink()
        record = self.env['x_bananas'].browse(record.id)
        self.assertEqual(record._rec_name, None)
        self.assertEqual(self.registry.field_depends[type(record).display_name], ())
        self.assertEqual(record.display_name, f"x_bananas,{record.id}")
@tagged('test_eval_context')
class TestEvalContext(TransactionCase):
    """ Check the evaluation context available to computed-field code. """
    def test_module_usage(self):
        """ A compute using time/datetime/dateutil must evaluate cleanly. """
        self.env['ir.model.fields'].create({
            'name': 'x_foo_bar_baz',
            'model_id': self.env['ir.model'].search([('model', '=', 'res.partner')]).id,
            'field_description': 'foo',
            'ttype': 'integer',
            'store': False,
            'depends': 'name',
            'compute': ("time.time()\ndatetime.datetime.now()\n"
                        "dateutil.relativedelta.relativedelta(hours=1)")
        })
        # accessing the field triggers the compute; it would raise if the
        # modules were missing from the evaluation context
        self.env['res.partner'].create({'name': 'foo'}).x_foo_bar_baz
| 43.950581 | 15,119 |
16,401 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import contextlib
import difflib
import logging
import re
from contextlib import contextmanager
from pathlib import PurePath
from unittest import TestCase
from unittest.mock import patch
from odoo.tests.common import TransactionCase
from odoo.tests.common import users, warmup
from odoo.tests.runner import OdooTestResult
_logger = logging.getLogger(__name__)
from odoo.tests import MetaCase
class TestTestSuite(TestCase, metaclass=MetaCase):
    # Intentionally empty test: merely being collected and run without error
    # is the assertion.
    def test_test_suite(self):
        """ Check that OdooSuite handles unittest.TestCase correctly. """
class TestRunnerLoggingCommon(TransactionCase):
    """
    The purpose of this class is to do some "metatesting": it actually checks
    that on error, the runner logged the error with the right file reference.
    This is mainly to avoid having errors in test/common.py or test/runner.py`.
    This kind of metatesting is tricky; in this case the logs are made outside
    of the test method, after the teardown actually.
    """
    def setUp(self):
        # expected_logs: list of (level, message) the next error should emit;
        # expected_first_frame_methods: queue of expected first-frame names
        self.expected_logs = None
        self.expected_first_frame_methods = None
        return super().setUp()
    def _feedErrorsToResult(self, result, errors):
        # We use this hook to catch the logged error. It is initially called
        # post tearDown, and logs the actual errors. Because of our hack
        # tests.common._ErrorCatcher, the errors are logged directly. This is
        # still useful to test errors raised from tests. We cannot assert what
        # was logged after the test inside the test, though. This method can be
        # temporary renamed to test the real failure.
        try:
            self.test_result = result
            # while we are here, let's check that the first frame of the stack
            # is always inside the test method
            for error in errors:
                _, exc_info = error
                if exc_info:
                    tb = exc_info[2]
                    self._check_first_frame(tb)
            # intercept all ir_logging. We cannot use log catchers or other
            # fancy stuff because makeRecord is too low level.
            log_records = []
            def makeRecord(logger, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
                log_records.append({
                    'logger': logger, 'name': name, 'level': level, 'fn': fn, 'lno': lno,
                    'msg': msg % args, 'exc_info': exc_info, 'func': func, 'extra': extra, 'sinfo': sinfo,
                })
            def handle(logger, record):
                # disable error logging
                return
            fake_result = OdooTestResult()
            with patch('logging.Logger.makeRecord', makeRecord), patch('logging.Logger.handle', handle):
                super()._feedErrorsToResult(fake_result, errors)
            self._check_log_records(log_records)
        except Exception as e:
            # we don't expect _feedErrorsToResult() to raise any exception, this
            # will make it more robust to future changes and eventual mistakes
            _logger.exception(e)
    def _check_first_frame(self, tb):
        """ Check that the first frame of the given traceback is the expected method name. """
        # the list expected_first_frame_methods allow to define a list of first
        # expected frame (useful for setup/teardown tests)
        if self.expected_first_frame_methods is None:
            expected_first_frame_method = self._testMethodName
        else:
            expected_first_frame_method = self.expected_first_frame_methods.pop(0)
        first_frame_method = tb.tb_frame.f_code.co_name
        if first_frame_method != expected_first_frame_method:
            self._log_error(f"Checking first tb frame: {first_frame_method} is not equal to {expected_first_frame_method}")
    def _check_log_records(self, log_records):
        """ Check that what was logged is what was expected. """
        for log_record in log_records:
            self._assert_log_equal(log_record, 'logger', _logger)
            self._assert_log_equal(log_record, 'name', 'odoo.addons.base.tests.test_test_suite')
            self._assert_log_equal(log_record, 'fn', __file__)
            self._assert_log_equal(log_record, 'func', self._testMethodName)
        if self.expected_logs is not None:
            for log_record in log_records:
                level, msg = self.expected_logs.pop(0)
                self._assert_log_equal(log_record, 'level', level)
                self._assert_log_equal(log_record, 'msg', msg)
    def _assert_log_equal(self, log_record, key, expected):
        """ Check the content of a log record. """
        value = log_record[key]
        if key == 'msg':
            value = self._clean_message(value)
        if value != expected:
            if key != 'msg':
                # use the record's message for context: the record dict has no
                # 'str' key (the previous code raised a KeyError here, which
                # was swallowed by _feedErrorsToResult and hid the diagnostic)
                self._log_error(f"Key `{key}` => `{value}` is not equal to `{expected}` \n {log_record['msg']}")
            else:
                diff = '\n'.join(difflib.ndiff(value.splitlines(), expected.splitlines()))
                self._log_error(f"Key `{key}` did not matched expected:\n{diff}")
    def _log_error(self, message):
        """ Log an actual error (about a log in a test that doesn't match expectations) """
        # we would just log, but using the test_result will help keeping the tests counters correct
        self.test_result.addError(self, (AssertionError, AssertionError(message), None))
    def _clean_message(self, message):
        """ Normalize a logged message so it can be compared to an expected
        template: line numbers, generated decorator names, absolute paths and
        path separators are all replaced by stable placeholders. """
        root_path = PurePath(__file__).parents[4] # removes /odoo/addons/base/tests/test_test_suite.py
        python_path = PurePath(contextlib.__file__).parent # /usr/lib/pythonx.x, C:\\python\\Lib, ...
        message = re.sub(r'line \d+', 'line $line', message)
        message = re.sub(r'py:\d+', 'py:$line', message)
        message = re.sub(r'decorator-gen-\d+', 'decorator-gen-xxx', message)
        message = message.replace(f'"{root_path}', '"/root_path/odoo')
        message = message.replace(f'"{python_path}', '"/usr/lib/python')
        message = message.replace('\\', '/')
        return message
class TestRunnerLogging(TestRunnerLoggingCommon):
    # No expected_logs set: only the generic checks (logger, file, first
    # frame) are applied to the resulting error log.
    def test_raise(self):
        raise Exception('This is an error')
    def test_raise_subtest(self):
        """
        with subtest, we expect to have multiple errors, one per subtest
        """
        # NOTE: the expected message embeds the literal source of the raise
        # line below; do not reformat the raising statements.
        def make_message(message):
            return (
                f'''ERROR: Subtest TestRunnerLogging.test_raise_subtest (<subtest>)
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_raise_subtest
    raise Exception('{message}')
Exception: {message}
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, make_message('This is an error')),
        ]
        with self.subTest():
            raise Exception('This is an error')
        self.assertFalse(self.expected_logs, "Error should have been logged immediatly")
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, make_message('This is an error2')),
        ]
        with self.subTest():
            raise Exception('This is an error2')
        self.assertFalse(self.expected_logs, "Error should have been logged immediatly")
    @users('__system__')
    @warmup
    def test_with_decorators(self):
        # The expected traceback includes the <decorator-gen-xxx> frames added
        # by the @users and @warmup decorators.
        message = (
            '''ERROR: Subtest TestRunnerLogging.test_with_decorators (login='__system__')
Traceback (most recent call last):
  File "<decorator-gen-xxx>", line $line, in test_with_decorators
  File "/root_path/odoo/odoo/tests/common.py", line $line, in _users
    func(*args, **kwargs)
  File "<decorator-gen-xxx>", line $line, in test_with_decorators
  File "/root_path/odoo/odoo/tests/common.py", line $line, in warmup
    func(*args, **kwargs)
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_with_decorators
    raise Exception('This is an error')
Exception: This is an error
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]
        raise Exception('This is an error')
    # Error raised from inside a contextmanager's __exit__ path; only the
    # generic checks apply (no expected_logs defined).
    def test_traverse_contextmanager(self):
        @contextmanager
        def assertSomething():
            yield
            raise Exception('This is an error')
        with assertSomething():
            pass
    # subTest opened inside a nested call; only the generic checks apply.
    def test_subtest_sub_call(self):
        def func():
            with self.subTest():
                raise Exception('This is an error')
        func()
    def test_call_stack(self):
        # NOTE: the expected message embeds the literal source lines of the
        # helpers below; do not reformat them.
        message = (
            '''ERROR: TestRunnerLogging.test_call_stack
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_call_stack
    alpha()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    beta()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in beta
    gamma()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in gamma
    raise Exception('This is an error')
Exception: This is an error
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]
        def alpha():
            beta()
        def beta():
            gamma()
        def gamma():
            raise Exception('This is an error')
        alpha()
    def test_call_stack_context_manager(self):
        # Same as test_call_stack, but the error is raised while a with_user
        # context manager is active; the expected traceback must not gain
        # extra frames from it.
        message = (
            '''ERROR: TestRunnerLogging.test_call_stack_context_manager
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_call_stack_context_manager
    alpha()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    beta()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in beta
    gamma()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in gamma
    raise Exception('This is an error')
Exception: This is an error
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]
        def alpha():
            beta()
        def beta():
            with self.with_user('admin'):
                gamma()
            return 0
        def gamma():
            raise Exception('This is an error')
        alpha()
    def test_call_stack_subtest(self):
        """An error raised under ``subTest()`` deep in a call chain is
        reported as a Subtest error with the full call stack.
        """
        message = (
            '''ERROR: Subtest TestRunnerLogging.test_call_stack_subtest (<subtest>)
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_call_stack_subtest
    alpha()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    beta()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in beta
    gamma()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in gamma
    raise Exception('This is an error')
Exception: This is an error
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]

        def alpha():
            beta()

        def beta():
            with self.subTest():
                gamma()

        def gamma():
            raise Exception('This is an error')

        alpha()
    def test_assertQueryCount(self):
        """An exceeded ``assertQueryCount`` is reported as a subtest FAIL.
        On Python < 3.10 the failing frame points at the last statement in
        the ``with`` body instead of the ``with`` line itself, hence the
        message rewrite below.
        """
        message = (
            '''FAIL: Subtest TestRunnerLogging.test_assertQueryCount (<subtest>)
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_assertQueryCount
    with self.assertQueryCount(system=0):
  File "/usr/lib/python/contextlib.py", line $line, in __exit__
    next(self.gen)
  File "/root_path/odoo/odoo/tests/common.py", line $line, in assertQueryCount
    self.fail(msg % (login, count, expected, funcname, filename, linenum))
AssertionError: Query count more than expected for user __system__: 1 > 0 in test_assertQueryCount at base/tests/test_test_suite.py:$line
''')
        if self._python_version < (3, 10, 0):
            # pre-3.10 tracebacks show the body line, not the `with` line
            message = message.replace("with self.assertQueryCount(system=0):", "self.env.cr.execute('SELECT 1')")
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]
        with self.assertQueryCount(system=0):
            self.env.cr.execute('SELECT 1')
    @users('__system__')
    @warmup
    def test_assertQueryCount_with_decorators(self):
        # Same query-count overflow as test_assertQueryCount, but run
        # through the @users/@warmup decorators.
        # NOTE(review): no expected_logs set here — presumably the base
        # class's defaults apply; confirm against TestRunnerLoggingCommon.
        with self.assertQueryCount(system=0):
            self.env.cr.execute('SELECT 1')
    def test_reraise(self):
        """A bare ``raise`` inside an ``except`` block must not duplicate
        frames: the traceback shows test -> alpha -> beta only.
        """
        message = (
            '''ERROR: TestRunnerLogging.test_reraise
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_reraise
    alpha()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    beta()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in beta
    raise Exception('This is an error')
Exception: This is an error
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]

        def alpha():
            # pylint: disable=try-except-raise
            try:
                beta()
            except Exception:
                raise

        def beta():
            raise Exception('This is an error')

        alpha()
    def test_handle_error(self):
        """An exception raised while handling another one must be reported
        with both tracebacks, joined by Python's standard "During handling
        of the above exception" separator.
        """
        message = (
            '''ERROR: TestRunnerLogging.test_handle_error
Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    beta()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in beta
    raise Exception('This is an error')
Exception: This is an error

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_handle_error
    alpha()
  File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in alpha
    raise Exception('This is an error2')
Exception: This is an error2
''')
        self.expected_logs = [
            (logging.INFO, '=' * 70),
            (logging.ERROR, message),
        ]

        def alpha():
            try:
                beta()
            except Exception:
                raise Exception('This is an error2')

        def beta():
            raise Exception('This is an error')

        alpha()
class TestRunnerLoggingSetup(TestRunnerLoggingCommon):
    """Error reporting when ``setUp`` itself raises: the test body and
    ``tearDown`` must not run, but registered cleanups still do — in LIFO
    order, hence ``cleanupError2`` before ``cleanupError``.
    """

    def setUp(self):
        super().setUp()
        # originating frames of the errors expected by the runner, in order
        self.expected_first_frame_methods = [
            'setUp',
            'cleanupError2',
            'cleanupError',
        ]

        def cleanupError():
            raise Exception("This is a cleanup error")
        self.addCleanup(cleanupError)

        def cleanupError2():
            raise Exception("This is a second cleanup error")
        self.addCleanup(cleanupError2)

        raise Exception('This is a setup error')

    def test_raises_setup(self):
        # must be skipped because setUp raised
        _logger.error("This shouldn't be executed")

    def tearDown(self):
        # must be skipped because setUp raised
        _logger.error("This shouldn't be executed since setup failed")
class TestRunnerLoggingTeardown(TestRunnerLoggingCommon):
    """Error reporting when the test body, ``tearDown`` and cleanups all
    raise: two subtest errors plus a test error from the body, then the
    tearDown error, then cleanups in LIFO order.
    """

    def setUp(self):
        super().setUp()
        # originating frames of the errors expected by the runner, in order
        self.expected_first_frame_methods = [
            'test_raises_teardown',
            'test_raises_teardown',
            'test_raises_teardown',
            'tearDown',
            'cleanupError2',
            'cleanupError',
        ]

        def cleanupError():
            raise Exception("This is a cleanup error")
        self.addCleanup(cleanupError)

        def cleanupError2():
            raise Exception("This is a second cleanup error")
        self.addCleanup(cleanupError2)

    def tearDown(self):
        raise Exception('This is a tearDown error')

    def test_raises_teardown(self):
        with self.subTest():
            raise Exception('This is a subTest error')
        with self.subTest():
            raise Exception('This is a second subTest error')
        raise Exception('This is a test error')
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import HttpCase, tagged
from odoo.tools import mute_logger, logging
from unittest.mock import patch
@tagged('-at_install', 'post_install')
class TestHttpCase(HttpCase):
    """Checks how browser-side console output is surfaced by browser_js."""

    def test_console_error_string(self):
        """A string console.error in the page fails the test; the joined
        arguments appear on the second line of the AssertionError."""
        with self.assertRaises(AssertionError) as error_catcher:
            code = "console.error('test error','message')"
            # screenshots are irrelevant here and slow the test down
            with patch('odoo.tests.common.ChromeBrowser.take_screenshot', return_value=None):
                self.browser_js(url_path='about:blank', code=code)
        # second line must contains error message
        self.assertEqual(error_catcher.exception.args[0].splitlines()[1], "test error message")

    def test_console_error_object(self):
        """An Error object passed to console.error is rendered with its
        message and stack trace."""
        with self.assertRaises(AssertionError) as error_catcher:
            code = "console.error(TypeError('test error message'))"
            with patch('odoo.tests.common.ChromeBrowser.take_screenshot', return_value=None):
                self.browser_js(url_path='about:blank', code=code)
        # second line must contains error message
        self.assertEqual(error_catcher.exception.args[0].splitlines()[1:3],
        ['TypeError: test error message', '    at <anonymous>:1:15'])

    def test_console_log_object(self):
        """console.log of a plain object is logged once, serialized as
        Object(key=..., ...)."""
        logger = logging.getLogger('odoo')
        level = logger.level
        # make sure browser INFO logs are captured, then restore the level
        logger.setLevel(logging.INFO)
        self.addCleanup(logger.setLevel, level)
        with self.assertLogs() as log_catcher:
            code = "console.log({custom:{1:'test', 2:'a'}, value:1, description:'dummy'});console.log('test successful');"
            self.browser_js(url_path='about:blank', code=code)
        console_log_count = 0
        for log in log_catcher.output:
            if '.browser:' in log:
                text = log.split('.browser:', 1)[1]
                if text == 'test successful':
                    continue
                self.assertEqual(text, "Object(custom=Object, value=1, description='dummy')")
                console_log_count += 1
        self.assertEqual(console_log_count, 1)
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import logging
import time
from functools import partial
from lxml import etree
from lxml.builder import E
from psycopg2 import IntegrityError
from odoo.exceptions import AccessError, ValidationError
from odoo.tests import common
from odoo.tools import mute_logger, view_validation
from odoo.addons.base.models.ir_ui_view import (
transfer_field_to_modifiers, transfer_node_to_modifiers, simplify_modifiers,
)
_logger = logging.getLogger(__name__)
class ViewXMLID(common.TransactionCase):
    def test_model_data_id(self):
        """ Check whether views know their xmlid record. """
        company_form = self.env.ref('base.view_company_form')
        self.assertTrue(company_form)
        self.assertTrue(company_form.model_data_id)
        self.assertEqual(
            company_form.model_data_id.complete_name,
            'base.view_company_form',
        )
class ViewCase(common.TransactionCase):
    """Base case exposing the ``ir.ui.view`` model as ``self.View``."""

    def setUp(self):
        super().setUp()
        self.View = self.env['ir.ui.view']
class TestNodeLocator(common.TransactionCase):
    """
    The node locator returns None when it can not find a node, and the first
    match when it finds something (no jquery-style node sets)
    """
    def test_no_match_xpath(self):
        """
        xpath simply uses the provided @expr pattern to find a node
        """
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            E.xpath(expr="//qux"),
        )
        self.assertIsNone(node)

    def test_match_xpath(self):
        bar = E.bar()
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(), bar, E.baz()),
            E.xpath(expr="//bar"),
        )
        # must return the very element instance, not a copy
        self.assertIs(node, bar)

    def test_no_match_field(self):
        """
        A field spec will match by @name against all fields of the view
        """
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            E.field(name="qux"),
        )
        self.assertIsNone(node)

        node = self.env['ir.ui.view'].locate_node(
            E.root(E.field(name="foo"), E.field(name="bar"), E.field(name="baz")),
            E.field(name="qux"),
        )
        self.assertIsNone(node)

    def test_match_field(self):
        bar = E.field(name="bar")
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.field(name="foo"), bar, E.field(name="baz")),
            E.field(name="bar"),
        )
        self.assertIs(node, bar)

    def test_no_match_other(self):
        """
        Non-xpath non-fields are matched by node name first
        """
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(), E.bar(), E.baz()),
            E.qux(),
        )
        self.assertIsNone(node)

    def test_match_other(self):
        bar = E.bar()
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(), bar, E.baz()),
            E.bar(),
        )
        self.assertIs(bar, node)

    def test_attribute_mismatch(self):
        """
        Non-xpath non-field are filtered by matching attributes on spec and
        matched nodes
        """
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(attr='1'), E.bar(attr='2'), E.baz(attr='3')),
            E.bar(attr='5'),
        )
        self.assertIsNone(node)

    def test_attribute_filter(self):
        match = E.bar(attr='2')
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.bar(attr='1'), match, E.root(E.bar(attr='3'))),
            E.bar(attr='2'),
        )
        self.assertIs(node, match)

    def test_version_mismatch(self):
        """
        A @version on the spec will be matched against the view's version
        """
        node = self.env['ir.ui.view'].locate_node(
            E.root(E.foo(attr='1'), version='4'),
            E.foo(attr='1', version='3'),
        )
        self.assertIsNone(node)
class TestViewInheritance(ViewCase):
    """Builds a small tree of form views (A*), tree views (B*) and a
    standalone tree view (C), then checks inheritance resolution and
    default-view selection against it.
    """

    def arch_for(self, name, view_type='form', parent=None):
        """ Generates a trivial view of the specified ``view_type``.

        The generated view is empty but ``name`` is set as its root's ``@string``.

        If ``parent`` is not falsy, generates an extension view (instead of
        a root view) replacing the parent's ``@string`` by ``name``

        :param str name: ``@string`` value for the view root
        :param str view_type:
        :param bool parent:
        :return: generated arch
        :rtype: str
        """
        if not parent:
            element = E(view_type, string=name)
        else:
            element = E(view_type,
                E.attribute(name, name='string'),
                position='attributes'
            )
        return etree.tostring(element, encoding='unicode')

    def makeView(self, name, parent=None, arch=None):
        """ Generates a basic ir.ui.view with the provided name, parent and arch.

        If no parent is provided, the view is top-level.

        If no arch is provided, generates one by calling :meth:`~.arch_for`.

        :param str name:
        :param int parent: id of the parent view, if any
        :param str arch:
        :returns: the created view's id.
        :rtype: int
        """
        view = self.View.create({
            'model': self.model,
            'name': name,
            'arch': arch or self.arch_for(name, parent=parent),
            'inherit_id': parent,
            'priority': 5, # higher than default views
        })
        self.view_ids[name] = view
        return view

    def get_views(self, names):
        # recordset of the registered views, in the order of ``names``
        return self.View.concat(*(self.view_ids[name] for name in names))

    def setUp(self):
        super(TestViewInheritance, self).setUp()
        # NOTE(review): patches the registry out of "init" mode — presumably
        # so view combination behaves as at runtime; confirm against
        # ir.ui.view's use of registry._init.
        self.patch(self.registry, '_init', False)

        self.model = 'ir.ui.view.custom'
        self.view_ids = {}

        self.a = self.makeView("A")
        self.a1 = self.makeView("A1", self.a.id)
        self.a2 = self.makeView("A2", self.a.id)
        self.a11 = self.makeView("A11", self.a1.id)
        # A11 is primary: it starts its own combination tree
        self.a11.mode = 'primary'
        self.makeView("A111", self.a11.id)
        self.makeView("A12", self.a1.id)
        self.makeView("A21", self.a2.id)
        self.a22 = self.makeView("A22", self.a2.id)
        self.makeView("A221", self.a22.id)

        self.b = self.makeView('B', arch=self.arch_for("B", 'tree'))
        self.makeView('B1', self.b.id, arch=self.arch_for("B1", 'tree', parent=self.b))
        self.c = self.makeView('C', arch=self.arch_for("C", 'tree'))
        self.c.write({'priority': 1})

    def test_get_inheriting_views(self):
        # primary view A11 (and its children) excluded from A's tree
        self.assertEqual(
            self.view_ids['A']._get_inheriting_views(),
            self.get_views('A A1 A2 A12 A21 A22 A221'.split()),
        )
        self.assertEqual(
            self.view_ids['A21']._get_inheriting_views(),
            self.get_views(['A21']),
        )
        # a primary view starts its own inheritance tree
        self.assertEqual(
            self.view_ids['A11']._get_inheriting_views(),
            self.get_views(['A11', 'A111']),
        )
        self.assertEqual(
            (self.view_ids['A11'] + self.view_ids['A'])._get_inheriting_views(),
            self.get_views('A A1 A2 A11 A111 A12 A21 A22 A221'.split()),
        )

    def test_default_view(self):
        default = self.View.default_view(model=self.model, view_type='form')
        self.assertEqual(default, self.view_ids['A'].id)

        # C has the lowest priority among tree views
        default_tree = self.View.default_view(model=self.model, view_type='tree')
        self.assertEqual(default_tree, self.view_ids['C'].id)

    def test_no_default_view(self):
        self.assertFalse(self.View.default_view(model='no_model.exist', view_type='form'))
        self.assertFalse(self.View.default_view(model=self.model, view_type='graph'))

    def test_no_recursion(self):
        # direct, indirect and arch-level inheritance cycles must all be
        # rejected with a ValidationError
        r1 = self.makeView('R1')
        with self.assertRaises(ValidationError), self.cr.savepoint():
            r1.write({'inherit_id': r1.id})

        r2 = self.makeView('R2', r1.id)
        r3 = self.makeView('R3', r2.id)
        with self.assertRaises(ValidationError), self.cr.savepoint():
            r2.write({'inherit_id': r3.id})

        with self.assertRaises(ValidationError), self.cr.savepoint():
            r1.write({'inherit_id': r3.id})

        with self.assertRaises(ValidationError), self.cr.savepoint():
            r1.write({
                'inherit_id': r1.id,
                'arch': self.arch_for('itself', parent=True),
            })

    def test_write_arch(self):
        self.env['res.lang']._activate_lang('fr_FR')

        v = self.makeView("T", arch='<form string="Foo">Bar</form>')
        self.env['ir.translation']._upsert_translations([{
            'type': 'model_terms',
            'name': 'ir.ui.view,arch_db',
            'lang': 'fr_FR',
            'res_id': v.id,
            'src': 'Foo',
            'value': 'Fou',
        }, {
            'type': 'model_terms',
            'name': 'ir.ui.view,arch_db',
            'lang': 'fr_FR',
            'res_id': v.id,
            'src': 'Bar',
            'value': 'Barre',
        }])
        self.assertEqual(v.arch, '<form string="Foo">Bar</form>')

        # modify v to discard translations; this should not invalidate 'arch'!
        v.arch = '<form></form>'
        self.assertEqual(v.arch, '<form></form>')

    def test_get_combined_arch_query_count(self):
        # If the query count increases, you probably made the view combination
        # fetch an extra field on views. You better fetch that extra field with
        # the query of _get_inheriting_views() and manually feed the cache.
        self.View.invalidate_cache()
        with self.assertQueryCount(3):
            # 1: browse([self.view_ids['A']])
            # 2: _get_inheriting_views: id, inherit_id, mode, groups
            # 3: _combine: arch_db
            self.view_ids['A'].get_combined_arch()
class TestApplyInheritanceSpecs(ViewCase):
    """ Applies a sequence of inheritance specification nodes to a base
    architecture. IO state parameters (cr, uid, model, context) are used for
    error reporting

    The base architecture is altered in-place.
    """
    def setUp(self):
        super(TestApplyInheritanceSpecs, self).setUp()
        # minimal arch: one field in a form
        self.base_arch = E.form(
            E.field(name="target"),
            string="Title")
        # same target field, but with text and sub-fields mixed inside
        self.adv_arch = E.form(
            E.field(
                "TEXT1",
                E.field(name="subtarget"),
                "TEXT2",
                E.field(name="anothersubtarget"),
                "TEXT3",
                name="target",
            ),
            string="Title")

    def test_replace_outer(self):
        spec = E.field(
                E.field(name="replacement"),
                name="target", position="replace")

        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(E.field(name="replacement"), string="Title"))

    def test_delete(self):
        # a replace spec with no content removes the target
        spec = E.field(name="target", position="replace")

        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(string="Title"))

    def test_insert_after(self):
        spec = E.field(
                E.field(name="inserted"),
                name="target", position="after")

        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(
                E.field(name="target"),
                E.field(name="inserted"),
                string="Title"
            ))

    def test_insert_before(self):
        spec = E.field(
                E.field(name="inserted"),
                name="target", position="before")

        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(
                E.field(name="inserted"),
                E.field(name="target"),
                string="Title"))

    def test_insert_inside(self):
        # "inside" appends to the target's existing children
        default = E.field(E.field(name="inserted"), name="target")
        spec = E.field(E.field(name="inserted 2"), name="target", position='inside')

        self.View.apply_inheritance_specs(self.base_arch, default)
        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(
                E.field(
                    E.field(name="inserted"),
                    E.field(name="inserted 2"),
                    name="target"),
                string="Title"))

    def test_replace_inner(self):
        # mode="inner" replaces the target's content, keeping the target node
        spec = E.field(
            "TEXT 4",
            E.field(name="replacement"),
            "TEXT 5",
            E.field(name="replacement2"),
            "TEXT 6",
            name="target", position="replace", mode="inner")

        expected = E.form(
            E.field(
                "TEXT 4",
                E.field(name="replacement"),
                "TEXT 5",
                E.field(name="replacement2"),
                "TEXT 6",
                name="target"),
            string="Title")

        # applying spec to both base_arch and adv_arch is expected to give the same result
        self.View.apply_inheritance_specs(self.base_arch, spec)
        self.assertEqual(self.base_arch, expected)

        self.View.apply_inheritance_specs(self.adv_arch, spec)
        self.assertEqual(self.adv_arch, expected)

    def test_unpack_data(self):
        # a <data> wrapper is transparent: each child spec applies in order
        spec = E.data(
                E.field(E.field(name="inserted 0"), name="target"),
                E.field(E.field(name="inserted 1"), name="target"),
                E.field(E.field(name="inserted 2"), name="target"),
                E.field(E.field(name="inserted 3"), name="target"),
            )

        self.View.apply_inheritance_specs(self.base_arch, spec)

        self.assertEqual(
            self.base_arch,
            E.form(
                E.field(
                    E.field(name="inserted 0"),
                    E.field(name="inserted 1"),
                    E.field(name="inserted 2"),
                    E.field(name="inserted 3"),
                    name="target"),
                string="Title"))

    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_invalid_position(self):
        spec = E.field(
                E.field(name="whoops"),
                name="target", position="serious_series")

        with self.assertRaises(ValueError):
            self.View.apply_inheritance_specs(self.base_arch, spec)

    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_incorrect_version(self):
        # Version ignored on //field elements, so use something else
        arch = E.form(E.element(foo="42"))
        spec = E.element(
            E.field(name="placeholder"),
            foo="42", version="7.0")

        with self.assertRaises(ValueError):
            self.View.apply_inheritance_specs(arch, spec)

    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_target_not_found(self):
        spec = E.field(name="targut")

        with self.assertRaises(ValueError):
            self.View.apply_inheritance_specs(self.base_arch, spec)
class TestApplyInheritanceWrapSpecs(ViewCase):
    """Replacement specs may reference the replaced node via ``$0``."""

    def setUp(self):
        super().setUp()
        self.base_arch = E.template(E.div(E.p("Content")))

    def apply_spec(self, spec):
        self.View.apply_inheritance_specs(self.base_arch, spec)

    def test_replace(self):
        # "$0" in the replacement content re-inserts the replaced node
        wrapper = E.div("$0", {'class': "some"})
        self.apply_spec(E.xpath(wrapper, expr="//p", position="replace"))

        expected = E.template(E.div(
            E.div(E.p('Content'), {'class': 'some'})
        ))
        self.assertEqual(self.base_arch, expected)
class TestApplyInheritanceMoveSpecs(ViewCase):
    """``position="move"`` specs: a nested xpath marks a node to move, the
    outer spec says where it goes. ``wrapped_arch`` adds text around the
    moved node to check text reattachment.
    """
    def setUp(self):
        super(TestApplyInheritanceMoveSpecs, self).setUp()
        self.base_arch = E.template(
            E.div(E.p("Content", {'class': 'some'})),
            E.div({'class': 'target'})
        )
        self.wrapped_arch = E.template(
            E.div("aaaa", E.p("Content", {'class': 'some'}), "bbbb"),
            E.div({'class': 'target'})
        )

    def apply_spec(self, arch, spec):
        self.View.apply_inheritance_specs(arch, spec)

    def test_move_replace(self):
        spec = E.xpath(
            E.xpath(expr="//p", position="move"),
            expr="//div[@class='target']", position="replace")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(),
                E.p("Content", {'class': 'some'})
            )
        )
        # surrounding text of the moved node is merged where it was removed
        self.apply_spec(self.wrapped_arch, spec)
        self.assertEqual(
            self.wrapped_arch,
            E.template(
                E.div("aaaabbbb"),
                E.p("Content", {'class': 'some'})
            )
        )

    def test_move_inside(self):
        spec = E.xpath(
            E.xpath(expr="//p", position="move"),
            expr="//div[@class='target']", position="inside")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(),
                E.div(E.p("Content", {'class': 'some'}), {'class': 'target'})
            )
        )
        self.apply_spec(self.wrapped_arch, spec)
        self.assertEqual(
            self.wrapped_arch,
            E.template(
                E.div("aaaabbbb"),
                E.div(E.p("Content", {'class': 'some'}), {'class': 'target'})
            )
        )

    def test_move_before(self):
        spec = E.xpath(
            E.xpath(expr="//p", position="move"),
            expr="//div[@class='target']", position="before")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(""),
                E.p("Content", {'class': 'some'}),
                E.div({'class': 'target'}),
            )
        )
        self.apply_spec(self.wrapped_arch, spec)
        self.assertEqual(
            self.wrapped_arch,
            E.template(
                E.div("aaaabbbb"),
                E.p("Content", {'class': 'some'}),
                E.div({'class': 'target'}),
            )
        )

    def test_move_after(self):
        spec = E.xpath(
            E.xpath(expr="//p", position="move"),
            expr="//div[@class='target']", position="after")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(),
                E.div({'class': 'target'}),
                E.p("Content", {'class': 'some'}),
            )
        )
        self.apply_spec(self.wrapped_arch, spec)
        self.assertEqual(
            self.wrapped_arch,
            E.template(
                E.div("aaaabbbb"),
                E.div({'class': 'target'}),
                E.p("Content", {'class': 'some'}),
            )
        )

    def test_move_with_other_1(self):
        # multiple elements with move in first position
        spec = E.xpath(
            E.xpath(expr="//p", position="move"),
            E.p("Content2", {'class': 'new_p'}),
            expr="//div[@class='target']", position="after")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(),
                E.div({'class': 'target'}),
                E.p("Content", {'class': 'some'}),
                E.p("Content2", {'class': 'new_p'}),
            )
        )

    def test_move_with_other_2(self):
        # multiple elements with move in last position
        spec = E.xpath(
            E.p("Content2", {'class': 'new_p'}),
            E.xpath(expr="//p", position="move"),
            expr="//div[@class='target']", position="after")

        self.apply_spec(self.wrapped_arch, spec)
        self.assertEqual(
            self.wrapped_arch,
            E.template(
                E.div("aaaabbbb"),
                E.div({'class': 'target'}),
                E.p("Content2", {'class': 'new_p'}),
                E.p("Content", {'class': 'some'}),
            )
        )

    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_incorrect_move_1(self):
        # cannot move an inexisting element
        spec = E.xpath(
            E.xpath(expr="//p[@name='none']", position="move"),
            expr="//div[@class='target']", position="after")

        with self.assertRaises(ValueError):
            self.apply_spec(self.base_arch, spec)

    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_incorrect_move_2(self):
        # move xpath cannot contain any children
        spec = E.xpath(
            E.xpath(E.p("Content2", {'class': 'new_p'}), expr="//p", position="move"),
            expr="//div[@class='target']", position="after")

        with self.assertRaises(ValueError):
            self.apply_spec(self.base_arch, spec)

    def test_incorrect_move_3(self):
        # move won't be correctly applied if not a direct child of an xpath
        spec = E.xpath(
            E.div(E.xpath(E.p("Content2", {'class': 'new_p'}), expr="//p", position="move"), {'class': 'wrapper'}),
            expr="//div[@class='target']", position="after")

        self.apply_spec(self.base_arch, spec)
        self.assertEqual(
            self.base_arch,
            E.template(
                E.div(E.p("Content", {'class': 'some'})),
                E.div({'class': 'target'}),
                E.div(E.xpath(E.p("Content2", {'class': 'new_p'}), expr="//p", position="move"), {'class': 'wrapper'}),
            )
        )
class TestApplyInheritedArchs(ViewCase):
    """ Applies a sequence of modificator archs to a base view.

    (No tests yet — placeholder case.)
    """
class TestNoModel(ViewCase):
    """qweb views have no model; creation and translation must still work."""

    def test_create_view_nomodel(self):
        view = self.View.create({
            'name': 'dummy',
            'arch': '<template name="foo"/>',
            'inherit_id': False,
            'type': 'qweb',
        })
        fields = ['name', 'arch', 'type', 'priority', 'inherit_id', 'model']
        [data] = view.read(fields)
        self.assertEqual(data, {
            'id': view.id,
            'name': 'dummy',
            'arch': '<template name="foo"/>',
            'type': 'qweb',
            'priority': 16,
            'inherit_id': False,
            'model': False,
        })

    # shared fixture arch used by view-rendering tests
    text_para = E.p("", {'class': 'legalese'})
    arch = E.body(
        E.div(
            E.h1("Title"),
            id="header"),
        E.p("Welcome!"),
        E.div(
            E.hr(),
            text_para,
            id="footer"),
        {'class': "index"},)

    def test_qweb_translation(self):
        """
        Test if translations work correctly without a model
        """
        self.env['res.lang']._activate_lang('fr_FR')
        ARCH = '<template name="foo">%s</template>'
        TEXT_EN = "Copyright copyrighter"
        TEXT_FR = u"Copyrighter, tous droits réservés"
        view = self.View.create({
            'name': 'dummy',
            'arch': ARCH % TEXT_EN,
            'inherit_id': False,
            'type': 'qweb',
        })
        self.env['ir.translation'].create({
            'type': 'model_terms',
            'name': 'ir.ui.view,arch_db',
            'res_id': view.id,
            'lang': 'fr_FR',
            'src': TEXT_EN,
            'value': TEXT_FR,
        })
        view = view.with_context(lang='fr_FR')
        self.assertEqual(view.arch, ARCH % TEXT_FR)
class TestTemplating(ViewCase):
    """Branding distribution (data-oe-* attributes) on combined qweb archs."""

    def setUp(self):
        super(TestTemplating, self).setUp()
        # NOTE(review): patches the registry out of "init" mode — presumably
        # so view combination behaves as at runtime; confirm against
        # ir.ui.view's use of registry._init.
        self.patch(self.registry, '_init', False)
    def test_branding_inherit(self):
        """Nodes coming from the root view and from an extension view get
        data-oe-id/data-oe-xpath pointing at their own source view."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1"/>
            </root>
            """
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """<xpath expr="//item" position="before">
                <item order="2"/>
            </xpath>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()

        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        [initial] = arch.xpath('//item[@order=1]')
        self.assertEqual(
            str(view1.id),
            initial.get('data-oe-id'),
            "initial should come from the root view")
        self.assertEqual(
            '/root[1]/item[1]',
            initial.get('data-oe-xpath'),
            "initial's xpath should be within the root view only")

        [second] = arch.xpath('//item[@order=2]')
        self.assertEqual(
            str(view2.id),
            second.get('data-oe-id'),
            "second should come from the extension view")
    def test_branding_inherit_replace_node(self):
        """Replacing one node with several: the inserted nodes are branded
        with xpaths into the extension, untouched siblings keep their
        position in the base view, and t-esc nodes are never branded."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<hello>
                <world></world>
                <world><t t-esc="hello"/></world>
                <world></world>
            </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """<xpath expr="/hello/world[1]" position="replace">
                <world>Is a ghetto</world>
                <world>Wonder when I'll find paradise</world>
            </xpath>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()

        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # First world - has been replaced by inheritance
        [initial] = arch.xpath('/hello[1]/world[1]')
        self.assertEqual(
            '/xpath/world[1]',
            initial.get('data-oe-xpath'),
            'Inherited nodes have correct xpath')

        # Second world added by inheritance
        [initial] = arch.xpath('/hello[1]/world[2]')
        self.assertEqual(
            '/xpath/world[2]',
            initial.get('data-oe-xpath'),
            'Inherited nodes have correct xpath')

        # Third world - is not editable
        [initial] = arch.xpath('/hello[1]/world[3]')
        self.assertFalse(
            initial.get('data-oe-xpath'),
            'node containing t-esc is not branded')

        # The most important assert
        # Fourth world - should have a correct oe-xpath, which is 3rd in main view
        [initial] = arch.xpath('/hello[1]/world[4]')
        self.assertEqual(
            '/hello[1]/world[3]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_replace_node2(self):
        """Variant of test_branding_inherit_replace_node where the
        replacement introduces a node with a different tag (<war/>)."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<hello>
                <world></world>
                <world><t t-esc="hello"/></world>
                <world></world>
            </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """<xpath expr="/hello/world[1]" position="replace">
                <war>Is a ghetto</war>
                <world>Wonder when I'll find paradise</world>
            </xpath>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()

        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        [initial] = arch.xpath('/hello[1]/war[1]')
        self.assertEqual(
            '/xpath/war',
            initial.get('data-oe-xpath'),
            'Inherited nodes have correct xpath')

        # First world: from inheritance
        [initial] = arch.xpath('/hello[1]/world[1]')
        self.assertEqual(
            '/xpath/world',
            initial.get('data-oe-xpath'),
            'Inherited nodes have correct xpath')

        # Second world - is not editable
        [initial] = arch.xpath('/hello[1]/world[2]')
        self.assertFalse(
            initial.get('data-oe-xpath'),
            'node containing t-esc is not branded')

        # The most important assert
        # Third world - should have a correct oe-xpath, which is 3rd in main view
        [initial] = arch.xpath('/hello[1]/world[3]')
        self.assertEqual(
            '/hello[1]/world[3]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_remove_node(self):
        """Removing a node must not shift the branding of the remaining
        siblings: they keep their position in the original view."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            # The t-esc node is to ensure branding is distributed to both
            # <world/> elements from the start
            'arch': """
                <hello>
                    <world></world>
                    <world></world>
                    <t t-esc="foo"/>
                </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="/hello/world[1]" position="replace"/>
                </data>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()

        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Only remaining world but still the second in original view
        [initial] = arch.xpath('/hello[1]/world[1]')
        self.assertEqual(
            '/hello[1]/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_remove_node2(self):
        """Same removal as test_branding_inherit_remove_node, but without a
        t-esc sibling: the root loses its branding, the surviving node keeps
        its original position."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <hello>
                    <world></world>
                    <world></world>
                </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="/hello/world[1]" position="replace"/>
                </data>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()

        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Note: this test is a variant of the test_branding_inherit_remove_node
        # -> in this case, we expect the branding to not be distributed on the
        # <hello/> element anymore but on the only remaining world.
        [initial] = arch.xpath('/hello[1]')
        self.assertIsNone(
            initial.get('data-oe-model'),
            "The inner content of the root was xpath'ed, it should not receive branding anymore")

        # Only remaining world but still the second in original view
        [initial] = arch.xpath('/hello[1]/world[1]')
        self.assertEqual(
            '/hello[1]/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_multi_replace_node(self):
        """Two levels of inheritance where the grandchild view replaces a
        node added by the child view: branding of both the child's own nodes
        and the base view's untouched nodes must stay correct."""
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <hello>
                    <world class="a"></world>
                    <world class="b"></world>
                    <world class="c"></world>
                </hello>
            """
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="//world" position="replace">
                        <world class="new_a"></world>
                        <world class="z"></world>
                    </xpath>
                </data>
            """
        })
        self.View.create({  # Inherit from the child view and target the added element
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view2.id,
            'arch': """
                <data>
                    <xpath expr="//world[hasclass('new_a')]" position="replace">
                        <world class="another_new_a"></world>
                    </xpath>
                </data>
            """
        })

        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Check if the replacement inside the child view did not mess up the
        # branding of elements in that child view
        [initial] = arch.xpath('//world[hasclass("z")]')
        self.assertEqual(
            '/data/xpath/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")

        # Check if the replacement of the first worlds did not mess up the
        # branding of the last world.
        [initial] = arch.xpath('//world[hasclass("c")]')
        self.assertEqual(
            '/hello[1]/world[3]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_multi_replace_node2(self):
        """ Same scenario as test_branding_inherit_multi_replace_node but the
        second extension inherits from the *parent* view while actually
        targeting the element added by the first extension. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <hello>
                    <world class="a"></world>
                    <world class="b"></world>
                    <world class="c"></world>
                </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="//world" position="replace">
                        <world class="new_a"></world>
                        <world class="z"></world>
                    </xpath>
                </data>
            """
        })
        self.View.create({  # Inherit from the parent view but actually target
                            # the element added by the first child view
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="//world" position="replace">
                        <world class="another_new_a"></world>
                    </xpath>
                </data>
            """
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Check if the replacement inside the child view did not mess up the
        # branding of elements in that child view
        [initial] = arch.xpath('//world[hasclass("z")]')
        self.assertEqual(
            '/data/xpath/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")

        # Check if the replacement of the first worlds did not mess up the
        # branding of the last world.
        [initial] = arch.xpath('//world[hasclass("c")]')
        self.assertEqual(
            '/hello[1]/world[3]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_remove_added_from_inheritance(self):
        """ Remove, from a grandchild view, a node that was added by a child
        view; the branding of the remaining nodes of both the child and the
        parent views must stay correct. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <hello>
                    <world class="a"></world>
                    <world class="b"></world>
                </hello>
            """
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            # Note: class="x" instead of t-field="x" in this arch, should lead
            # to the same result that this test is ensuring but was actually
            # a different case in old stable versions.
            'arch': """
                <data>
                    <xpath expr="//world[hasclass('a')]" position="after">
                        <world t-field="x"></world>
                        <world class="y"></world>
                    </xpath>
                </data>
            """
        })
        self.View.create({  # Inherit from the child view and target the added element
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view2.id,
            'arch': """
                <data>
                    <xpath expr="//world[@t-field='x']" position="replace"/>
                </data>
            """
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Check if the replacement inside the child view did not mess up the
        # branding of elements in that child view, should not be the case as
        # that root level branding is not distributed.
        [initial] = arch.xpath('//world[hasclass("y")]')
        self.assertEqual(
            '/data/xpath/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")

        # Check if the child view replacement of added nodes did not mess up
        # the branding of last world in the parent view.
        [initial] = arch.xpath('//world[hasclass("b")]')
        self.assertEqual(
            '/hello[1]/world[2]',
            initial.get('data-oe-xpath'),
            "The node's xpath position should be correct")
    def test_branding_inherit_remove_node_processing_instruction(self):
        """ When an extension removes a node, a processing instruction is
        left in its place to keep track of the removal; branding
        distribution must then clean those processing instructions up. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <html>
                    <head>
                        <hello></hello>
                    </head>
                    <body>
                        <world></world>
                    </body>
                </html>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <xpath expr="//hello" position="replace"/>
                    <xpath expr="//world" position="replace"/>
                </data>
            """
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)

        # Before distribution: the placeholder processing instructions must
        # be present where the removed nodes used to be.
        head = arch.xpath('//head')[0]
        head_child = head[0]
        self.assertEqual(
            head_child.target,
            'apply-inheritance-specs-node-removal',
            "A node was removed at the start of the <head>, a processing instruction should exist as first child node")
        self.assertEqual(
            head_child.text,
            'hello',
            "The processing instruction should mention the tag of the node that was removed")

        body = arch.xpath('//body')[0]
        body_child = body[0]
        self.assertEqual(
            body_child.target,
            'apply-inheritance-specs-node-removal',
            "A node was removed at the start of the <body>, a processing instruction should exist as first child node")
        self.assertEqual(
            body_child.text,
            'world',
            "The processing instruction should mention the tag of the node that was removed")

        self.View.distribute_branding(arch)

        # Test that both head and body have their processing instruction
        # 'apply-inheritance-specs-node-removal' removed after branding
        # distribution. Note: test head and body separately as the code in
        # charge of the removal is different in each case.
        self.assertEqual(
            len(head),
            0,
            "The processing instruction of the <head> should have been removed")
        self.assertEqual(
            len(body),
            0,
            "The processing instruction of the <body> should have been removed")
    def test_branding_inherit_top_t_field(self):
        """ t-field nodes receive their own branding; an inherited t-field
        added at the top level must not shift the xpath branding of the
        base view's following siblings. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """
                <hello>
                    <world></world>
                    <world t-field="a"/>
                    <world></world>
                    <world></world>
                </hello>
            """
        })
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <xpath expr="/hello/world[3]" position="after">
                    <world t-field="b"/>
                </xpath>
            """
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # First t-field should have an indication of xpath
        [node] = arch.xpath('//*[@t-field="a"]')
        self.assertEqual(
            node.get('data-oe-xpath'),
            '/hello[1]/world[2]',
            'First t-field has indication of xpath')

        # Second t-field, from inheritance, should also have an indication of xpath
        [node] = arch.xpath('//*[@t-field="b"]')
        self.assertEqual(
            node.get('data-oe-xpath'),
            '/xpath/world',
            'Inherited t-field has indication of xpath')

        # The most important assert
        # The last world xpath should not have been impacted by the t-field from inheritance
        [node] = arch.xpath('//world[last()]')
        self.assertEqual(
            node.get('data-oe-xpath'),
            '/hello[1]/world[4]',
            "The node's xpath position should be correct")

        # Also test inherit via non-xpath t-field node, direct children of data,
        # is not impacted by the feature
        self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <world t-field="a" position="replace">
                        <world t-field="z"/>
                    </world>
                </data>
            """
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        node = arch.xpath('//world')[1]
        self.assertEqual(
            node.get('t-field'),
            'z',
            "The node has properly been replaced")
    def test_branding_primary_inherit(self):
        """ For a primary-mode child view, nodes coming from the root view
        keep the root view's branding while nodes added by the extension
        are branded against the extension itself. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1"/>
            </root>
            """
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'mode': 'primary',
            'inherit_id': view1.id,
            'arch': """<xpath expr="//item" position="after">
                <item order="2"/>
            </xpath>
            """
        })
        arch_string = view2.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        [initial] = arch.xpath('//item[@order=1]')
        self.assertEqual(
            initial.get('data-oe-id'),
            str(view1.id),
            "initial should come from the root view")
        self.assertEqual(
            initial.get('data-oe-xpath'),
            '/root[1]/item[1]',
            "initial's xpath should be within the inherited view only")

        [second] = arch.xpath('//item[@order=2]')
        self.assertEqual(
            second.get('data-oe-id'),
            str(view2.id),
            "second should come from the extension view")
        self.assertEqual(
            second.get('data-oe-xpath'),
            '/xpath/item',
            "second xpath should be on the inheriting view only")
    def test_branding_distribute_inner(self):
        """ Checks that the branding is correctly distributed within a view
        extension
        """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1"/>
            </root>"""
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """<xpath expr="//item" position="before">
                <item order="2">
                    <content t-att-href="foo">bar</content>
                </item>
            </xpath>"""
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        # Expected combined tree: the extension's <item order="2"> comes
        # first (inserted "before"); branding on its inner <content> points
        # to view2, while the base <item order="1"> is branded against view1.
        self.assertEqual(
            arch,
            E.root(
                E.item(
                    E.content("bar", {
                        't-att-href': "foo",
                        'data-oe-model': 'ir.ui.view',
                        'data-oe-id': str(view2.id),
                        'data-oe-field': 'arch',
                        'data-oe-xpath': '/xpath/item/content[1]',
                    }), {
                        'order': '2',
                    }),
                E.item({
                    'order': '1',
                    'data-oe-model': 'ir.ui.view',
                    'data-oe-id': str(view1.id),
                    'data-oe-field': 'arch',
                    'data-oe-xpath': '/root[1]/item[1]',
                })
            )
        )
def test_branding_attribute_groups(self):
view = self.View.create({
'name': "Base View",
'type': 'qweb',
'arch': """<root>
<item groups="base.group_no_one"/>
</root>""",
})
arch_string = view.with_context(inherit_branding=True).get_combined_arch()
arch = etree.fromstring(arch_string)
self.View.distribute_branding(arch)
self.assertEqual(arch, E.root(E.item({
'groups': 'base.group_no_one',
'data-oe-model': 'ir.ui.view',
'data-oe-id': str(view.id),
'data-oe-field': 'arch',
'data-oe-xpath': '/root[1]/item[1]',
})))
def test_call_no_branding(self):
view = self.View.create({
'name': "Base View",
'type': 'qweb',
'arch': """<root>
<item><span t-call="foo"/></item>
</root>""",
})
arch_string = view.with_context(inherit_branding=True).get_combined_arch()
arch = etree.fromstring(arch_string)
self.View.distribute_branding(arch)
self.assertEqual(arch, E.root(E.item(E.span({'t-call': "foo"}))))
def test_esc_no_branding(self):
view = self.View.create({
'name': "Base View",
'type': 'qweb',
'arch': """<root>
<item><span t-esc="foo"/></item>
</root>""",
})
arch_string = view.with_context(inherit_branding=True).get_combined_arch()
arch = etree.fromstring(arch_string)
self.View.distribute_branding(arch)
self.assertEqual(arch, E.root(E.item(E.span({'t-esc': "foo"}))))
    def test_ignore_unbrand(self):
        """ A node marked with t-ignore="true" must suppress branding for
        its whole subtree, including content injected there by extensions. """
        view1 = self.View.create({
            'name': "Base view",
            'type': 'qweb',
            'arch': """<root>
                <item order="1" t-ignore="true">
                    <t t-esc="foo"/>
                </item>
            </root>"""
        })
        view2 = self.View.create({
            'name': "Extension",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """<xpath expr="//item[@order='1']" position="inside">
                <item order="2">
                    <content t-att-href="foo">bar</content>
                </item>
            </xpath>"""
        })
        arch_string = view1.with_context(inherit_branding=True).get_combined_arch()
        arch = etree.fromstring(arch_string)
        self.View.distribute_branding(arch)

        self.assertEqual(
            arch,
            E.root(
                E.item(
                    {'t-ignore': 'true', 'order': '1'},
                    E.t({'t-esc': 'foo'}),
                    E.item(
                        {'order': '2'},
                        E.content(
                            {'t-att-href': 'foo'},
                            "bar")
                    )
                )
            ),
            "t-ignore should apply to injected sub-view branding, not just to"
            " the main view's"
        )
class TestViews(ViewCase):
def test_nonexistent_attribute_removal(self):
self.View.create({
'name': 'Test View',
'model': 'ir.ui.view',
'inherit_id': self.ref('base.view_view_tree'),
'arch': """<?xml version="1.0"?>
<xpath expr="//field[@name='name']" position="attributes">
<attribute name="non_existing_attribute"></attribute>
</xpath>
""",
})
    def _insert_view(self, **kw):
        """Insert a view directly into the database via SQL to pass through
        (bypass) the ORM validation, so invalid setups can be created and
        validated explicitly afterwards.

        :param kw: column name/value pairs for the ``ir_ui_view`` table
        :return: the id of the inserted view
        """
        kw.pop('id', None)
        # default mode mirrors what the ORM would compute
        kw.setdefault('mode', 'extension' if kw.get('inherit_id') else 'primary')
        kw.setdefault('active', True)

        keys = sorted(kw)
        fields = ','.join('"%s"' % (k.replace('"', r'\"'),) for k in keys)
        params = ','.join('%%(%s)s' % (k,) for k in keys)

        query = 'INSERT INTO ir_ui_view(%s) VALUES(%s) RETURNING id' % (fields, params)
        self.cr.execute(query, kw)
        return self.cr.fetchone()[0]
    def test_custom_view_validation(self):
        """ _validate_custom_views must accept a chain of valid custom views
        inserted directly in database (single view, then two inheriting
        views depending on the first). """
        model = 'ir.actions.act_url'
        validate = partial(self.View._validate_custom_views, model)

        # validation of a single view
        vid = self._insert_view(
            name='base view',
            model=model,
            priority=1,
            arch_db="""<?xml version="1.0"?>
                        <tree string="view">
                          <field name="url"/>
                        </tree>
                    """,
        )
        self.assertTrue(validate())     # single view

        # validation of a inherited view
        self._insert_view(
            name='inherited view',
            model=model,
            priority=1,
            inherit_id=vid,
            arch_db="""<?xml version="1.0"?>
                        <xpath expr="//field[@name='url']" position="before">
                          <field name="name"/>
                        </xpath>
                    """,
        )
        self.assertTrue(validate())     # inherited view

        # validation of a second inherited view (depending on 1st)
        self._insert_view(
            name='inherited view 2',
            model=model,
            priority=5,
            inherit_id=vid,
            arch_db="""<?xml version="1.0"?>
                        <xpath expr="//field[@name='name']" position="after">
                          <field name="target"/>
                        </xpath>
                    """,
        )
        self.assertTrue(validate())     # inherited view
    def test_view_inheritance(self):
        """ Combine a form view with two extensions: attribute changes,
        node replacement and attribute add/remove with separators must all
        be applied in the combined arch. """
        view1 = self.View.create({
            'name': "bob",
            'model': 'ir.ui.view',
            'arch': """
                <form string="Base title">
                    <separator name="separator" string="Separator" colspan="4"/>
                    <footer>
                        <button name="action_archive" type="object" string="Next button" class="btn-primary"/>
                        <button string="Skip" special="cancel" class="btn-secondary"/>
                    </footer>
                </form>
            """
        })
        view2 = self.View.create({
            'name': "edmund",
            'model': 'ir.ui.view',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <form position="attributes">
                        <attribute name="string">Replacement title</attribute>
                    </form>
                    <footer position="replace">
                        <footer>
                            <button name="action_archive" type="object" string="New button"/>
                        </footer>
                    </footer>
                    <separator name="separator" position="replace">
                        <p>Replacement data</p>
                    </separator>
                </data>
            """
        })
        view3 = self.View.create({
            'name': 'jake',
            'model': 'ir.ui.view',
            'inherit_id': view1.id,
            'priority': 17,
            'arch': """
                <footer position="attributes">
                    <attribute name="thing">bob tata lolo</attribute>
                    <attribute name="thing" add="bibi and co" remove="tata" separator=" " />
                    <attribute name="otherthing">bob, tata,lolo</attribute>
                    <attribute name="otherthing" remove="tata, bob"/>
                </footer>
            """
        })

        view = self.View.with_context(check_view_ids=[view2.id, view3.id]) \
                        .fields_view_get(view2.id, view_type='form')
        self.assertEqual(view['type'], 'form')
        self.assertEqual(
            etree.fromstring(
                view['arch'],
                parser=etree.XMLParser(remove_blank_text=True)
            ),
            E.form(
                E.p("Replacement data"),
                E.footer(
                    E.button(name="action_archive", type="object", string="New button"),
                    thing="bob lolo bibi and co", otherthing="lolo"
                ),
                string="Replacement title"))
def test_view_inheritance_text_inside(self):
""" Test view inheritance when adding elements and text. """
view1 = self.View.create({
'name': "alpha",
'model': 'ir.ui.view',
'arch': '<form string="F">(<div/>)</form>',
})
view2 = self.View.create({
'name': "beta",
'model': 'ir.ui.view',
'inherit_id': view1.id,
'arch': '<div position="inside">a<p/>b<p/>c</div>',
})
view = self.View.with_context(check_view_ids=view2.ids).fields_view_get(view1.id)
self.assertEqual(view['type'], 'form')
self.assertEqual(
view['arch'],
'<form string="F">(<div>a<p/>b<p/>c</div>)</form>',
)
def test_view_inheritance_text_after(self):
""" Test view inheritance when adding elements and text. """
view1 = self.View.create({
'name': "alpha",
'model': 'ir.ui.view',
'arch': '<form string="F">(<div/>)</form>',
})
view2 = self.View.create({
'name': "beta",
'model': 'ir.ui.view',
'inherit_id': view1.id,
'arch': '<div position="after">a<p/>b<p/>c</div>',
})
view = self.View.with_context(check_view_ids=view2.ids).fields_view_get(view1.id)
self.assertEqual(view['type'], 'form')
self.assertEqual(
view['arch'],
'<form string="F">(<div/>a<p/>b<p/>c)</form>',
)
def test_view_inheritance_text_before(self):
""" Test view inheritance when adding elements and text. """
view1 = self.View.create({
'name': "alpha",
'model': 'ir.ui.view',
'arch': '<form string="F">(<div/>)</form>',
})
view2 = self.View.create({
'name': "beta",
'model': 'ir.ui.view',
'inherit_id': view1.id,
'arch': '<div position="before">a<p/>b<p/>c</div>',
})
view = self.View.with_context(check_view_ids=view2.ids).fields_view_get(view1.id)
self.assertEqual(view['type'], 'form')
self.assertEqual(
view['arch'],
'<form string="F">(a<p/>b<p/>c<div/>)</form>',
)
    def test_view_inheritance_divergent_models(self):
        """ Extensions whose model differs from the view they are requested
        for must be ignored during combination (view3 targets ir.ui.menu
        and must not be applied). """
        view1 = self.View.create({
            'name': "bob",
            'model': 'ir.ui.view.custom',
            'arch': """
                <form string="Base title">
                    <separator name="separator" string="Separator" colspan="4"/>
                    <footer>
                        <button name="action_archive" type="object" string="Next button" class="btn-primary"/>
                        <button string="Skip" special="cancel" class="btn-secondary"/>
                    </footer>
                </form>
            """
        })
        view2 = self.View.create({
            'name': "edmund",
            'model': 'ir.ui.view',
            'inherit_id': view1.id,
            'arch': """
                <data>
                    <form position="attributes">
                        <attribute name="string">Replacement title</attribute>
                    </form>
                    <footer position="replace">
                        <footer>
                            <button name="action_unarchive" type="object" string="New button"/>
                        </footer>
                    </footer>
                    <separator name="separator" position="replace">
                        <p>Replacement data</p>
                    </separator>
                </data>
            """
        })
        view3 = self.View.create({
            'name': 'jake',
            'model': 'ir.ui.menu',
            'inherit_id': view1.id,
            'priority': 17,
            'arch': """
                <footer position="attributes">
                    <attribute name="thing">bob</attribute>
                </footer>
            """
        })

        view = self.View.with_context(check_view_ids=[view2.id, view3.id]) \
                        .fields_view_get(view2.id, view_type='form')
        self.assertEqual(view['type'], 'form')
        self.assertEqual(
            etree.fromstring(
                view['arch'],
                parser=etree.XMLParser(remove_blank_text=True)
            ),
            E.form(
                E.p("Replacement data"),
                E.footer(
                    E.button(name="action_unarchive", type="object", string="New button")),
                string="Replacement title"
            ))
def test_modifiers(self):
def _test_modifiers(what, expected):
modifiers = {}
if isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
else:
node = etree.fromstring(what) if isinstance(what, str) else what
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
assert modifiers == expected, "%s != %s" % (modifiers, expected)
_test_modifiers('<field name="a"/>', {})
_test_modifiers('<field name="a" invisible="1"/>', {"invisible": True})
_test_modifiers('<field name="a" readonly="1"/>', {"readonly": True})
_test_modifiers('<field name="a" required="1"/>', {"required": True})
_test_modifiers('<field name="a" invisible="0"/>', {})
_test_modifiers('<field name="a" readonly="0"/>', {})
_test_modifiers('<field name="a" required="0"/>', {})
# TODO: Order is not guaranteed
_test_modifiers(
'<field name="a" invisible="1" required="1"/>',
{"invisible": True, "required": True},
)
_test_modifiers(
'<field name="a" invisible="1" required="0"/>',
{"invisible": True},
)
_test_modifiers(
'<field name="a" invisible="0" required="1"/>',
{"required": True},
)
_test_modifiers(
"""<field name="a" attrs="{'invisible': [['b', '=', 'c']]}"/>""",
{"invisible": [["b", "=", "c"]]},
)
# fields in a tree view
tree = etree.fromstring('''
<tree>
<header>
<button name="a" invisible="1"/>
</header>
<field name="a"/>
<field name="a" invisible="0"/>
<field name="a" invisible="1"/>
<field name="a" attrs="{'invisible': [['b', '=', 'c']]}"/>
</tree>
''')
_test_modifiers(tree[0][0], {"invisible": True})
_test_modifiers(tree[1], {})
_test_modifiers(tree[2], {"column_invisible": False})
_test_modifiers(tree[3], {"column_invisible": True})
_test_modifiers(tree[4], {"invisible": [['b', '=', 'c']]})
# The dictionary is supposed to be the result of fields_get().
_test_modifiers({}, {})
_test_modifiers({"invisible": True}, {"invisible": True})
_test_modifiers({"invisible": False}, {})
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_invalid_field(self):
        """ A <field> naming an unknown field, or with no name at all,
        must be rejected by view validation. """
        self.assertInvalid("""
            <form string="View">
                <field name="name"/>
                <field name="not_a_field"/>
            </form>
        """, 'Field "not_a_field" does not exist in model "ir.ui.view"')
        self.assertInvalid("""
            <form string="View">
                <field/>
            </form>
        """, 'Field tag must have a "name" attribute defined')
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_invalid_subfield(self):
        """ An unknown field inside an embedded subview must also be
        rejected by view validation. """
        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_children_ids">
                    <tree name="Children">
                        <field name="name"/>
                        <field name="not_a_field"/>
                    </tree>
                </field>
            </form>
        """
        self.assertInvalid(
            arch,
            '''Field "not_a_field" does not exist in model "ir.ui.view"''',
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_context_in_view(self):
        """ A field referenced in another field's context= expression must
        itself be present in the view. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_id" context="{'stuff': model}"/>
            </form>
        """
        self.assertValid(arch % '<field name="model"/>')
        self.assertInvalid(
            arch % '',
            """Field 'model' used in context ({'stuff': model}) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_context_in_subview(self):
        """ A field referenced in a subview's context= must be present in
        that subview — presence in the outer view does not count. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id" context="{'stuff': model}"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('', '<field name="model"/>'))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in context ({'stuff': model}) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('<field name="model"/>', ''),
            """Field 'model' used in context ({'stuff': model}) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_context_in_subview_with_parent(self):
        """ A parent.field reference in a subview's context= must be present
        in the *parent* view — presence in the subview does not count. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id" context="{'stuff': parent.model}"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('<field name="model"/>', ''))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in context ({'stuff': parent.model}) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '<field name="model"/>'),
            """Field 'model' used in context ({'stuff': parent.model}) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_context_in_subsubview_with_parent(self):
        """ A parent.parent.field reference two subviews deep must resolve
        to the top-level view only. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_children_ids">
                            <form string="Children">
                                <field name="name"/>%s
                                <field name="inherit_id" context="{'stuff': parent.parent.model}"/>
                            </form>
                        </field>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('<field name="model"/>', '', ''))
        self.assertInvalid(
            arch % ('', '', ''),
            """Field 'model' used in context ({'stuff': parent.parent.model}) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '<field name="model"/>', ''),
            """Field 'model' used in context ({'stuff': parent.parent.model}) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '', '<field name="model"/>'),
            """Field 'model' used in context ({'stuff': parent.parent.model}) must be present in view but is missing.""",
        )
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_domain_id_case(self):
# id is read by default and should be usable in domains
self.assertValid("""
<form string="View">
<field name="inherit_id" domain="[('id', '=', False)]"/>
</form>
""")
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_boolean_case(self):
        """ Literal/expression operands in a domain: constants are always
        fine, expressions referencing a field require that field in view. """
        arch = """
            <form string="View">
                %s
                <field name="inherit_id" domain="[(%s, '=', %s)]"/>
            </form>
        """
        self.assertValid(arch % ('', '1', '1'))
        self.assertValid(arch % ('', '0', '1'))
        # self.assertInvalid(arch % ('', '1', '0'))
        self.assertValid(arch % ('<field name="name"/>', '0 if name else 1', '1'))
        # self.assertInvalid(arch % ('<field name="name"/><field name="type"/>', "'tata' if name else 'tutu'", 'type'), 'xxxx')
        self.assertInvalid(
            arch % ('', '0 if name else 1', '1'),
            """Field 'name' used in domain of <field name="inherit_id"> ([(0 if name else 1, '=', 1)]) must be present in view but is missing""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_in_view(self):
        """ A field referenced in another field's domain= expression must be
        present in the view. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_id" domain="[('model', '=', model)]"/>
            </form>
        """
        self.assertValid(arch % '<field name="model"/>')
        self.assertInvalid(
            arch % '',
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', model)]) must be present in view but is missing.""",
        )
def test_domain_unknown_field(self):
self.assertInvalid("""
<form string="View">
<field name="name"/>
<field name="inherit_id" domain="[('invalid_field', '=', 'res.users')]"/>
</form>
""",
'''Unknown field "ir.ui.view.invalid_field" in domain of <field name="inherit_id"> ([('invalid_field', '=', 'res.users')])''',
)
    def test_domain_field_searchable(self):
        """ Domains may only reference searchable fields: computed fields
        are accepted when they define a search method, rejected otherwise. """
        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_id" domain="[('%s', '=', 'test')]"/>
            </form>
        """
        # computed field with a search method
        self.assertValid(arch % 'model_data_id')
        # computed field, not stored, no search
        self.assertInvalid(
            arch % 'xml_id',
            '''Unsearchable field 'xml_id' in path 'xml_id' in domain of <field name="inherit_id"> ([('xml_id', '=', 'test')])''',
        )
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_domain_field_no_comodel(self):
self.assertInvalid("""
<form string="View">
<field name="name" domain="[('test', '=', 'test')]"/>
</form>
""", "Domain on non-relational field \"name\" makes no sense (domain:[('test', '=', 'test')])")
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_in_subview(self):
        """ A field referenced in a subview field's domain= must be present
        in that subview — presence in the outer view does not count. """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id" domain="[('model', '=', model)]"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('', '<field name="model"/>'))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', model)]) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('<field name="model"/>', ''),
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_in_subview_with_parent(self):
        """ A parent.field reference in a subview field's domain= must be
        present in the *parent* view (before or after the subview field). """
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id" domain="[('model', '=', parent.model)]"/>
                    </form>
                </field>%s
            </form>
        """
        self.assertValid(arch % ('<field name="model"/>', '', ''))
        self.assertValid(arch % ('', '', '<field name="model"/>'))
        self.assertInvalid(
            arch % ('', '', ''),
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', parent.model)]) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '<field name="model"/>', ''),
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', parent.model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_field_in_view(self):
        """ A domain declared on the *field definition* (not in the arch)
        is validated the same way: referenced fields must be in the view. """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', model)]")

        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_id"/>
            </form>
        """
        self.assertValid(arch % '<field name="model"/>')
        self.assertInvalid(
            arch % '',
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_field_in_subview(self):
        """ A field-definition domain is checked against the subview where
        the field appears, not against the outer view. """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', model)]")

        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('', '<field name="model"/>'))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', model)]) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('<field name="model"/>', ''),
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_field_in_subview_with_parent(self):
        """ A field-definition domain using parent.field must find that
        field in the parent view, not in the subview. """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', parent.model)]")

        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('<field name="model"/>', ''))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', parent.model)]) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '<field name="model"/>'),
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', parent.model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_field_in_noneditable_subview(self):
        """ Field-definition domains are only enforced in subviews where the
        field can actually be edited (tree editable="bottom"). """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', model)]")

        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_children_ids">
                    <tree string="Children"%s>
                        <field name="name"/>
                        <field name="inherit_id"/>
                    </tree>
                </field>
            </form>
        """
        self.assertValid(arch % '')
        self.assertInvalid(
            arch % ' editable="bottom"',
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', model)]) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_readonly_field_in_view(self):
        """ Field-definition domains are not enforced on readonly fields,
        whether readonly comes from the arch or from the field itself. """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', model)]")

        # readonly declared in the arch
        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_id" readonly="1"/>
            </form>
        """
        self.assertValid(arch)

        # readonly declared on the field definition
        self.patch(field, 'readonly', True)
        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_id"/>
            </form>
        """
        self.assertValid(arch)
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_on_readonly_field_in_subview(self):
        """ A readonly o2m field makes its subview non-editable, so
        field-definition domains inside it are not enforced. """
        field = self.env['ir.ui.view']._fields['inherit_id']
        self.patch(field, 'domain', "[('model', '=', model)]")

        arch = """
            <form string="View">
                <field name="name"/>
                <field name="inherit_children_ids"%s>
                    <form string="Children">
                        <field name="name"/>
                        <field name="inherit_id"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ' readonly="1"')
        self.assertInvalid(
            arch % '',
            """Field 'model' used in domain of field 'inherit_id' ([('model', '=', model)]) must be present in view but is missing.""",
        )
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_modifier_attribute_is_boolean(self):
arch = """
<form string="View">
<field name="name" readonly="%s"/>
</form>
"""
self.assertValid(arch % '1')
self.assertValid(arch % '0')
self.assertValid(arch % 'True')
self.assertInvalid(
arch % "[('model', '=', '1')]",
"Attribute readonly evaluation expects a boolean, got [('model', '=', '1')]",
)
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_in_filter(self):
        """ Filter domains in search views must reference existing fields,
        including dotted paths through relational fields. """
        arch = """
            <search string="Search">
                <field name="%s"/>
                <filter string="Dummy" name="draft" domain="[('%s', '=', 'dummy')]"/>
            </search>
        """
        self.assertValid(arch % ('name', 'name'))
        self.assertValid(arch % ('name', 'inherit_children_ids.name'))
        self.assertInvalid(
            arch % ('invalid_field', 'name'),
            'Field "invalid_field" does not exist in model "ir.ui.view"',
        )
        self.assertInvalid(
            arch % ('name', 'invalid_field'),
            """Unknown field "ir.ui.view.invalid_field" in domain of <filter name="draft"> ([('invalid_field', '=', 'dummy')])""",
        )
        self.assertInvalid(
            arch % ('name', 'inherit_children_ids.invalid_field'),
            """Unknown field "ir.ui.view.invalid_field" in domain of <filter name="draft"> ([('inherit_children_ids.invalid_field', '=', 'dummy')])""",
        )
        # todo add check for non searchable fields and group by
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_group_by_in_filter(self):
arch = """
<search string="Search">
<filter string="Date" name="month" domain="[]" context="{'group_by':'%s'}"/>
</search>
"""
self.assertValid(arch % 'name')
self.assertInvalid(
arch % 'invalid_field',
"""Unknown field "invalid_field" in "group_by" value in context="{'group_by':'invalid_field'}""",
)
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_domain_invalid_in_filter(self):
        """A <filter> domain must be a well-formed domain (a list of tuples),
        not a flat list of operands."""
        # invalid domain: it should be a list of tuples
        self.assertInvalid(
            """ <search string="Search">
                    <filter string="Dummy" name="draft" domain="['name', '=', 'dummy']"/>
                </search>
            """,
            """Invalid domain format ['name', '=', 'dummy'] in domain of <filter name="draft">""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_searchpanel(self):
        """Validation of <searchpanel> field domains: referenced fields must
        exist, be present inside the searchpanel (not merely in the search
        view), and not be declared ``select="multi"`` themselves; a search
        view accepts at most one searchpanel."""
        arch = """
            <search>
                %s
                <searchpanel>
                    %s
                    <field name="groups_id" select="multi" domain="[['%s', '=', %s]]" enable_counters="1"/>
                </searchpanel>
            </search>
        """
        self.assertValid(arch % ('', '<field name="inherit_id"/>', 'view_access', 'inherit_id'))
        # the referenced field is outside the searchpanel: not good enough
        self.assertInvalid(
            arch % ('<field name="inherit_id"/>', '', 'view_access', 'inherit_id'),
            """Field 'inherit_id' used in domain of <field name="groups_id"> ([['view_access', '=', inherit_id]]) must be present in view but is missing.""",
        )
        self.assertInvalid(
            arch % ('', '<field name="inherit_id"/>', 'view_access', 'view_access'),
            """Field 'view_access' used in domain of <field name="groups_id"> ([['view_access', '=', view_access]]) must be present in view but is missing.""",
        )
        # left-hand side of the domain is checked against the comodel (res.groups)
        self.assertInvalid(
            arch % ('', '<field name="inherit_id"/>', 'inherit_id', 'inherit_id'),
            """Unknown field "res.groups.inherit_id" in domain of <field name="groups_id"> ([['inherit_id', '=', inherit_id]])""",
        )
        self.assertInvalid(
            arch % ('', '<field name="inherit_id" select="multi"/>', 'view_access', 'inherit_id'),
            """Field 'inherit_id' used in domain of <field name="groups_id"> ([['view_access', '=', inherit_id]]) is present in view but is in select multi.""",
        )
        arch = """
            <search>
                <searchpanel>
                    <field name="inherit_id" enable_counters="1"/>
                </searchpanel>
                <searchpanel>
                    <field name="inherit_id" enable_counters="1"/>
                </searchpanel>
            </search>
        """
        self.assertInvalid(arch, "Search tag can only contain one search panel")
    def test_groups_field(self):
        """An unknown xmlid in a ``groups`` attribute only logs a warning
        (the view is still created), while a known group is accepted."""
        arch = """
            <form string="View">
                <field name="name" groups="%s"/>
            </form>
        """
        self.assertValid(arch % 'base.group_no_one')
        self.assertWarning(arch % 'base.dummy')
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_attrs_field(self):
        """Fields referenced in an ``attrs`` domain must be present in the
        same view."""
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_id"
                       attrs="{'readonly': [('model', '=', 'ir.ui.view')]}"/>
            </form>
        """
        self.assertValid(arch % '<field name="model"/>')
        self.assertInvalid(
            arch % '',
            """Field 'model' used in attrs ({'readonly': [('model', '=', 'ir.ui.view')]}) must be present in view but is missing""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_attrs_invalid_domain(self):
        """A malformed domain inside ``attrs`` (2-element tuple instead of a
        triplet) is rejected."""
        arch = """
            <form string="View">
                <field name="name"/>
                <field name="model"/>
                <field name="inherit_id"
                       attrs="{'readonly': [('model', 'ir.ui.view')]}"/>
            </form>
        """
        self.assertInvalid(
            arch,
            """Invalid domain format {'readonly': [('model', 'ir.ui.view')]} in attrs""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_attrs_subfield(self):
        """``attrs`` inside an embedded subview must reference fields of the
        subview itself; a field in the outer view does not count."""
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id"
                               attrs="{'readonly': [('model', '=', 'ir.ui.view')]}"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('', '<field name="model"/>'))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in attrs ({'readonly': [('model', '=', 'ir.ui.view')]}) must be present in view but is missing.""",
        )
        # presence in the parent view only is not sufficient
        self.assertInvalid(
            arch % ('<field name="model"/>', ''),
            """Field 'model' used in attrs ({'readonly': [('model', '=', 'ir.ui.view')]}) must be present in view but is missing.""",
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_attrs_subfield_with_parent(self):
        """``parent.field`` references inside a subview's ``attrs`` must be
        satisfied by the *outer* view, not by the subview."""
        arch = """
            <form string="View">
                <field name="name"/>%s
                <field name="inherit_children_ids">
                    <form string="Children">
                        <field name="name"/>%s
                        <field name="inherit_id"
                               attrs="{'readonly': [('parent.model', '=', 'ir.ui.view')]}"/>
                    </form>
                </field>
            </form>
        """
        self.assertValid(arch % ('<field name="model"/>', ''))
        self.assertInvalid(
            arch % ('', ''),
            """Field 'model' used in attrs ({'readonly': [('parent.model', '=', 'ir.ui.view')]}) must be present in view but is missing.""",
        )
        # presence in the subview only is not sufficient for parent.*
        self.assertInvalid(
            arch % ('', '<field name="model"/>'),
            """Field 'model' used in attrs ({'readonly': [('parent.model', '=', 'ir.ui.view')]}) must be present in view but is missing.""",
        )
    def test_button(self):
        """Validation of <button> nodes: ``type="object"`` needs a public
        method on the model; ``type="action"`` needs an existing xmlid/id of
        an ir.actions record; ``special`` only accepts known values."""
        arch = """
            <form>
                <button type="object" name="%s"/>
            </form>
        """
        self.assertValid(arch % 'action_archive', name='valid button name')
        self.assertInvalid(
            arch % 'wtfzzz', 'wtfzzz is not a valid action on ir.ui.view',
            name='button name is not even a method',
        )
        self.assertInvalid(
            arch % '_check_xml',
            '_check_xml on ir.ui.view is private and cannot be called from a button',
            name='button name is a private method',
        )
        # methods with mandatory extra arguments are allowed but warn
        self.assertWarning(arch % 'postprocess_and_fields', name='button name is a method that requires extra arguments')
        arch = """
            <form>
                <button type="action" name="%s"/>
            </form>
        """
        self.assertInvalid(arch % 0, 'Action 0 (id: 0) does not exist for button of type action.')
        self.assertInvalid(arch % 'base.random_xmlid', 'Invalid xmlid base.random_xmlid for button of type action')
        self.assertInvalid('<form><button type="action"/></form>', 'Button must have a name')
        self.assertInvalid('<form><button special="dummy"/></form>', "Invalid special 'dummy' in button")
        self.assertValid(arch % 'base.action_server_module_immediate_install')
        # the xmlid must point at an ir.actions.* record
        self.assertInvalid(arch % 'base.partner_root', "base.partner_root is of type res.partner, expected a subclass of ir.actions.actions")
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_tree(self):
        """A <tree> view only accepts a fixed set of child tags; <group> is
        not one of them."""
        arch = """
            <tree>
                <field name="name"/>
                <button type='object' name="action_archive"/>
                %s
            </tree>
        """
        self.assertValid(arch % '')
        self.assertInvalid(arch % '<group/>', "Tree child can only have one of field, button, control, groupby, widget, header tag (not group)")
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_tree_groupby(self):
        """A <groupby> node in a tree view must reference an existing
        many2one field of the model."""
        arch = """
            <tree>
                <field name="name"/>
                <groupby name="%s">
                    <button type="object" name="action_archive"/>
                </groupby>
            </tree>
        """
        self.assertValid(arch % ('model_data_id'))
        # 'type' exists but is a selection, not a many2one
        self.assertInvalid(arch % ('type'), "Field 'type' found in 'groupby' node can only be of type many2one, found selection")
        self.assertInvalid(arch % ('dummy'), "Field 'dummy' found in 'groupby' node does not exist in model ir.ui.view")
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_tree_groupby_many2one(self):
        """Inside a <groupby>, fields are resolved on the comodel of the
        grouped many2one (ir.model.data here), not on the tree's model."""
        arch = """
            <tree>
                <field name="name"/>
                %s
                <groupby name="model_data_id">
                    %s
                    <button type="object" name="action_archive" attrs="{'invisible': [('noupdate', '=', True)]}" string="Button1"/>
                </groupby>
            </tree>
        """
        self.assertValid(arch % ('', '<field name="noupdate"/>'))
        # attrs field must be present inside the groupby
        self.assertInvalid(
            arch % ('', ''),
            """Field 'noupdate' used in attrs ({'invisible': [('noupdate', '=', True)]}) must be present in view but is missing.""",
        )
        # 'noupdate' does not exist on the tree's own model
        self.assertInvalid(
            arch % ('<field name="noupdate"/>', ''),
            '''Field "noupdate" does not exist in model "ir.ui.view"''',
        )
        self.assertInvalid(
            arch % ('', '<field name="noupdate"/><field name="fake_field"/>'),
            '''Field "fake_field" does not exist in model "ir.model.data"''',
        )
    @mute_logger('odoo.addons.base.models.ir_ui_view')
    def test_check_xml_on_reenable(self):
        """Re-activating an inactive inherited view re-runs arch validation;
        fixing the arch in the same write must not trigger the constraint."""
        view1 = self.View.create({
            'name': 'valid _check_xml',
            'model': 'ir.ui.view',
            'arch': """
                <form string="View">
                    <field name="name"/>
                </form>
            """,
        })
        # inactive views are not validated on create, even with a bad spec
        view2 = self.View.create({
            'name': 'valid _check_xml',
            'model': 'ir.ui.view',
            'inherit_id': view1.id,
            'active': False,
            'arch': """
                <field name="foo" position="after">
                    <field name="bar"/>
                </field>
            """
        })
        with self.assertRaises(ValidationError):
            view2.active = True

        # Re-enabling the view and correcting it at the same time should not raise the `_check_xml` constraint.
        view2.write({
            'active': True,
            'arch': """
                <field name="name" position="after">
                    <span>bar</span>
                </field>
            """,
        })
    def test_for_in_label(self):
        """A <label> must carry a ``for`` attribute that matches a field or
        button name/id present in the view."""
        self.assertValid('<form><field name="model"/><label for="model"/></form>')
        self.assertInvalid(
            '<form><field name="model"/><label/></form>',
            """Label tag must contain a "for". To match label style without corresponding field or button, use 'class="o_form_label"'""",
        )
        self.assertInvalid(
            '<form><label for="model"/></form>',
            """Name or id 'model' in <label for="..."> must be present in view but is missing.""",
        )
def test_col_colspan_numerical(self):
self.assertValid('<form><group col="5"></group></form>')
self.assertInvalid(
'<form><group col="alpha"></group></form>',
"'col' value must be an integer (alpha)",
)
self.assertValid('<form><div colspan="5"></div></form>')
self.assertInvalid(
'<form><div colspan="alpha"></div></form>',
"'colspan' value must be an integer (alpha)",
)
def test_valid_alerts(self):
self.assertValid('<form><div class="alert alert-success" role="alert"/></form>')
self.assertValid('<form><div class="alert alert-success" role="alertdialog"/></form>')
self.assertValid('<form><div class="alert alert-success" role="status"/></form>')
self.assertWarning('<form><div class="alert alert-success"/></form>')
def test_valid_prohibited_none_role(self):
self.assertWarning('<form><div role="none"/></form>')
self.assertWarning('<form><div role="presentation"/></form>')
def test_valid_alternative_image_text(self):
self.assertValid('<form><img src="a" alt="a image"></img></form>')
self.assertWarning('<form><img src="a"></img></form>')
    def test_valid_accessibility_icon_text(self):
        """Font-awesome icons and icon buttons need a textual alternative
        (title, aria-label, or nearby text); otherwise a warning is logged."""
        self.assertWarning(
            '<form><span class="fa fa-warning"/></form>',
            'A <span> with fa class (fa fa-warning) must have title in its tag, parents, descendants or have text'
        )
        self.assertWarning(
            '<form><button icon="fa-warning"/></form>',
            'A button with icon attribute (fa-warning) must have title in its tag, parents, descendants or have text'
        )
        # any of: sibling text, contained text, title or aria-label satisfies the check
        self.assertValid('<form><button icon="fa-warning"/>text</form>')
        self.assertValid('<form><span class="fa fa-warning"/>text</form>')
        self.assertValid('<form>text<span class="fa fa-warning"/></form>')
        self.assertValid('<form><span class="fa fa-warning">text</span></form>')
        self.assertValid('<form><span title="text" class="fa fa-warning"/></form>')
        self.assertValid('<form><span aria-label="text" class="fa fa-warning"/></form>')
def test_valid_simili_button(self):
self.assertWarning('<form><a class="btn"/></form>')
self.assertValid('<form><a class="btn" role="button"/></form>')
def test_valid_dialog(self):
self.assertWarning('<form><div class="modal"/></form>')
self.assertValid('<form><div role="dialog" class="modal"></div></form>')
self.assertWarning('<form><div class="modal-header"/></form>')
self.assertValid('<form><header class="modal-header"/></form>')
self.assertWarning('<form><div class="modal-footer"/></form>')
self.assertValid('<form><footer class="modal-footer"/></form>')
self.assertWarning('<form><div class="modal-body"/></form>')
self.assertValid('<form><main class="modal-body"/></form>')
def test_valid_simili_dropdown(self):
self.assertValid('<form><ul class="dropdown-menu" role="menu"></ul></form>')
self.assertWarning('<form><ul class="dropdown-menu"></ul></form>')
def test_valid_simili_progressbar(self):
self.assertValid('<form><div class="o_progressbar" role="progressbar" aria-valuenow="14" aria-valuemin="0" aria-valuemax="100">14%</div></form>')
self.assertWarning('<form><div class="o_progressbar" aria-valuenow="14" aria-valuemin="0" aria-valuemax="100">14%</div></form>')
self.assertWarning('<form><div class="o_progressbar" role="progressbar" aria-valuemin="0" aria-valuemax="100">14%</div></form>')
self.assertWarning('<form><div class="o_progressbar" role="progressbar" aria-valuenow="14" aria-valuemax="100">14%</div></form>')
self.assertWarning('<form><div class="o_progressbar" role="progressbar" aria-valuenow="14" aria-valuemin="0" >14%</div></form>')
def test_valid_simili_tabpanel(self):
self.assertValid('<form><div class="tab-pane" role="tabpanel"/></form>')
self.assertWarning('<form><div class="tab-pane"/></form>')
def test_valid_simili_tablist(self):
self.assertValid('<form><div class="nav-tabs" role="tablist"/></form>')
self.assertWarning('<form><div class="nav-tabs"/></form>')
def test_valid_simili_tab(self):
self.assertValid('<form><a data-toggle="tab" role="tab" aria-controls="test"/></form>')
self.assertWarning('<form><a data-toggle="tab" aria-controls="test"/></form>')
self.assertWarning('<form><a data-toggle="tab" role="tab"/></form>')
self.assertWarning('<form><a data-toggle="tab" role="tab" aria-controls="#test"/></form>')
def test_valid_focusable_button(self):
self.assertValid('<form><a class="btn" role="button"/></form>')
self.assertValid('<form><button class="btn" role="button"/></form>')
self.assertValid('<form><select class="btn" role="button"/></form>')
self.assertValid('<form><input type="button" class="btn" role="button"/></form>')
self.assertValid('<form><input type="submit" class="btn" role="button"/></form>')
self.assertValid('<form><input type="reset" class="btn" role="button"/></form>')
self.assertValid('<form><div type="reset" class="btn btn-group" role="button"/></form>')
self.assertValid('<form><div type="reset" class="btn btn-toolbar" role="button"/></form>')
self.assertValid('<form><div type="reset" class="btn btn-ship" role="button"/></form>')
self.assertWarning('<form><div class="btn" role="button"/></form>')
self.assertWarning('<form><input type="email" class="btn" role="button"/></form>')
    def test_partial_validation(self):
        """Inheritance specs are validated incrementally: added/modified
        elements are checked; a replace triggers a full re-validation; a move
        must not re-validate the moved element out of context; changing an
        extension re-validates its sibling extensions."""
        self.View = self.View.with_context(load_all_views=True)

        # base view
        view0 = self.assertValid("""
            <form string="View">
                <field name="model"/>
                <field name="inherit_id" domain="[('model', '=', model)]"/>
            </form>
        """)

        # added elements should be validated
        self.assertInvalid(
            """<form position="inside">
                <field name="groups_id" domain="[('invalid_field', '=', 'dummy')]"/>
            </form>""",
            """Unknown field "res.groups.invalid_field" in domain of <field name="groups_id"> ([('invalid_field', '=', 'dummy')]))""",
            inherit_id=view0.id,
        )
        view1 = self.assertValid(
            """<form position="inside">
                <field name="name"/>
            </form>""",
            inherit_id=view0.id,
        )
        view2 = self.assertValid(
            """<form position="inside">
                <field name="groups_id" domain="[('name', '=', name)]"/>
                <label for="groups_id"/>
            </form>""",
            inherit_id=view1.id,
        )

        # modifying attributes should validate the target element
        self.assertInvalid(
            """<field name="inherit_id" position="attributes">
                <attribute name="domain">[('invalid_field', '=', 'dummy')]</attribute>
            </field>""",
            """Unknown field "ir.ui.view.invalid_field" in domain of <field name="inherit_id"> ([('invalid_field', '=', 'dummy')]))""",
            inherit_id=view0.id,
        )

        # replacing an element should validate the whole view
        self.assertInvalid(
            """<field name="model" position="replace"/>""",
            """Field 'model' used in domain of <field name="inherit_id"> ([('model', '=', model)]) must be present in view but is missing.""",
            inherit_id=view0.id,
        )

        # moving an element should have no impact; this test checks that the
        # implementation does not flag the inner element to be validated, which
        # prevents to locate the corresponding element inside the arch
        self.assertValid(
            """<field name="groups_id" position="before">
                <label for="groups_id" position="move"/>
            </field>""",
            inherit_id=view2.id,
        )

        # modifying a view extension should validate the other views
        with mute_logger('odoo.addons.base.models.ir_ui_view'):
            with self.assertRaises(ValidationError):
                with self.cr.savepoint():
                    view1.arch = """<form position="inside">
                        <field name="type"/>
                    </form>"""
    def test_address_view(self):
        """The company country's ``address_view_id`` replaces the
        ``o_address_format`` div in partner form views at fields_view_get
        time."""
        # pe_partner_address_form
        address_arch = """<form><div class="o_address_format"><field name="parent_name"/></div></form>"""
        address_view = self.View.create({
            'name': 'view',
            'model': 'res.partner',
            'arch': address_arch,
            'priority': 900,
        })

        # view can be created without address_view
        form_arch = """<form><field name="id"/><div class="o_address_format"><field name="street"/></div></form>"""
        partner_view = self.View.create({
            'name': 'view',
            'model': 'res.partner',
            'arch': form_arch,
        })

        # default view, no address_view defined
        arch = self.env['res.partner'].fields_view_get(view_id=partner_view.id)['arch']
        self.assertIn('"street"', arch)
        self.assertNotIn('"parent_name"', arch)

        # custom view, address_view defined
        self.env.company.country_id.address_view_id = address_view
        arch = self.env['res.partner'].fields_view_get(view_id=partner_view.id)['arch']
        self.assertNotIn('"street"', arch)
        self.assertIn('"parent_name"', arch)
        # weird result: <form> inside a <form>
        self.assertRegex(arch, r"<form>.*<form>.*</form>.*</form>")
    def test_graph_fields(self):
        """A <graph> view accepts only <field> children."""
        self.assertValid('<graph string="Graph"><field name="model" type="row"/><field name="inherit_id" type="measure"/></graph>')
        self.assertInvalid(
            '<graph string="Graph"><label for="model"/><field name="model" type="row"/><field name="inherit_id" type="measure"/></graph>',
            'A <graph> can only contains <field> nodes, found a <label>'
        )
def assertValid(self, arch, name='valid view', inherit_id=False):
return self.View.create({
'name': name,
'model': 'ir.ui.view',
'inherit_id': inherit_id,
'arch': arch,
})
    def assertInvalid(self, arch, expected_message=None, name='invalid view', inherit_id=False):
        """Assert that creating a view with ``arch`` raises a
        ValidationError whose message contains ``expected_message``.

        The create runs inside a savepoint so the failed insert does not
        poison the test transaction. When no message is expected, the actual
        one is logged for inspection.
        """
        with mute_logger('odoo.addons.base.models.ir_ui_view'):
            with self.assertRaises(ValidationError) as catcher:
                with self.cr.savepoint():
                    self.View.create({
                        'name': name,
                        'model': 'ir.ui.view',
                        'inherit_id': inherit_id,
                        'arch': arch,
                    })
        message = str(catcher.exception.args[0])
        # the error context carries the name of the offending view
        self.assertEqual(catcher.exception.context['name'], name)
        if expected_message:
            self.assertIn(expected_message, message)
        else:
            _logger.warning(message)
    def assertWarning(self, arch, expected_message=None, name='invalid view'):
        """Assert that creating a view with ``arch`` succeeds but logs
        exactly one WARNING on the ir_ui_view logger, optionally containing
        ``expected_message``."""
        with self.assertLogs('odoo.addons.base.models.ir_ui_view', level="WARNING") as log_catcher:
            self.View.create({
                'name': name,
                'model': 'ir.ui.view',
                'arch': arch,
            })
        self.assertEqual(len(log_catcher.output), 1, "Exactly one warning should be logged")
        message = log_catcher.output[0]
        self.assertIn('View error context', message)
        self.assertIn("'name': '%s'" % name, message)
        if expected_message:
            self.assertIn(expected_message, message)
class TestViewTranslations(common.TransactionCase):
    # these tests are essentially the same as in test_translate.py, but they use
    # the computed field 'arch' instead of the translated field 'arch_db'

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.env['res.lang']._activate_lang('fr_FR')
        cls.env['res.lang']._activate_lang('nl_NL')
        cls.env['ir.translation']._load_module_terms(['base'], ['fr_FR', 'nl_NL'])

    def create_view(self, archf, terms, **kwargs):
        """Create a res.partner view from ``archf % terms`` and seed
        ir.translation records; ``kwargs`` maps lang codes to per-term
        translated values."""
        view = self.env['ir.ui.view'].create({
            'name': 'test',
            'model': 'res.partner',
            'arch': archf % terms,
        })
        # DLE P70: `_sync_terms_translations`, which delete translations for which there is no value, is called sooner than before
        # because it's called in `_write`, which is called by `flush`, which is called by the `search`.
        # `arch_db` is in `_write` instead of `create` because `arch_db` is the inverse of `arch`.
        # We need to flush `arch_db` before creating the translations otherwise the translation for which there is no value will be deleted,
        # while the `test_sync_update` specifically needs empty translations
        view.flush()
        self.env['ir.translation'].create([
            {
                'type': 'model_terms',
                'name': 'ir.ui.view,arch_db',
                'lang': lang,
                'res_id': view.id,
                'src': src,
                'value': val,
                'state': 'translated',
            }
            for lang, trans_terms in kwargs.items()
            for src, val in zip(terms, trans_terms)
        ])
        return view

    def test_sync(self):
        """ Check translations of 'arch' after minor change in source terms. """
        archf = '<form string="X">%s</form>'
        terms_en = ('Bread and cheeze',)
        terms_fr = ('Pain et fromage',)
        terms_nl = ('Brood and kaas',)
        view = self.create_view(archf, terms_en, en_US=terms_en, fr_FR=terms_fr, nl_NL=terms_nl)

        env_nolang = self.env(context={})
        env_en = self.env(context={'lang': 'en_US'})
        env_fr = self.env(context={'lang': 'fr_FR'})
        env_nl = self.env(context={'lang': 'nl_NL'})

        self.assertEqual(view.with_env(env_nolang).arch, archf % terms_en)
        self.assertEqual(view.with_env(env_en).arch, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch, archf % terms_fr)
        self.assertEqual(view.with_env(env_nl).arch, archf % terms_nl)

        # modify source term in view (fixed typo in 'cheeze')
        terms_en = ('Bread and cheese',)
        view.with_env(env_en).write({'arch': archf % terms_en})

        # check whether translations have been synchronized
        self.assertEqual(view.with_env(env_nolang).arch, archf % terms_en)
        self.assertEqual(view.with_env(env_en).arch, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch, archf % terms_fr)
        self.assertEqual(view.with_env(env_nl).arch, archf % terms_nl)

        view = self.create_view(archf, terms_fr, en_US=terms_en, fr_FR=terms_fr, nl_NL=terms_nl)
        # modify source term in view in another language with close term
        new_terms_fr = ('Pains et fromage',)
        view.with_env(env_fr).write({'arch': archf % new_terms_fr})

        # check whether translations have been synchronized
        self.assertEqual(view.with_env(env_nolang).arch, archf % new_terms_fr)
        self.assertEqual(view.with_env(env_en).arch, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch, archf % new_terms_fr)
        self.assertEqual(view.with_env(env_nl).arch, archf % terms_nl)

    def test_sync_update(self):
        """ Check translations after major changes in source terms. """
        archf = '<form string="X"><div>%s</div><div>%s</div></form>'
        terms_src = ('Subtotal', 'Subtotal:')
        terms_en = ('', 'Sub total:')
        view = self.create_view(archf, terms_src, en_US=terms_en)

        translations = self.env['ir.translation'].search([
            ('type', '=', 'model_terms'),
            ('name', '=', "ir.ui.view,arch_db"),
            ('res_id', '=', view.id),
        ])
        self.assertEqual(len(translations), 2)

        # modifying the arch should sync existing translations without errors
        new_arch = archf % ('Subtotal', 'Subtotal:<br/>')
        view.write({"arch": new_arch})
        self.assertEqual(view.arch, new_arch)

        translations = self.env['ir.translation'].search([
            ('type', '=', 'model_terms'),
            ('name', '=', "ir.ui.view,arch_db"),
            ('res_id', '=', view.id),
        ])
        # 'Subtotal' being src==value, it will be discarded
        # 'Subtotal:' will be discarded as it matches 'Subtotal' instead of 'Subtotal:<br/>'
        self.assertEqual(len(translations), 0)

    def test_cache_consistency(self):
        """Writing 'arch' without a lang must be reflected in every language's
        cached 'arch'/'arch_db' values."""
        view = self.env["ir.ui.view"].create({
            "name": "test_translate_xml_cache_invalidation",
            "model": "res.partner",
            "arch": "<form><b>content</b></form>",
        })
        view_fr = view.with_context({"lang": "fr_FR"})
        self.assertIn("<b>", view.arch_db)
        self.assertIn("<b>", view.arch)
        self.assertIn("<b>", view_fr.arch_db)
        self.assertIn("<b>", view_fr.arch)

        # write with no lang, and check consistency in other languages
        view.write({"arch": "<form><i>content</i></form>"})
        self.assertIn("<i>", view.arch_db)
        self.assertIn("<i>", view.arch)
        self.assertIn("<i>", view_fr.arch_db)
        self.assertIn("<i>", view_fr.arch)
class ViewModeField(ViewCase):
    """
    This should probably, eventually, be folded back into other test case
    classes, integrating the test (or not) of the mode field to regular cases
    """
    def testModeImplicitValue(self):
        """ mode is auto-generated from inherit_id:
        * inherit_id -> mode=extension
        * not inherit_id -> mode=primary
        """
        view = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>'
        })
        self.assertEqual(view.mode, 'primary')

        view2 = self.View.create({
            'inherit_id': view.id,
            'arch': '<qweb/>'
        })
        self.assertEqual(view2.mode, 'extension')

        # mode recomputes when inherit_id is toggled
        view2.write({'inherit_id': None})
        self.assertEqual(view2.mode, 'primary')

        view2.write({'inherit_id': view.id})
        self.assertEqual(view2.mode, 'extension')

    @mute_logger('odoo.sql_db')
    def testModeExplicit(self):
        """An explicit mode overrides the implicit rule, but 'extension'
        without an inherit_id violates the SQL constraint."""
        view = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>'
        })
        view2 = self.View.create({
            'inherit_id': view.id,
            'mode': 'primary',
            'arch': '<qweb/>'
        })
        self.assertEqual(view.mode, 'primary')
        self.assertEqual(view2.mode, 'primary')

        with self.assertRaises(IntegrityError):
            self.View.create({
                'inherit_id': None,
                'mode': 'extension',
                'arch': '<qweb/>'
            })

    @mute_logger('odoo.sql_db')
    def testPurePrimaryToExtension(self):
        """
        A primary view with inherit_id=None can't be converted to extension
        """
        view_pure_primary = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>'
        })
        with self.assertRaises(IntegrityError):
            view_pure_primary.write({'mode': 'extension'})
            # flush forces the UPDATE so the constraint actually fires
            view_pure_primary.flush()

    def testInheritPrimaryToExtension(self):
        """
        A primary view with an inherit_id can be converted to extension
        """
        base = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>',
        })
        view = self.View.create({
            'inherit_id': base.id,
            'mode': 'primary',
            'arch': '<qweb/>'
        })
        view.write({'mode': 'extension'})

    def testDefaultExtensionToPrimary(self):
        """
        An extension view can be converted to primary
        """
        base = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>',
        })
        view = self.View.create({
            'inherit_id': base.id,
            'arch': '<qweb/>'
        })
        view.write({'mode': 'primary'})

    def testChangeInheritOfPrimary(self):
        """
        A primary view with an inherit_id must remain primary when changing the inherit_id
        """
        base1 = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>',
        })
        base2 = self.View.create({
            'inherit_id': None,
            'arch': '<qweb/>',
        })
        view = self.View.create({
            'mode': 'primary',
            'inherit_id': base1.id,
            'arch': '<qweb/>',
        })
        self.assertEqual(view.mode, 'primary')
        view.write({'inherit_id': base2.id})
        self.assertEqual(view.mode, 'primary')
class TestDefaultView(ViewCase):
    """Tests for ir.ui.view.default_view(), which picks the lowest-priority
    primary view for a (model, view_type) pair."""

    def testDefaultViewBase(self):
        self.View.create({
            'inherit_id': False,
            'priority': 10,
            'mode': 'primary',
            'arch': '<qweb/>',
        })
        view2 = self.View.create({
            'inherit_id': False,
            'priority': 1,
            'mode': 'primary',
            'arch': '<qweb/>',
        })
        default = self.View.default_view(False, 'qweb')
        self.assertEqual(
            default, view2.id,
            "default_view should get the view with the lowest priority for "
            "a (model, view_type) pair"
        )

    def testDefaultViewPrimary(self):
        view1 = self.View.create({
            'inherit_id': False,
            'priority': 10,
            'mode': 'primary',
            'arch': '<qweb/>',
        })
        self.View.create({
            'inherit_id': False,
            'priority': 5,
            'mode': 'primary',
            'arch': '<qweb/>',
        })
        # primary views with an inherit_id also take part in the selection
        view3 = self.View.create({
            'inherit_id': view1.id,
            'priority': 1,
            'mode': 'primary',
            'arch': '<qweb/>',
        })
        default = self.View.default_view(False, 'qweb')
        self.assertEqual(
            default, view3.id,
            "default_view should get the view with the lowest priority for "
            "a (model, view_type) pair in all the primary tables"
        )
class TestViewCombined(ViewCase):
    """
    * When asked for a view, instead of looking for the closest parent with
      inherit_id=False look for mode=primary
    * If root.inherit_id, resolve the arch for root.inherit_id (?using which
      model?), then apply root's inheritance specs to it
    * Apply inheriting views on top
    """
    def setUp(self):
        super(TestViewCombined, self).setUp()

        # model 'a': one base view with two extensions (a2 has higher priority
        # so it is applied after a3) and one primary child (a4)
        self.a1 = self.View.create({
            'model': 'a',
            'arch': '<qweb><a1/></qweb>'
        })
        self.a2 = self.View.create({
            'model': 'a',
            'inherit_id': self.a1.id,
            'priority': 5,
            'arch': '<xpath expr="//a1" position="after"><a2/></xpath>'
        })
        self.a3 = self.View.create({
            'model': 'a',
            'inherit_id': self.a1.id,
            'arch': '<xpath expr="//a1" position="after"><a3/></xpath>'
        })
        # mode=primary should be an inheritance boundary in both direction,
        # even within a model it should not extend the parent
        self.a4 = self.View.create({
            'model': 'a',
            'inherit_id': self.a1.id,
            'mode': 'primary',
            'arch': '<xpath expr="//a1" position="after"><a4/></xpath>',
        })

        # model 'b': primary view derived from an *extension* of 'a'
        self.b1 = self.View.create({
            'model': 'b',
            'inherit_id': self.a3.id,
            'mode': 'primary',
            'arch': '<xpath expr="//a1" position="after"><b1/></xpath>'
        })
        self.b2 = self.View.create({
            'model': 'b',
            'inherit_id': self.b1.id,
            'arch': '<xpath expr="//a1" position="after"><b2/></xpath>'
        })

        # model 'c': primary view derived from the base of 'a', with chained
        # extensions of increasing priority
        self.c1 = self.View.create({
            'model': 'c',
            'inherit_id': self.a1.id,
            'mode': 'primary',
            'arch': '<xpath expr="//a1" position="after"><c1/></xpath>'
        })
        self.c2 = self.View.create({
            'model': 'c',
            'inherit_id': self.c1.id,
            'priority': 5,
            'arch': '<xpath expr="//a1" position="after"><c2/></xpath>'
        })
        self.c3 = self.View.create({
            'model': 'c',
            'inherit_id': self.c2.id,
            'priority': 10,
            'arch': '<xpath expr="//a1" position="after"><c3/></xpath>'
        })

        # model 'd': primary view two cross-model hops away from a1
        self.d1 = self.View.create({
            'model': 'd',
            'inherit_id': self.b1.id,
            'mode': 'primary',
            'arch': '<xpath expr="//a1" position="after"><d1/></xpath>'
        })

    def test_basic_read(self):
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.a1.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.a1(),
                E.a3(),
                E.a2(),
            ), arch)

    def test_read_from_child(self):
        # reading an extension resolves and combines from its primary root
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.a3.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.a1(),
                E.a3(),
                E.a2(),
            ), arch)

    def test_read_from_child_primary(self):
        # a primary child applies its own spec on top of the parent combo
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.a4.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.a1(),
                E.a4(),
                E.a3(),
                E.a2(),
            ), arch)

    def test_cross_model_simple(self):
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.c2.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.a1(),
                E.c3(),
                E.c2(),
                E.c1(),
                E.a3(),
                E.a2(),
            ), arch)

    def test_cross_model_double(self):
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.d1.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.a1(),
                E.d1(),
                E.b2(),
                E.b1(),
                E.a3(),
                E.a2(),
            ), arch)

    def test_primary_after_extensions(self):
        # Here is a tricky use-case:                        a*
        # - views a and d are primary                      / \
        # - views b and c are extensions                  b   c
        # - depth-first order is: a, b, d, c              |
        # - combination order is: a, b, c, d              d*
        #
        # The arch of d has been chosen to fail if d is applied before c.
        # Because this child of 'b' is primary, it must be applied *after* the
        # other extensions of a!
        a = self.View.create({
            'model': 'a',
            'arch': '<qweb><a/></qweb>',
        })
        b = self.View.create({
            'model': 'a',
            'inherit_id': a.id,
            'arch': '<a position="after"><b/></a>'
        })
        c = self.View.create({  # pylint: disable=unused-variable
            'model': 'a',
            'inherit_id': a.id,
            'arch': '<a position="after"><c/></a>'
        })
        d = self.View.create({  # pylint: disable=unused-variable
            'model': 'a',
            'inherit_id': b.id,
            'mode': 'primary',
            'arch': '<a position="replace"/>',
        })
class TestOptionalViews(ViewCase):
    """
    Tests ability to enable/disable inherited views, formerly known as
    inherit_option_id
    """

    def setUp(self):
        super(TestOptionalViews, self).setUp()
        # one base view with three extensions; priorities make v1 apply
        # before v2 before v3; v3 starts disabled
        self.v0 = self.View.create({
            'model': 'a',
            'arch': '<qweb><base/></qweb>',
        })
        self.v1 = self.View.create({
            'model': 'a',
            'inherit_id': self.v0.id,
            'active': True,
            'priority': 10,
            'arch': '<xpath expr="//base" position="after"><v1/></xpath>',
        })
        self.v2 = self.View.create({
            'model': 'a',
            'inherit_id': self.v0.id,
            'active': True,
            'priority': 9,
            'arch': '<xpath expr="//base" position="after"><v2/></xpath>',
        })
        self.v3 = self.View.create({
            'model': 'a',
            'inherit_id': self.v0.id,
            'active': False,
            'priority': 8,
            'arch': '<xpath expr="//base" position="after"><v3/></xpath>'
        })

    def test_applied(self):
        """ mandatory and enabled views should be applied
        """
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.v0.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.base(),
                E.v1(),
                E.v2(),
            )
        )

    def test_applied_state_toggle(self):
        """ Change active states of v2 and v3, check that the results
        are as expected
        """
        # disable v2: only v1 remains applied
        self.v2.toggle_active()
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.v0.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.base(),
                E.v1(),
            )
        )

        # enable v3: v1 and v3 applied
        self.v3.toggle_active()
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.v0.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.base(),
                E.v1(),
                E.v3(),
            )
        )

        # re-enable v2: all three applied
        self.v2.toggle_active()
        context = {'check_view_ids': self.View.search([]).ids}
        arch = self.v0.with_context(context).get_combined_arch()
        self.assertEqual(
            etree.fromstring(arch),
            E.qweb(
                E.base(),
                E.v1(),
                E.v2(),
                E.v3(),
            )
        )
class TestXPathExtentions(common.BaseCase):
    """Tests for the custom ``hasclass()`` XPath extension used in view
    inheritance specs."""

    def test_hasclass(self):
        # Three <node> elements whose class attributes overlap partially:
        # all carry "foo", two carry "bar", one carries "baz".
        tree = E.node(
            E.node({'class': 'foo bar baz'}),
            E.node({'class': 'foo bar'}),
            {'class': "foo"})
        # (xpath expression, expected number of matching nodes)
        expectations = [
            ('//node[hasclass("foo")]', 3),
            ('//node[hasclass("bar")]', 2),
            ('//node[hasclass("baz")]', 1),
            ('//node[hasclass("foo")][not(hasclass("bar"))]', 1),
            # multiple arguments: node must carry every listed class
            ('//node[hasclass("foo", "baz")]', 1),
        ]
        for expression, expected_count in expectations:
            self.assertEqual(len(tree.xpath(expression)), expected_count)
class TestQWebRender(ViewCase):
    """Rendering a qweb view must be identical whether it is requested via
    the base view or via a regular (mode 'extension') child view, and must
    differ for a 'primary' extension — addressed by id or by xmlid alike.
    """
    def test_render(self):
        # base qweb template
        view1 = self.View.create({
            'name': "dummy",
            'type': 'qweb',
            'arch': """
                <t t-name="base.dummy">
                    <div><span>something</span></div>
                </t>
        """
        })
        # regular extension of view1 (default mode 'extension')
        view2 = self.View.create({
            'name': "dummy_ext",
            'type': 'qweb',
            'inherit_id': view1.id,
            'arch': """
                <xpath expr="//div" position="inside">
                    <span>another thing</span>
                </xpath>
            """
        })
        # primary extension: rendered standalone, not folded into view1
        view3 = self.View.create({
            'name': "dummy_primary_ext",
            'type': 'qweb',
            'inherit_id': view1.id,
            'mode': 'primary',
            'arch': """
                <xpath expr="//div" position="inside">
                    <span>another primary thing</span>
                </xpath>
            """
        })
        # render view and child view with an id
        content1 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id])._render(view1.id)
        content2 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id])._render(view2.id)
        self.assertEqual(content1, content2)
        # render view and child view with an xmlid
        # (raw SQL to forge xmlids; %-interpolated values are database ids,
        # i.e. integers, not user input)
        self.env.cr.execute("INSERT INTO ir_model_data(name, model, res_id, module)"
                            "VALUES ('dummy', 'ir.ui.view', %s, 'base')" % view1.id)
        self.env.cr.execute("INSERT INTO ir_model_data(name, model, res_id, module)"
                            "VALUES ('dummy_ext', 'ir.ui.view', %s, 'base')" % view2.id)
        content1 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id])._render('base.dummy')
        content2 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id])._render('base.dummy_ext')
        self.assertEqual(content1, content2)
        # render view and primary extension with an id
        content1 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id, view3.id])._render(view1.id)
        content3 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id, view3.id])._render(view3.id)
        self.assertNotEqual(content1, content3)
        # render view and primary extension with an xmlid
        self.env.cr.execute("INSERT INTO ir_model_data(name, model, res_id, module)"
                            "VALUES ('dummy_primary_ext', 'ir.ui.view', %s, 'base')" % view3.id)
        content1 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id, view3.id])._render('base.dummy')
        content3 = self.env['ir.qweb'].with_context(check_view_ids=[view1.id, view2.id, view3.id])._render('base.dummy_primary_ext')
        self.assertNotEqual(content1, content3)
class TestValidationTools(common.BaseCase):
    """Unit tests for the view_validation helpers that extract identifiers
    out of domain / dict / python expressions embedded in view archs."""
    def test_get_domain_idents(self):
        # returns (field names used on the left-hand side of conditions,
        #          names read from the evaluation context on the right)
        res = view_validation.get_domain_identifiers("['|', ('model', '=', parent.model or need_model), ('need_model', '=', False)]")
        self.assertEqual(res, ({'model', 'need_model'}, {'parent.model', 'need_model'}))
    def test_process_2_level_parents(self):
        # chained parent accesses are kept as a single dotted name
        res = view_validation.get_domain_identifiers("['|', ('model', '=', parent.parent.model)]")
        self.assertEqual(res, ({'model'}, {'parent.parent.model'}))
    def test_get_dict_asts(self):
        # get_dict_asts maps each literal dict key to the AST node of its
        # value, so each value can be inspected separately
        res = view_validation.get_dict_asts("{'test': False, 'required': [('model', '!=', False)], 'invisible': ['|', ('model', '=', parent.model or need_model), ('need_model', '=', False)]}")
        self.assertEqual(set(res.keys()), set(['test', 'required', 'invisible']))
        self.assertIsInstance(res['test'], ast.NameConstant)
        self.assertIsInstance(res['required'], ast.List)
        self.assertIsInstance(res['invisible'], ast.List)
        # the AST value can be fed back into get_domain_identifiers
        self.assertEqual(view_validation.get_domain_identifiers(res['invisible']), ({'model', 'need_model'}, {'parent.model', 'need_model'}))
    def test_get_expression_identities(self):
        # get_variable_names must list free variables only, ignoring
        # builtins-of-the-eval-context such as context_today()
        self.assertEqual(
            view_validation.get_variable_names("context_today().strftime('%Y-%m-%d')"),
            set(),
        )
        self.assertEqual(
            view_validation.get_variable_names("field and field[0] or not field2"),
            {'field', 'field2'},
        )
        self.assertEqual(
            view_validation.get_variable_names("context_today().strftime('%Y-%m-%d') or field"),
            {'field'},
        )
        self.assertEqual(
            view_validation.get_variable_names("(datetime.datetime.combine(context_today(), datetime.time(x,y,z)).to_utc()).strftime('%Y-%m-%d %H:%M:%S')"),
            {'x', 'y', 'z'},
        )
class TestAccessRights(common.TransactionCase):
    """Access to ir.ui.view records vs. access to rendered view descriptions."""
    @common.users('demo')
    def test_access(self):
        # a plain user may not read ir.ui.view records directly
        with self.assertRaises(AccessError):
            self.env['ir.ui.view'].search([("model", '=', "res.partner"), ('type', '=', 'form')])
        # but may call fields_view_get on a model they can read
        self.env['res.partner'].fields_view_get(view_type='form')
        # fields_view_get is still denied when the user has no access to the
        # underlying model itself (here: ir.ui.view)
        with self.assertRaises(AccessError):
            self.env['ir.ui.view'].fields_view_get(view_type='form')
@common.tagged('post_install', '-at_install', '-standard', 'migration')
class TestAllViews(common.TransactionCase):
    """Validate the arch of every view in the database; opt-in test
    ('-standard'), mainly useful after migrations."""
    def test_views(self):
        views = self.env['ir.ui.view'].with_context(lang=None).search([])
        for index, view in enumerate(views):
            # progress logging: one line every 500 views
            if index % 500 == 0:
                _logger.info('checked %s/%s views', index, len(views))
            with self.subTest(name=view.name):
                view._check_xml()
@common.tagged('post_install', '-at_install', '-standard', 'render_all_views')
class TestRenderAllViews(common.TransactionCase):
    """Time fields_view_get on every readable, non-abstract model, both as
    demo and as admin; opt-in test ('-standard')."""
    @common.users('demo', 'admin')
    def test_render_all_views(self):
        env = self.env(context={'lang': 'en_US'})
        count = 0
        elapsed = 0
        for model in env.values():
            # raise_exception=False: skip models the user cannot read
            if not model._abstract and model.check_access_rights('read', False):
                with self.subTest(model=model):
                    times = []
                    # best-of-5 timing, cache invalidated before each run
                    for _ in range(5):
                        model.invalidate_cache()
                        before = time.perf_counter()
                        model.fields_view_get()
                        times.append(time.perf_counter() - before)
                    count += 1
                    elapsed += min(times)
        _logger.info('Rendered %d views as %s using (best of 5) %ss',
                     count, self.env.user.name, elapsed)
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
from odoo import Command
class TestHasGroup(TransactionCase):
    """Tests for res.users.has_group() and for the constraints tying each
    user to a single "user type" group (internal / portal / public),
    including the propagation of implied groups."""

    def setUp(self):
        # two ad-hoc groups; the test user starts out in group0 only
        super(TestHasGroup, self).setUp()
        self.group0 = 'test_user_has_group.group0'
        self.group1 = 'test_user_has_group.group1'
        group0, group1 = self.env['res.groups']._load_records([
            dict(xml_id=self.group0, values={'name': 'group0'}),
            dict(xml_id=self.group1, values={'name': 'group1'}),
        ])
        self.test_user = self.env['res.users'].create({
            'login': 'testuser',
            'partner_id': self.env['res.partner'].create({
                'name': "Strawman Test User"
            }).id,
            'groups_id': [Command.set([group0.id])]
        })
        self.grp_internal_xml_id = 'base.group_user'
        self.grp_internal = self.env.ref(self.grp_internal_xml_id)
        self.grp_portal_xml_id = 'base.group_portal'
        self.grp_portal = self.env.ref(self.grp_portal_xml_id)
        self.grp_public_xml_id = 'base.group_public'
        self.grp_public = self.env.ref(self.grp_public_xml_id)

    def test_env_uid(self):
        """has_group() called on an empty recordset checks the env user."""
        Users = self.env['res.users'].with_user(self.test_user)
        self.assertTrue(
            Users.has_group(self.group0),
            "the test user should belong to group0"
        )
        self.assertFalse(
            Users.has_group(self.group1),
            "the test user should *not* belong to group1"
        )

    def test_record(self):
        """has_group() called on a singleton checks that record's user."""
        self.assertTrue(
            self.test_user.has_group(self.group0),
            "the test user should belong to group0",
        )
        self.assertFalse(
            self.test_user.has_group(self.group1),
            # fixed typo in the assertion message ("shoudl")
            "the test user should not belong to group1"
        )

    def test_portal_creation(self):
        """Here we check that portal user creation fails if it tries to create a user
        who would also have group_user by implied_group.
        Otherwise, it succeeds with the groups we asked for.
        """
        grp_public = self.env.ref('base.group_public')
        grp_test_portal_xml_id = 'test_user_has_group.portal_implied_group'
        grp_test_portal = self.env['res.groups']._load_records([
            dict(xml_id=grp_test_portal_xml_id, values={'name': 'Test Group Portal'})
        ])
        grp_test_internal1 = self.env['res.groups']._load_records([
            dict(xml_id='test_user_has_group.internal_implied_group1', values={'name': 'Test Group Internal 1'})
        ])
        grp_test_internal2_xml_id = 'test_user_has_group.internal_implied_group2'
        grp_test_internal2 = self.env['res.groups']._load_records([
            dict(xml_id=grp_test_internal2_xml_id, values={'name': 'Test Group Internal 2'})
        ])
        self.grp_portal.implied_ids = grp_test_portal
        # no implications yet: creating the portal user must succeed
        grp_test_internal1.implied_ids = False
        grp_test_internal2.implied_ids = False
        # 'sel_groups_*' pseudo-fields mimic the radio selection widgets of
        # the user form view
        portal_user = self.env['res.users'].create({
            'login': 'portalTest',
            'name': 'Portal test',
            'sel_groups_%s_%s_%s' % (self.grp_internal.id, self.grp_portal.id, grp_public.id): self.grp_portal.id,
            'sel_groups_%s_%s' % (grp_test_internal1.id, grp_test_internal2.id): grp_test_internal2.id,
        })
        self.assertTrue(
            portal_user.has_group(self.grp_portal_xml_id),
            "The portal user should belong to '%s'" % self.grp_portal_xml_id,
        )
        self.assertTrue(
            portal_user.has_group(grp_test_portal_xml_id),
            "The portal user should belong to '%s'" % grp_test_portal_xml_id,
        )
        self.assertTrue(
            portal_user.has_group(grp_test_internal2_xml_id),
            "The portal user should belong to '%s'" % grp_test_internal2_xml_id
        )
        self.assertFalse(
            portal_user.has_group(self.grp_internal_xml_id),
            "The portal user should not belong to '%s'" % self.grp_internal_xml_id
        )
        portal_user.unlink()  # otherwise, badly modifying the implication would raise
        grp_test_internal1.implied_ids = self.grp_internal
        grp_test_internal2.implied_ids = self.grp_internal
        with self.assertRaises(ValidationError):  # current group implications forbid to create a portal user
            portal_user = self.env['res.users'].create({
                'login': 'portalFail',
                'name': 'Portal fail',
                'sel_groups_%s_%s_%s' % (self.grp_internal.id, self.grp_portal.id, grp_public.id): self.grp_portal.id,
                'sel_groups_%s_%s' % (grp_test_internal1.id, grp_test_internal2.id): grp_test_internal2.id,
            })

    def test_portal_write(self):
        """Check that adding a new group to a portal user works as expected,
        except if it implies group_user/public, in which case it should raise.
        """
        grp_test_portal = self.env["res.groups"].create({"name": "implied by portal"})
        self.grp_portal.implied_ids = grp_test_portal
        portal_user = self.env['res.users'].create({
            'login': 'portalTest2',
            'name': 'Portal test 2',
            'groups_id': [Command.set([self.grp_portal.id])],
        })
        self.assertEqual(
            portal_user.groups_id, (self.grp_portal + grp_test_portal),
            "The portal user should have the implied group.",
        )
        grp_fail = self.env["res.groups"].create(
            {"name": "fail", "implied_ids": [Command.set([self.grp_internal.id])]})
        with self.assertRaises(ValidationError):
            portal_user.write({'groups_id': [Command.link(grp_fail.id)]})

    def test_two_user_types(self):
        # Create a user with two groups of user types kind (Internal and Portal)
        grp_test = self.env['res.groups']._load_records([
            dict(xml_id='test_two_user_types.implied_groups', values={'name': 'Test Group'})
        ])
        grp_test.implied_ids += self.grp_internal
        grp_test.implied_ids += self.grp_portal
        with self.assertRaises(ValidationError):
            self.env['res.users'].create({
                'login': 'test_two_user_types',
                'name': "Test User with two user types",
                'groups_id': [Command.set([grp_test.id])]
            })
        # Add a user with portal to the group Internal
        test_user = self.env['res.users'].create({
            'login': 'test_user_portal',
            'name': "Test User with two user types",
            'groups_id': [Command.set([self.grp_portal.id])]
        })
        with self.assertRaises(ValidationError):
            self.grp_internal.users = [Command.link(test_user.id)]

    def test_two_user_types_implied_groups(self):
        """Contrarily to test_two_user_types, we simply add an implied_id to a group.
        This will trigger the addition of the relevant users to the relevant groups;
        if, say, this was done in SQL and thus bypassing the ORM, it would bypass the constraints
        and thus give us a case uncovered by the aforementioned test.
        """
        grp_test = self.env["res.groups"].create(
            {"name": "test", "implied_ids": [Command.set([self.grp_internal.id])]})
        test_user = self.env['res.users'].create({
            'login': 'test_user_portal',
            'name': "Test User with one user types",
            'groups_id': [Command.set([grp_test.id])]
        })
        with self.assertRaises(ValidationError):
            grp_test.write({'implied_ids': [Command.link(self.grp_portal.id)]})

    def test_demote_user(self):
        """When a user is demoted to the status of portal/public,
        we should strip him of all his (previous) rights
        """
        group_0 = self.env.ref(self.group0)  # the group to which test_user already belongs
        group_U = self.env["res.groups"].create({"name": "U", "implied_ids": [Command.set([self.grp_internal.id])]})
        self.grp_internal.implied_ids = False  # only there to simplify the test by not having to care about its trans_implied_ids
        self.test_user.write({'groups_id': [Command.link(group_U.id)]})
        self.assertEqual(
            self.test_user.groups_id, (group_0 + group_U + self.grp_internal),
            "We should have our 2 groups and the implied user group",
        )
        # Now we demote him. The JS framework sends 3 and 4 commands,
        # which is what we write here, but it should work even with a 5 command or whatever.
        self.test_user.write({'groups_id': [
            Command.unlink(self.grp_internal.id),
            Command.unlink(self.grp_public.id),
            Command.link(self.grp_portal.id),
        ]})
        # if we screw up the removing groups/adding the implied ids, we could end up in two situations:
        # 1. we have a portal user with way too much rights (e.g. 'Contact Creation', which does not imply any other group)
        # 2. because a group may be (transitively) implying group_user, then it would raise an exception
        # so as a compromise we remove all groups when demoting a user
        # (even technical display groups, e.g. TaxB2B, which could be re-added later)
        self.assertEqual(
            self.test_user.groups_id, (self.grp_portal),
            "Here the portal group does not imply any other group, so we should only have this group.",
        )

    def test_implied_groups(self):
        """ We check that the adding of implied ids works correctly for normal users and portal users.
        In the second case, working normally means raising if a group implies to give 'group_user'
        rights to a portal user.
        """
        U = self.env["res.users"]
        G = self.env["res.groups"]
        group_user = self.env.ref('base.group_user')
        group_portal = self.env.ref('base.group_portal')
        group_no_one = self.env.ref('base.group_no_one')
        group_A = G.create({"name": "A"})
        group_AA = G.create({"name": "AA", "implied_ids": [Command.set([group_A.id])]})
        group_B = G.create({"name": "B"})
        group_BB = G.create({"name": "BB", "implied_ids": [Command.set([group_B.id])]})
        # user_a is a normal user, so we expect groups to be added when we add them,
        # as well as 'implied_groups'; otherwise nothing else should happen.
        # By contrast, for a portal user we want implied groups not to be added
        # if and only if it would not give group_user (or group_public) privileges
        user_a = U.create({"name": "a", "login": "a", "groups_id": [Command.set([group_AA.id, group_user.id])]})
        self.assertEqual(user_a.groups_id, (group_AA + group_A + group_user + group_no_one))
        user_b = U.create({"name": "b", "login": "b", "groups_id": [Command.set([group_portal.id, group_AA.id])]})
        self.assertEqual(user_b.groups_id, (group_AA + group_A + group_portal))
        # user_b is not an internal user, but giving it a new group just added a new group
        (user_a + user_b).write({"groups_id": [Command.link(group_BB.id)]})
        self.assertEqual(user_a.groups_id, (group_AA + group_A + group_BB + group_B + group_user + group_no_one))
        self.assertEqual(user_b.groups_id, (group_AA + group_A + group_BB + group_B + group_portal))
        # now we create a group that implies the group_user
        # adding it to a user should work normally, whereas adding it to a portal user should raise
        group_C = G.create({"name": "C", "implied_ids": [Command.set([group_user.id])]})
        user_a.write({"groups_id": [Command.link(group_C.id)]})
        self.assertEqual(user_a.groups_id, (group_AA + group_A + group_BB + group_B + group_C + group_user + group_no_one))
        with self.assertRaises(ValidationError):
            user_b.write({"groups_id": [Command.link(group_C.id)]})

    def test_has_group_cleared_cache_on_write(self):
        """has_group() is ormcached; any write on res.groups must flush it."""
        self.registry._clear_cache()
        self.assertFalse(self.registry._Registry__cache, "Ensure ormcache is empty")

        def populate_cache():
            self.test_user.has_group('test_user_has_group.group0')
            self.assertTrue(self.registry._Registry__cache, "user.has_group cache must be populated")

        populate_cache()
        self.env.ref(self.group0).write({"share": True})
        self.assertFalse(self.registry._Registry__cache, "Writing on group must invalidate user.has_group cache")
        populate_cache()
        # call_cache_clearing_methods is called in res.groups.write to invalidate
        # cache before calling its parent class method (`odoo.models.Model.write`)
        # as explained in the `res.group.write` comment.
        # This verifies that calling `call_cache_clearing_methods()` invalidates
        # the ormcache of method `user.has_group()`
        self.env['ir.model.access'].call_cache_clearing_methods()
        self.assertFalse(
            self.registry._Registry__cache,
            "call_cache_clearing_methods() must invalidate user.has_group cache"
        )
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
class TestSingleTransactionCase(common.SingleTransactionCase):
    """
    Check the whole-class transaction behavior of SingleTransactionCase.
    """
    def test_00(self):
        """ Create a partner. """
        self.env['res.partner'].create({'name': 'test_per_class_teardown_partner'})
        partners = self.env['res.partner'].search([('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(partners), "Test partner not found.")
    def test_01(self):
        """ Find the created partner. """
        # the record created in test_00 is still visible here: all tests of
        # the class run in one shared transaction (tests run in name order)
        partners = self.env['res.partner'].search([('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(partners), "Test partner not found.")
    def test_20a(self):
        """ Create a partner with a XML ID """
        pid, _ = self.env['res.partner'].name_create('Mr Blue')
        self.env['ir.model.data'].create({'name': 'test_partner_blue',
                                          'module': 'base',
                                          'model': 'res.partner',
                                          'res_id': pid})
    def test_20b(self):
        """ Resolve xml id with ref() and browse_ref() """
        # resolves the xmlid created in test_20a, relying again on the
        # shared transaction
        xid = 'base.test_partner_blue'
        partner = self.env.ref(xid)
        pid = self.ref(xid)
        self.assertTrue(pid, "ref() should resolve xid to database ID")
        self.assertEqual(pid, partner.id, "ref() is not consistent with env.ref()")
        partner2 = self.browse_ref(xid)
        self.assertEqual(partner, partner2, "browse_ref() should resolve xid to browse records")
class TestTransactionCase(common.TransactionCase):
    """
    Check the per-method transaction behavior of TransactionCase.
    """
    def test_00(self):
        """ Create a partner. """
        # starts from a clean state: nothing leaked from other tests
        partners = self.env['res.partner'].search([('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(partners), "Test partner found.")
        self.env['res.partner'].create({'name': 'test_per_class_teardown_partner'})
        partners = self.env['res.partner'].search([('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(partners), "Test partner not found.")
    def test_01(self):
        """ Don't find the created partner. """
        # test_00's record was rolled back with its own transaction
        partners = self.env['res.partner'].search([('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(partners), "Test partner found.")
    def test_20a(self):
        """ Create a partner with a XML ID then resolve xml id with ref() and browse_ref() """
        # unlike the SingleTransactionCase variant, create and resolve must
        # happen in the same test since each test gets its own transaction
        pid, _ = self.env['res.partner'].name_create('Mr Yellow')
        self.env['ir.model.data'].create({'name': 'test_partner_yellow',
                                          'module': 'base',
                                          'model': 'res.partner',
                                          'res_id': pid})
        xid = 'base.test_partner_yellow'
        partner = self.env.ref(xid)
        pid = self.ref(xid)
        self.assertEqual(pid, partner.id, "ref() should resolve xid to database ID")
        partner2 = self.browse_ref(xid)
        self.assertEqual(partner, partner2, "browse_ref() should resolve xid to browse records")
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import odoo
import odoo.tests
_logger = logging.getLogger(__name__)
@odoo.tests.tagged('post_install', '-at_install')
class TestReports(odoo.tests.TransactionCase):
    def test_reports(self):
        """Render every *generic* qweb report on up to 10 records of its
        model, as a smoke test of report generation."""
        domain = [('report_type', 'like', 'qweb')]
        for report in self.env['ir.actions.report'].search(domain):
            report_model = 'report.%s' % report.report_name
            try:
                self.env[report_model]
            except KeyError:
                pass
            else:
                # a dedicated report model exists: not a generic report,
                # it is expected to be tested in its own module
                continue
            _logger.info("testing report %s", report.report_name)
            report_records = self.env[report.model].search([], limit=10)
            if not report_records:
                # bugfix: previously this logged "skipping" but fell through
                # and rendered the report on an empty recordset anyway
                _logger.info("no record found skipping report %s", report.report_name)
                continue
            if not report.multi:
                # non-multi reports are rendered for a single record
                report_records = report_records[:1]
            # Test report generation
            report._render_qweb_html(report_records.ids)

    def test_barcode_check_digit(self):
        """get_barcode_check_digit returns the EAN check digit of a numeric
        string; inputs here are zero-padded to 18 digits."""
        ean8 = "87111125"
        self.assertEqual(self.env['ir.actions.report'].get_barcode_check_digit("0" * 10 + ean8), int(ean8[-1]))
        ean13 = "1234567891231"
        self.assertEqual(self.env['ir.actions.report'].get_barcode_check_digit("0" * 5 + ean13), int(ean13[-1]))

    def test_barcode_encoding(self):
        """check_barcode_encoding validates length, digits-only content and
        the EAN check digit for ean8/ean13."""
        self.assertTrue(self.env['ir.actions.report'].check_barcode_encoding('20220006', 'ean8'))
        self.assertTrue(self.env['ir.actions.report'].check_barcode_encoding('93855341', 'ean8'))
        self.assertTrue(self.env['ir.actions.report'].check_barcode_encoding('2022071416014', 'ean13'))
        self.assertTrue(self.env['ir.actions.report'].check_barcode_encoding('9745213796142', 'ean13'))
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('2022a006', 'ean8'), 'should contains digits only')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('20220000', 'ean8'), 'incorrect check digit')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('93855341', 'ean13'), 'ean13 is a 13-digits barcode')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('9745213796142', 'ean8'), 'ean8 is a 8-digits barcode')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('9745213796148', 'ean13'), 'incorrect check digit')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('2022!71416014', 'ean13'), 'should contains digits only')
        self.assertFalse(self.env['ir.actions.report'].check_barcode_encoding('0022071416014', 'ean13'), 'when starting with one zero, it indicates that a 12-digit UPC-A code follows')
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.exceptions import AccessError
from odoo.tests.common import BaseCase, TransactionCase, tagged, new_test_user
from odoo.tools import profiler
from odoo.tools.profiler import Profiler, ExecutionContext
from odoo.tools.speedscope import Speedscope
@tagged('post_install', '-at_install', 'profiling')
# post_install to ensure mail is already loaded if installed (new_test_user would fail otherwise because of notification_type)
class TestProfileAccess(TransactionCase):
    """ir.profile records are readable by admin but not by regular users."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.test_profile = cls.env['ir.profile'].create({})
    def test_admin_has_access(self):
        # admin (default test env user) can both search and read profiles
        self.assertEqual(self.env['ir.profile'].search([('id', '=', self.test_profile.id)]), self.test_profile)
        self.test_profile.read(['name'])
    def test_user_no_access(self):
        user = new_test_user(self.env, login='noProfile', groups='base.group_user')
        # a plain internal user is denied both search and direct read
        with self.with_user('noProfile'), self.assertRaises(AccessError):
            self.env['ir.profile'].search([])
        with self.assertRaises(AccessError):
            self.test_profile.with_user(user).read(['name'])
@tagged('post_install', '-at_install', 'profiling')
class TestSpeedscope(BaseCase):
    """Tests for the conversion of profiler samples into the speedscope
    JSON format (shared frame table + open/close 'O'/'C' events)."""
    def example_profile(self):
        """Return a hand-written profile: an init stack trace plus a list of
        samples; each sample has a start time, an exec_context and a stack
        (None marks the final sample)."""
        return {
            'init_stack_trace': [['/path/to/file_1.py', 135, '__main__', 'main()']],
            'result': [{ # init frame
                'start': 2.0,
                'exec_context': (),
                'stack': [
                    ['/path/to/file_1.py', 10, 'main', 'do_stuff1(test=do_tests)'],
                    ['/path/to/file_1.py', 101, 'do_stuff1', 'cr.execute(query, params)'],
                ],
            }, {
                'start': 3.0,
                'exec_context': (),
                'stack': [
                    ['/path/to/file_1.py', 10, 'main', 'do_stuff1(test=do_tests)'],
                    ['/path/to/file_1.py', 101, 'do_stuff1', 'cr.execute(query, params)'],
                    ['/path/to/sql_db.py', 650, 'execute', 'res = self._obj.execute(query, params)'],
                ],
            }, { # duplicate frame
                'start': 4.0,
                'exec_context': (),
                'stack': [
                    ['/path/to/file_1.py', 10, 'main', 'do_stuff1(test=do_tests)'],
                    ['/path/to/file_1.py', 101, 'do_stuff1', 'cr.execute(query, params)'],
                    ['/path/to/sql_db.py', 650, 'execute', 'res = self._obj.execute(query, params)'],
                ],
            }, { # other frame
                'start': 6.0,
                'exec_context': (),
                'stack': [
                    ['/path/to/file_1.py', 10, 'main', 'do_stuff1(test=do_tests)'],
                    ['/path/to/file_1.py', 101, 'do_stuff1', 'check'],
                    ['/path/to/sql_db.py', 650, 'check', 'assert x = y'],
                ],
            }, { # out of frame
                'start': 10.0,
                'exec_context': (),
                'stack': [
                    ['/path/to/file_1.py', 10, 'main', 'do_stuff1(test=do_tests)'],
                    ['/path/to/file_1.py', 101, 'do_stuff1', 'for i in range(10):'],
                ],
            }, { # final frame
                'start': 10.35,
                'exec_context': (),
                'stack': None,
            }],
        }
    def test_convert_empty(self):
        # smoke test: converting with no input must not raise
        Speedscope().make()
    def test_converts_profile_simple(self):
        """Samples sharing a stack prefix are merged into open/close events."""
        profile = self.example_profile()
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=False)
        res = sp.make()
        frames = res['shared']['frames']
        self.assertEqual(len(frames), 4)
        profile_combined = res['profiles'][0]
        events = [(e['type'], e['frame']) for e in profile_combined['events']]
        self.assertEqual(events, [
            ('O', 0),  # /main
            ('O', 1),  # /main/do_stuff1
            ('O', 2),  # /main/do_stuff1/execute
            ('C', 2),  # /main/do_stuff1
            ('O', 3),  # /main/do_stuff1/check
            ('C', 3),  # /main/do_stuff1
            ('C', 1),  # /main
            ('C', 0),  # /
        ])
        # timestamps are rebased on the first sample (start=2.0)
        self.assertEqual(profile_combined['events'][0]['at'], 0.0)
        self.assertEqual(profile_combined['events'][-1]['at'], 8.35)
    def test_converts_profile_no_end(self):
        """Without a final None-stack sample, frames close at the last tick."""
        profile = self.example_profile()
        profile['result'].pop()
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=False)
        res = sp.make()
        profile_combined = res['profiles'][0]
        events = [(e['type'], e['frame']) for e in profile_combined['events']]
        self.assertEqual(events, [
            ('O', 0),  # /main
            ('O', 1),  # /main/do_stuff1
            ('O', 2),  # /main/do_stuff1/execute
            ('C', 2),  # /main/do_stuff1
            ('O', 3),  # /main/do_stuff1/check
            ('C', 3),  # /main/do_stuff1
            ('C', 1),  # /main
            ('C', 0),  # /
        ])
        self.assertEqual(profile_combined['events'][-1]['at'], 8)
    def test_converts_init_stack_trace(self):
        """complete=True prepends the init stack trace to every sample."""
        profile = self.example_profile()
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=True)
        res = sp.make()
        profile_combined = res['profiles'][0]
        events = [(e['type'], e['frame']) for e in profile_combined['events']]
        self.assertEqual(events, [
            ('O', 4),  # /__main__/
            ('O', 0),  # /__main__/main
            ('O', 1),  # /__main__/main/do_stuff1
            ('O', 2),  # /__main__/main/do_stuff1/execute
            ('C', 2),  # /__main__/main/do_stuff1
            ('O', 3),  # /__main__/main/do_stuff1/check
            ('C', 3),  # /__main__/main/do_stuff1
            ('C', 1),  # /__main__/main
            ('C', 0),  # /__main__/
            ('C', 4),  # /
        ])
        self.assertEqual(profile_combined['events'][-1]['at'], 8.35)
    def test_end_priority(self):
        """
        If a sample has a time (usually a query) we expect to keep the complete frame
        even if another concurrent frame ticks before the end of the current one:
        frame duration should always be more reliable.
        """
        async_profile = self.example_profile()['result']
        sql_profile = self.example_profile()['result']
        # make sql_profile a single frame from 2.5 to 5.5
        sql_profile = [sql_profile[1]]
        sql_profile[0]['start'] = 2.5
        sql_profile[0]['time'] = 3
        sql_profile[0]['query'] = 'SELECT 1'
        sql_profile[0]['full_query'] = 'SELECT 1'
        # some checks to ensure the setup makes sense
        self.assertEqual(async_profile[1]['start'], 3)
        self.assertEqual(async_profile[2]['start'], 4)
        self.assertNotIn('query', async_profile[1]['stack'])
        self.assertNotIn('time', async_profile[1]['stack'])
        self.assertEqual(async_profile[1]['stack'], async_profile[2]['stack'])
        # this last assertion is not really useful but ensures that the samples
        # are consistent with the sql one, just missing the query
        sp = Speedscope(init_stack_trace=[])
        sp.add('sql', async_profile)
        sp.add('traces', sql_profile)
        sp.add_output(['sql', 'traces'], complete=False)
        res = sp.make()
        profile_combined = res['profiles'][0]
        # shift 'at' back by the 2.0 rebasing offset for readability
        events = [
            (e['at']+2, e['type'], res['shared']['frames'][e['frame']]['name'])
            for e in profile_combined['events']
        ]
        self.assertEqual(events, [
            # pylint: disable=bad-continuation
            (2.0, 'O', 'main'),
            (2.0, 'O', 'do_stuff1'),
            (2.5, 'O', 'execute'),
            (2.5, 'O', "sql('SELECT 1')"),
            (5.5, 'C', "sql('SELECT 1')"),  # select ends at 5.5 as expected despite another concurrent frame at 3 and 4
            (5.5, 'C', 'execute'),
            (6.0, 'O', 'check'),
            (10.0, 'C', 'check'),
            (10.35, 'C', 'do_stuff1'),
            (10.35, 'C', 'main'),
        ])
    def test_converts_context(self):
        """exec_context entries become pseudo-frames interleaved into the
        stack at the depth they were recorded at."""
        stack = [
            ['file.py', 10, 'level1', 'level1'],
            ['file.py', 11, 'level2', 'level2'],
        ]
        profile = {
            'init_stack_trace': [['file.py', 1, 'level0', 'level0)']],
            'result': [{ # init frame
                'start': 2.0,
                'exec_context': ((2, {'a': '1'}), (3, {'b': '1'})),
                'stack': list(stack),
            }, {
                'start': 3.0,
                'exec_context': ((2, {'a': '1'}), (3, {'b': '2'})),
                'stack': list(stack),
            }, { # final frame
                'start': 10.35,
                'exec_context': (),
                'stack': None,
            }],
        }
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=True)
        res = sp.make()
        events = [
            (e['type'], res['shared']['frames'][e['frame']]['name'])
            for e in res['profiles'][0]['events']
        ]
        # when only b changes, frames below b are closed and reopened while
        # a=1 stays open
        self.assertEqual(events, [
            # pylint: disable=bad-continuation
            ('O', 'level0'),
            ('O', 'a=1'),
            ('O', 'level1'),
            ('O', 'b=1'),
            ('O', 'level2'),
            ('C', 'level2'),
            ('C', 'b=1'),
            ('O', 'b=2'),
            ('O', 'level2'),
            ('C', 'level2'),
            ('C', 'b=2'),
            ('C', 'level1'),
            ('C', 'a=1'),
            ('C', 'level0'),
        ])
    def test_converts_context_nested(self):
        """Two contexts recorded at the same stack depth nest in order."""
        stack = [
            ['file.py', 10, 'level1', 'level1'],
            ['file.py', 11, 'level2', 'level2'],
        ]
        profile = {
            'init_stack_trace': [['file.py', 1, 'level0', 'level0)']],
            'result': [{ # init frame
                'start': 2.0,
                'exec_context': ((3, {'a': '1'}), (3, {'b': '1'})),  # two contexts at the same level
                'stack': list(stack),
            }, { # final frame
                'start': 10.35,
                'exec_context': (),
                'stack': None,
            }],
        }
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=True)
        res = sp.make()
        events = [
            (e['type'], res['shared']['frames'][e['frame']]['name'])
            for e in res['profiles'][0]['events']
        ]
        self.assertEqual(events, [
            # pylint: disable=bad-continuation
            ('O', 'level0'),
            ('O', 'level1'),
            ('O', 'a=1'),
            ('O', 'b=1'),
            ('O', 'level2'),
            ('C', 'level2'),
            ('C', 'b=1'),
            ('C', 'a=1'),
            ('C', 'level1'),
            ('C', 'level0'),
        ])
    def test_converts_context_lower(self):
        """With complete=False, a context recorded below the init stack depth
        is dropped; one recorded inside the sampled stack is kept."""
        stack = [
            ['file.py', 10, 'level4', 'level4'],
            ['file.py', 11, 'level5', 'level5'],
        ]
        profile = {
            'init_stack_trace': [
                ['file.py', 1, 'level0', 'level0'],
                ['file.py', 1, 'level1', 'level1'],
                ['file.py', 1, 'level2', 'level2'],
                ['file.py', 1, 'level3', 'level3'],
            ],
            'result': [{ # init frame
                'start': 2.0,
                'exec_context': ((2, {'a': '1'}), (6, {'b': '1'})),
                'stack': list(stack),
            }, { # final frame
                'start': 10.35,
                'exec_context': (),
                'stack': None,
            }],
        }
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=False)
        res = sp.make()
        events = [
            (e['type'], res['shared']['frames'][e['frame']]['name'])
            for e in res['profiles'][0]['events']
        ]
        self.assertEqual(events, [
            # pylint: disable=bad-continuation
            ('O', 'level4'),
            ('O', 'b=1'),
            ('O', 'level5'),
            ('C', 'level5'),
            ('C', 'b=1'),
            ('C', 'level4'),
        ])
    def test_converts_no_context(self):
        """use_context=False drops all exec_context pseudo-frames."""
        stack = [
            ['file.py', 10, 'level4', 'level4'],
            ['file.py', 11, 'level5', 'level5'],
        ]
        profile = {
            'init_stack_trace': [
                ['file.py', 1, 'level0', 'level0'],
                ['file.py', 1, 'level1', 'level1'],
                ['file.py', 1, 'level2', 'level2'],
                ['file.py', 1, 'level3', 'level3'],
            ],
            'result': [{ # init frame
                'start': 2.0,
                'exec_context': ((2, {'a': '1'}), (6, {'b': '1'})),
                'stack': list(stack),
            }, { # final frame
                'start': 10.35,
                'exec_context': (),
                'stack': None,
            }],
        }
        sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
        sp.add('profile', profile['result'])
        sp.add_output(['profile'], complete=False, use_context=False)
        res = sp.make()
        events = [
            (e['type'], res['shared']['frames'][e['frame']]['name'])
            for e in res['profiles'][0]['events']
        ]
        self.assertEqual(events, [
            # pylint: disable=bad-continuation
            ('O', 'level4'),
            ('O', 'level5'),
            ('C', 'level5'),
            ('C', 'level4'),
        ])
@tagged('post_install', '-at_install', 'profiling')
class TestProfiling(TransactionCase):
    """Integration tests for the Profiler: database resolution, execution
    contexts, and the 'sql', 'traces_sync' and 'qweb' collectors."""
    def test_default_values(self):
        """A bare Profiler picks up the current cursor's database name."""
        p = Profiler()
        self.assertEqual(p.db, self.env.cr.dbname)
    def test_env_profiler_database(self):
        """The database name is resolved even with an empty collector list."""
        p = Profiler(collectors=[])
        self.assertEqual(p.db, self.env.cr.dbname)
    def test_env_profiler_description(self):
        """The profiler description mentions the calling test method's name."""
        with Profiler(collectors=[], db=None) as p:
            self.assertIn('test_env_profiler_description', p.description)
    def test_execution_context_save(self):
        """Each sql entry records the ExecutionContext active when it ran."""
        with Profiler(db=None, collectors=['sql']) as p:
            for letter in ('a', 'b'):
                stack_level = profiler.stack_size()
                with ExecutionContext(letter=letter):
                    self.env.cr.execute('SELECT 1')
        entries = p.collectors[0].entries
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'a'}),))
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'b'}),))
    def test_execution_context_nested(self):
        """
        This test checks that an execution can be nested at the same level of the stack.
        """
        with Profiler(db=None, collectors=['sql']) as p:
            stack_level = profiler.stack_size()
            with ExecutionContext(letter='a'):
                self.env.cr.execute('SELECT 1')
                with ExecutionContext(letter='b'):
                    self.env.cr.execute('SELECT 1')
                with ExecutionContext(letter='c'):
                    self.env.cr.execute('SELECT 1')
                self.env.cr.execute('SELECT 1')
        entries = p.collectors[0].entries
        # contexts are reported outermost first; siblings b/c each stack with a
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'a'}),))
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'a'}), (stack_level, {'letter': 'b'})))
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'a'}), (stack_level, {'letter': 'c'})))
        self.assertEqual(entries.pop(0)['exec_context'], ((stack_level, {'letter': 'a'}),))
    def test_sync_recorder(self):
        """The traces_sync collector snapshots the stack on each call/return."""
        def a():
            b()
            c()
        def b():
            pass
        def c():
            d()
            d()
        def d():
            pass
        with Profiler(description='test', collectors=['traces_sync'], db=None) as p:
            a()
        stacks = [r['stack'] for r in p.collectors[0].entries]
        # map stack frames to their function name, and check
        stacks_methods = [[frame[2] for frame in stack] for stack in stacks]
        self.assertEqual(stacks_methods, [
            ['a'],
            ['a', 'b'],
            ['a'],
            ['a', 'c'],
            ['a', 'c', 'd'],
            ['a', 'c'],
            ['a', 'c', 'd'],
            ['a', 'c'],
            ['a'],
            [],
            ['__exit__'],
            ['__exit__', 'stop'] # could be removed by cleaning two last frames, or removing last frames only contained in profiler.py
        ])
        # map stack frames to their line number, and check
        stacks_lines = [[frame[1] for frame in stack] for stack in stacks]
        self.assertEqual(stacks_lines[1][0] + 1, stacks_lines[3][0],
                         "Call of b() in a() should be one line before call of c()")
    def test_qweb_recorder(self):
        """The qweb collector tracks every directive with its view id, xpath
        and per-directive query count."""
        template = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '''<t t-name="root">
            <t t-foreach="{'a': 3, 'b': 2, 'c': 1}" t-as="item">
                [<t t-esc="item_index"/>: <t t-call="base.dummy"/> <t t-esc="item_value"/>]
                <b t-esc="add_one_query()"/>
            </t>
        </t>'''
        })
        child_template = self.env['ir.ui.view'].create({
            'name': 'test',
            'type': 'qweb',
            'arch_db': '<t t-name="dummy"><span><t t-esc="item"/> <t t-esc="add_one_query()"/></span></t>'
        })
        # register the child view as 'base.dummy' so t-call can resolve it
        self.env.cr.execute("INSERT INTO ir_model_data(name, model, res_id, module)"
                            "VALUES ('dummy', 'ir.ui.view', %s, 'base')", [child_template.id])
        values = {'add_one_query': lambda: self.env.cr.execute('SELECT id FROM ir_ui_view LIMIT 1') or 'query'}
        result = u"""
            [0: <span>a query</span> 3]
            <b>query</b>
            [1: <span>b query</span> 2]
            <b>query</b>
            [2: <span>c query</span> 1]
            <b>query</b>
        """
        # test rendering without profiling
        rendered = self.env['ir.qweb']._render(template.id, values)
        self.assertEqual(rendered.strip(), result.strip(), 'Without profiling')
        # This rendering is used to cache the compiled template method so as
        # not to have a number of requests that vary according to the modules
        # installed.
        with Profiler(description='test', collectors=['qweb'], db=None):
            self.env['ir.qweb']._render(template.id, values)
        with Profiler(description='test', collectors=['qweb'], db=None) as p:
            rendered = self.env['ir.qweb']._render(template.id, values)
        # check if qweb is ok
        self.assertEqual(rendered.strip(), result.strip())
        # check if the arch of all used templates is includes in the result
        self.assertEqual(p.collectors[0].entries[0]['results']['archs'], {
            template.id: template.arch_db,
            child_template.id: child_template.arch_db,
        })
        # check all directives without duration information
        for data in p.collectors[0].entries[0]['results']['data']:
            data.pop('delay')
        expected = [
            # pylint: disable=bad-whitespace
            # first template and first directive
            {'view_id': template.id, 'xpath': '/t/t', 'directive': """t-foreach="{'a': 3, 'b': 2, 'c': 1}" t-as='item'""", 'query': 0},
            # first pass in the loop
            {'view_id': template.id, 'xpath': '/t/t/t[1]', 'directive': "t-esc='item_index'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/t[2]', 'directive': "t-call='base.dummy'", 'query': 0}, # the compiled template method is in cache
            # first pass in the loop: content of the child template
            {'view_id': child_template.id, 'xpath': '/t/span/t[1]', 'directive': "t-esc='item'", 'query': 0},
            {'view_id': child_template.id, 'xpath': '/t/span/t[2]', 'directive': "t-esc='add_one_query()'", 'query': 1},
            {'view_id': template.id, 'xpath': '/t/t/t[3]', 'directive': "t-esc='item_value'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/b', 'directive': "t-esc='add_one_query()'", 'query':1},
            # second pass in the loop
            {'view_id': template.id, 'xpath': '/t/t/t[1]', 'directive': "t-esc='item_index'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/t[2]', 'directive': "t-call='base.dummy'", 'query': 0}, # 0 because the template is in cache
            {'view_id': child_template.id, 'xpath': '/t/span/t[1]', 'directive': "t-esc='item'", 'query': 0},
            {'view_id': child_template.id, 'xpath': '/t/span/t[2]', 'directive': "t-esc='add_one_query()'", 'query': 1},
            {'view_id': template.id, 'xpath': '/t/t/t[3]', 'directive': "t-esc='item_value'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/b', 'directive': "t-esc='add_one_query()'", 'query':1},
            # third pass in the loop
            {'view_id': template.id, 'xpath': '/t/t/t[1]', 'directive': "t-esc='item_index'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/t[2]', 'directive': "t-call='base.dummy'", 'query': 0},
            {'view_id': child_template.id, 'xpath': '/t/span/t[1]', 'directive': "t-esc='item'", 'query': 0},
            {'view_id': child_template.id, 'xpath': '/t/span/t[2]', 'directive': "t-esc='add_one_query()'", 'query': 1},
            {'view_id': template.id, 'xpath': '/t/t/t[3]', 'directive': "t-esc='item_value'", 'query': 0},
            {'view_id': template.id, 'xpath': '/t/t/b', 'directive': "t-esc='add_one_query()'", 'query':1},
        ]
        self.assertEqual(p.collectors[0].entries[0]['results']['data'], expected)
    def test_default_recorders(self):
        """Profiling with default collectors includes an 'sql' collector whose
        entries carry a stack ending in sql_db's execute and a positive time."""
        with Profiler(db=None) as p:
            queries_start = self.env.cr.sql_log_count
            for i in range(10):
                self.env['res.partner'].create({'name': 'snail%s' % i})
            self.env['res.partner'].flush()
            total_queries = self.env.cr.sql_log_count - queries_start
        rq = next(r for r in p.collectors if r.name == "sql").entries
        self.assertEqual(p.init_stack_trace[-1][2], 'test_default_recorders')
        self.assertEqual(p.init_stack_trace[-1][0].split('/')[-1], 'test_profiler.py')
        self.assertEqual(len(rq), total_queries)
        first_query = rq[0]
        self.assertEqual(first_query['stack'][0][2], 'create')
        #self.assertIn("self.env['res.partner'].create({", first_query['stack'][0][3])
        self.assertGreater(first_query['time'], 0)
        self.assertEqual(first_query['stack'][-1][2], 'execute')
        self.assertEqual(first_query['stack'][-1][0].split('/')[-1], 'sql_db.py')
def deep_call(func, depth):
    """Invoke ``func`` after nesting ``depth`` additional recursive stack
    frames (used to exercise the profiler at a controlled stack depth)."""
    if depth <= 0:
        func()
    else:
        deep_call(func, depth - 1)
@tagged('-standard', 'profiling_performance')
class TestPerformance(BaseCase):
    """Collector throughput checks; excluded from standard runs ('-standard')
    because every test busy-loops or sleeps for about one second."""
    def test_collector_max_frequency(self):
        """
        Check the creation time of an entry
        """
        collector = profiler.Collector()
        p = Profiler(collectors=[collector], db=None)
        def collect():
            collector.add()
        # collect on changing stack
        with p:
            start = time.time()
            while start + 1 > time.time():
                deep_call(collect, 20)
        self.assertGreater(len(collector.entries), 20000) # ~40000
        # collect on identical stack
        collector = profiler.Collector()
        p = Profiler(collectors=[collector], db=None)
        def collect_1_s():
            # hammer the collector for one second from a fixed stack depth
            start = time.time()
            while start + 1 > time.time():
                collector.add()
        with p:
            deep_call(collect_1_s, 20)
        self.assertGreater(len(collector.entries), 50000) # ~70000
    def test_frequencies_1ms_sleep(self):
        """
        Check the number of entries generated in 1s at 1kHz
        we need to artificially change the frame as often as possible to avoid
        triggering the memory optimisation skipping identical frames
        """
        def sleep_1():
            time.sleep(0.0001)
        def sleep_2():
            time.sleep(0.0001)
        with Profiler(collectors=['traces_async'], db=None) as res:
            start = time.time()
            while start + 1 > time.time():
                sleep_1()
                sleep_2()
        entry_count = len(res.collectors[0].entries)
        self.assertGreater(entry_count, 700) # ~920
    def test_traces_async_memory_optimisation(self):
        """
        Identical frames should be saved only once.
        We should only have a few entries on a 1 second sleep.
        """
        with Profiler(collectors=['traces_async'], db=None) as res:
            time.sleep(1)
        entry_count = len(res.collectors[0].entries)
        self.assertLess(entry_count, 5) # ~3
| 40.949765 | 26,085 |
14,516 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
from unittest.mock import patch
from odoo.tests.common import TransactionCase, HttpCase
from odoo import Command
class TransactionCaseWithUserDemo(TransactionCase):
    """Transaction test case guaranteeing that a 'demo' user exists.

    The user is looked up by login; when the demo data is not installed,
    an equivalent user/partner pair is created on the fly.
    """
    def setUp(self):
        super().setUp()
        self.env.ref('base.partner_admin').write({'name': 'Mitchell Admin'})
        self.user_demo = self.env['res.users'].search([('login', '=', 'demo')])
        self.partner_demo = self.user_demo.partner_id
        if self.user_demo:
            return
        # demo data not installed: relax the password policy and build the user
        self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
        self.partner_demo = self.env['res.partner'].create({
            'name': 'Marc Demo',
            'email': '[email protected]',
        })
        demo_group_ids = [
            self.env.ref('base.group_user').id,
            self.env.ref('base.group_partner_manager').id,
        ]
        self.user_demo = self.env['res.users'].create({
            'login': 'demo',
            'password': 'demo',
            'partner_id': self.partner_demo.id,
            'groups_id': [Command.set(demo_group_ids)],
        })
class HttpCaseWithUserDemo(HttpCase):
    """HTTP test case guaranteeing that a 'demo' user exists, creating it
    when the demo data is not installed."""
    def setUp(self):
        super().setUp()
        self.user_admin = self.env.ref('base.user_admin')
        self.user_admin.write({'name': 'Mitchell Admin'})
        self.partner_admin = self.user_admin.partner_id
        self.user_demo = self.env['res.users'].search([('login', '=', 'demo')])
        self.partner_demo = self.user_demo.partner_id
        if self.user_demo:
            return
        # demo data not installed: relax the password policy and build the user
        self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
        self.partner_demo = self.env['res.partner'].create({
            'name': 'Marc Demo',
            'email': '[email protected]',
        })
        demo_group_ids = [
            self.env.ref('base.group_user').id,
            self.env.ref('base.group_partner_manager').id,
        ]
        self.user_demo = self.env['res.users'].create({
            'login': 'demo',
            'password': 'demo',
            'partner_id': self.partner_demo.id,
            'groups_id': [Command.set(demo_group_ids)],
        })
class SavepointCaseWithUserDemo(TransactionCase):
    """Class-level variant of TransactionCaseWithUserDemo: the 'demo' user is
    looked up (or created) once in setUpClass instead of per test."""
    @classmethod
    def setUpClass(cls):
        super(SavepointCaseWithUserDemo, cls).setUpClass()
        cls.user_demo = cls.env['res.users'].search([('login', '=', 'demo')])
        cls.partner_demo = cls.user_demo.partner_id
        if not cls.user_demo:
            # demo data not installed: relax password policy and build the user
            cls.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
            cls.partner_demo = cls.env['res.partner'].create({
                'name': 'Marc Demo',
                'email': '[email protected]',
            })
            cls.user_demo = cls.env['res.users'].create({
                'login': 'demo',
                'password': 'demo',
                'partner_id': cls.partner_demo.id,
                'groups_id': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
            })
    @classmethod
    def _load_partners_set(cls):
        """Create a deterministic set of partner categories, companies and
        contacts mirroring demo data (original demo names kept in comments)."""
        cls.partner_category = cls.env['res.partner.category'].create({
            'name': 'Sellers',
            'color': 2,
        })
        cls.partner_category_child_1 = cls.env['res.partner.category'].create({
            'name': 'Office Supplies',
            'parent_id': cls.partner_category.id,
        })
        cls.partner_category_child_2 = cls.env['res.partner.category'].create({
            'name': 'Desk Manufacturers',
            'parent_id': cls.partner_category.id,
        })
        # Load all the demo partners
        cls.partners = cls.env['res.partner'].create([
            {
                'name': 'Inner Works', # Wood Corner
                'state_id': cls.env.ref('base.state_us_1').id,
                'category_id': [Command.set([cls.partner_category_child_1.id, cls.partner_category_child_2.id,])],
                'child_ids': [Command.create({
                    'name': 'Sheila Ruiz', # 'Willie Burke',
                }), Command.create({
                    'name': 'Wyatt Howard', # 'Ron Gibson',
                }), Command.create({
                    'name': 'Austin Kennedy', # Tom Ruiz
                })],
            }, {
                'name': 'Pepper Street', # 'Deco Addict',
                'state_id': cls.env.ref('base.state_us_2').id,
                'child_ids': [Command.create({
                    'name': 'Liam King', # 'Douglas Fletcher',
                }), Command.create({
                    'name': 'Craig Richardson', # 'Floyd Steward',
                }), Command.create({
                    'name': 'Adam Cox', # 'Addison Olson',
                })],
            }, {
                'name': 'AnalytIQ', #'Gemini Furniture',
                'state_id': cls.env.ref('base.state_us_3').id,
                'child_ids': [Command.create({
                    'name': 'Pedro Boyd', # Edwin Hansen
                }), Command.create({
                    'name': 'Landon Roberts', # 'Jesse Brown',
                    'company_id': cls.env.ref('base.main_company').id,
                }), Command.create({
                    'name': 'Leona Shelton', # 'Soham Palmer',
                }), Command.create({
                    'name': 'Scott Kim', # 'Oscar Morgan',
                })],
            }, {
                'name': 'Urban Trends', # 'Ready Mat',
                'state_id': cls.env.ref('base.state_us_4').id,
                'category_id': [Command.set([cls.partner_category_child_1.id, cls.partner_category_child_2.id,])],
                'child_ids': [Command.create({
                    'name': 'Louella Jacobs', # 'Billy Fox',
                }), Command.create({
                    'name': 'Albert Alexander', # 'Kim Snyder',
                }), Command.create({
                    'name': 'Brad Castillo', # 'Edith Sanchez',
                }), Command.create({
                    'name': 'Sophie Montgomery', # 'Sandra Neal',
                }), Command.create({
                    'name': 'Chloe Bates', # 'Julie Richards',
                }), Command.create({
                    'name': 'Mason Crawford', # 'Travis Mendoza',
                }), Command.create({
                    'name': 'Elsie Kennedy', # 'Theodore Gardner',
                })],
            }, {
                'name': 'Ctrl-Alt-Fix', # 'The Jackson Group',
                'state_id': cls.env.ref('base.state_us_5').id,
                'child_ids': [Command.create({
                    'name': 'carole miller', # 'Toni Rhodes',
                }), Command.create({
                    'name': 'Cecil Holmes', # 'Gordon Owens',
                })],
            }, {
                'name': 'Ignitive Labs', # 'Azure Interior',
                'state_id': cls.env.ref('base.state_us_6').id,
                'child_ids': [Command.create({
                    'name': 'Jonathan Webb', # 'Brandon Freeman',
                }), Command.create({
                    'name': 'Clinton Clark', # 'Nicole Ford',
                }), Command.create({
                    'name': 'Howard Bryant', # 'Colleen Diaz',
                })],
            }, {
                'name': 'Amber & Forge', # 'Lumber Inc',
                'state_id': cls.env.ref('base.state_us_7').id,
                'child_ids': [Command.create({
                    'name': 'Mark Webb', # 'Lorraine Douglas',
                })],
            }, {
                'name': 'Rebecca Day', # 'Chester Reed',
                'parent_id': cls.env.ref('base.main_partner').id,
            }, {
                'name': 'Gabriella Jennings', # 'Dwayne Newman',
                'parent_id': cls.env.ref('base.main_partner').id,
            }
        ])
class HttpCaseWithUserPortal(HttpCase):
    """HTTP test case guaranteeing that a 'portal' user exists, creating it
    when the portal demo data is not installed."""
    def setUp(self):
        super().setUp()
        self.user_portal = self.env['res.users'].search([('login', '=', 'portal')])
        self.partner_portal = self.user_portal.partner_id
        if self.user_portal:
            return
        # portal demo data not installed: relax password policy, build the user
        self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
        self.partner_portal = self.env['res.partner'].create({
            'name': 'Joel Willis',
            'email': '[email protected]',
        })
        self.user_portal = self.env['res.users'].with_context(no_reset_password=True).create({
            'login': 'portal',
            'password': 'portal',
            'partner_id': self.partner_portal.id,
            'groups_id': [Command.set([self.env.ref('base.group_portal').id])],
        })
class MockSmtplibCase:
    """Class which allows you to mock the smtplib feature, to be able to test in depth the
    sending of emails. Unlike "MockEmail" which mocks mainly the <ir.mail_server> methods,
    here we mainly mock the smtplib to be able to test the <ir.mail_server> model.
    """
    @contextmanager
    def mock_smtplib_connection(self):
        """Replace any SMTP / SMTP_SSL session by a stub that records outgoing
        emails into ``self.emails`` instead of opening a network connection."""
        self.emails = []
        origin = self
        class TestingSMTPSession:
            """SMTP session object returned during the testing.
            So we do not connect to real SMTP server. Store the mail
            server id used for the SMTP connection and other information.
            Can be mocked for testing to know which with arguments the email was sent.
            """
            def quit(self):
                # no-op: nothing to disconnect
                pass
            def send_message(self, message, smtp_from, smtp_to_list):
                # record the email instead of sending it
                origin.emails.append({
                    'smtp_from': smtp_from,
                    'smtp_to_list': smtp_to_list,
                    'message': message.as_string(),
                    'from_filter': self.from_filter,
                })
            def sendmail(self, smtp_from, smtp_to_list, message_str, mail_options):
                # record the email instead of sending it
                origin.emails.append({
                    'smtp_from': smtp_from,
                    'smtp_to_list': smtp_to_list,
                    'message': message_str,
                    'from_filter': self.from_filter,
                })
            def set_debuglevel(self, smtp_debug):
                pass
            def ehlo_or_helo_if_needed(self):
                pass
            def login(self, user, password):
                pass
            def starttls(self, keyfile=None, certfile=None, context=None):
                pass
        self.testing_smtp_session = TestingSMTPSession()
        IrMailServer = self.env['ir.mail_server']
        connect = IrMailServer.connect
        find_mail_server = IrMailServer._find_mail_server
        # keep the real connect/_find_mail_server behaviour but spy on the calls,
        # while routing any smtplib session to the recording stub above
        with patch.object(type(IrMailServer), '_is_test_mode', lambda self: False), \
             patch('smtplib.SMTP_SSL', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
             patch('smtplib.SMTP', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
             patch.object(type(IrMailServer), 'connect', side_effect=connect) as connect_mocked, \
             patch.object(type(IrMailServer), '_find_mail_server', side_effect=find_mail_server) as find_mail_server_mocked:
            self.connect_mocked = connect_mocked
            self.find_mail_server_mocked = find_mail_server_mocked
            yield
    def assert_email_sent_smtp(self, smtp_from=None, smtp_to_list=None, message_from=None, from_filter=None, emails_count=1):
        """Check that the given email has been sent.
        If one of the parameter is None, it's just ignored and not used to retrieve the email.
        :param smtp_from: FROM used for the authentication to the mail server
        :param smtp_to_list: List of destination email address
        :param message_from: FROM used in the SMTP headers
        :param from_filter: from_filter of the <ir.mail_server> used to send the email
            Can use a lambda to check the value
        :param emails_count: the number of emails which should match the condition
        :return: True if at least one email has been found with those parameters
        """
        matching_emails = filter(
            lambda email:
            (smtp_from is None or (
                smtp_from(email['smtp_from'])
                if callable(smtp_from)
                else smtp_from == email['smtp_from'])
            )
            and (smtp_to_list is None or smtp_to_list == email['smtp_to_list'])
            and (message_from is None or 'From: %s' % message_from in email['message'])
            and (from_filter is None or from_filter == email['from_filter']),
            self.emails,
        )
        matching_emails_count = len(list(matching_emails))
        self.assertTrue(
            matching_emails_count == emails_count,
            msg='Emails not sent, %i emails match the condition but %i are expected' % (matching_emails_count, emails_count),
        )
    @classmethod
    def _init_mail_servers(cls):
        """Reset catchall/bounce parameters and replace all mail servers with
        four servers covering the from_filter combinations used by the tests."""
        cls.env['ir.config_parameter'].sudo().set_param('mail.catchall.domain', 'test.com')
        cls.env['ir.config_parameter'].sudo().set_param('mail.default.from', 'notifications')
        cls.env['ir.config_parameter'].sudo().set_param('mail.bounce.alias', 'bounce')
        cls.alias_bounce = 'bounce'
        cls.alias_domain = 'test.com'
        cls.env['ir.mail_server'].search([]).unlink()
        ir_mail_server_values = {
            'smtp_host': 'smtp_host',
            'smtp_encryption': 'none',
        }
        (
            cls.server_domain,
            cls.server_user,
            cls.server_notification,
            cls.server_default,
        ) = cls.env['ir.mail_server'].create([
            {
                'name': 'Domain based server',
                'from_filter': 'test.com',
                ** ir_mail_server_values,
            }, {
                'name': 'User specific server',
                'from_filter': '[email protected]',
                ** ir_mail_server_values,
            }, {
                'name': 'Server Notifications',
                'from_filter': '[email protected]',
                ** ir_mail_server_values,
            }, {
                'name': 'Server No From Filter',
                'from_filter': False,
                ** ir_mail_server_values,
            },
        ])
| 42.568915 | 14,516 |
11,815 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import Form
from odoo.tests.common import TransactionCase
from odoo.exceptions import AccessError, UserError
class TestPartner(TransactionCase):
    """Unit tests for res.partner: name search/get, company propagation,
    commercial field synchronization, lang computation, address display."""
    def test_name_search(self):
        """ Check name_search on partner, especially with domain based on auto_join
        user_ids field. Check specific SQL of name_search correctly handle joined tables. """
        test_partner = self.env['res.partner'].create({'name': 'Vlad the Impaler'})
        test_user = self.env['res.users'].create({'name': 'Vlad the Impaler', 'login': 'vlad', 'email': '[email protected]'})
        ns_res = self.env['res.partner'].name_search('Vlad', operator='ilike')
        self.assertEqual(set(i[0] for i in ns_res), set((test_partner | test_user.partner_id).ids))
        ns_res = self.env['res.partner'].name_search('Vlad', args=[('user_ids.email', 'ilike', 'vlad')])
        self.assertEqual(set(i[0] for i in ns_res), set(test_user.partner_id.ids))
        # Check a partner may be searched when current user has no access but sudo is used
        public_user = self.env.ref('base.public_user')
        with self.assertRaises(AccessError):
            test_partner.with_user(public_user).check_access_rule('read')
        ns_res = self.env['res.partner'].with_user(public_user).sudo().name_search('Vlad', args=[('user_ids.email', 'ilike', 'vlad')])
        self.assertEqual(set(i[0] for i in ns_res), set(test_user.partner_id.ids))
    def test_name_get(self):
        """ Check name_get on partner, especially with different context
        Check name_get correctly return name with context. """
        test_partner_jetha = self.env['res.partner'].create({'name': 'Jethala', 'street': 'Powder gali', 'street2': 'Gokuldham Society'})
        test_partner_bhide = self.env['res.partner'].create({'name': 'Atmaram Bhide'})
        res_jetha = test_partner_jetha.with_context(show_address=1).name_get()
        self.assertEqual(res_jetha[0][1], "Jethala\nPowder gali\nGokuldham Society", "name should contain comma separated name and address")
        res_bhide = test_partner_bhide.with_context(show_address=1).name_get()
        self.assertEqual(res_bhide[0][1], "Atmaram Bhide", "name should contain only name if address is not available, without extra commas")
        # address_inline joins the address parts on one line instead of newlines
        res_jetha = test_partner_jetha.with_context(show_address=1, address_inline=1).name_get()
        self.assertEqual(res_jetha[0][1], "Jethala, Powder gali, Gokuldham Society", "name should contain comma separated name and address")
        res_bhide = test_partner_bhide.with_context(show_address=1, address_inline=1).name_get()
        self.assertEqual(res_bhide[0][1], "Atmaram Bhide", "name should contain only name if address is not available, without extra commas")
    def test_company_change_propagation(self):
        """ Check propagation of company_id across children """
        User = self.env['res.users']
        Partner = self.env['res.partner']
        Company = self.env['res.company']
        company_1 = Company.create({'name': 'company_1'})
        company_2 = Company.create({'name': 'company_2'})
        test_partner_company = Partner.create({'name': 'This company'})
        test_user = User.create({'name': 'This user', 'login': 'thisu', 'email': '[email protected]', 'company_id': company_1.id, 'company_ids': [company_1.id]})
        test_user.partner_id.write({'parent_id': test_partner_company.id})
        test_partner_company.write({'company_id': company_1.id})
        self.assertEqual(test_user.partner_id.company_id.id, company_1.id, "The new company_id of the partner company should be propagated to its children")
        test_partner_company.write({'company_id': False})
        self.assertFalse(test_user.partner_id.company_id.id, "If the company_id is deleted from the partner company, it should be propagated to its children")
        with self.assertRaises(UserError, msg="You should not be able to update the company_id of the partner company if the linked user of a child partner is not an allowed to be assigned to that company"), self.cr.savepoint():
            test_partner_company.write({'company_id': company_2.id})
    def test_commercial_field_sync(self):
        """Check if commercial fields are synced properly: testing with VAT field"""
        Partner = self.env['res.partner']
        company_1 = Partner.create({'name': 'company 1', 'is_company': True, 'vat': 'BE0123456789'})
        company_2 = Partner.create({'name': 'company 2', 'is_company': True, 'vat': 'BE9876543210'})
        partner = Partner.create({'name': 'someone', 'is_company': False, 'parent_id': company_1.id})
        Partner.flush()
        self.assertEqual(partner.vat, company_1.vat, "VAT should be inherited from the company 1")
        # create a delivery address for the partner
        delivery = Partner.create({'name': 'somewhere', 'type': 'delivery', 'parent_id': partner.id})
        self.assertEqual(delivery.commercial_partner_id.id, company_1.id, "Commercial partner should be recomputed")
        self.assertEqual(delivery.vat, company_1.vat, "VAT should be inherited from the company 1")
        # move the partner to another company
        partner.write({'parent_id': company_2.id})
        partner.flush()
        self.assertEqual(partner.commercial_partner_id.id, company_2.id, "Commercial partner should be recomputed")
        self.assertEqual(partner.vat, company_2.vat, "VAT should be inherited from the company 2")
        self.assertEqual(delivery.commercial_partner_id.id, company_2.id, "Commercial partner should be recomputed on delivery")
        self.assertEqual(delivery.vat, company_2.vat, "VAT should be inherited from the company 2 to delivery")
    def test_lang_computation_code(self):
        """ Check computation of lang: coming from installed languages, forced
        default value and propagation from parent."""
        default_lang_info = self.env['res.lang'].get_installed()[0]
        default_lang_code = default_lang_info[0]
        self.assertNotEqual(default_lang_code, 'de_DE')  # should not be the case, just to ease test
        self.assertNotEqual(default_lang_code, 'fr_FR')  # should not be the case, just to ease test
        # default is installed lang
        partner = self.env['res.partner'].create({'name': "Test Company"})
        self.assertEqual(partner.lang, default_lang_code)
        # check propagation of parent to child
        child = self.env['res.partner'].create({'name': 'First Child', 'parent_id': partner.id})
        self.assertEqual(child.lang, default_lang_code)
        # activate another languages to test language propagation when being in multi-lang
        self.env['res.lang']._activate_lang('de_DE')
        self.env['res.lang']._activate_lang('fr_FR')
        # default from context > default from installed
        partner = self.env['res.partner'].with_context(default_lang='de_DE').create({'name': "Test Company"})
        self.assertEqual(partner.lang, 'de_DE')
        first_child = self.env['res.partner'].create({'name': 'First Child', 'parent_id': partner.id})
        partner.write({'lang': 'fr_FR'})
        second_child = self.env['res.partner'].create({'name': 'Second Child', 'parent_id': partner.id})
        # check user input is kept
        self.assertEqual(partner.lang, 'fr_FR')
        self.assertEqual(first_child.lang, 'de_DE')
        self.assertEqual(second_child.lang, 'fr_FR')
    def test_lang_computation_form_view(self):
        """ Check computation of lang: coming from installed languages, forced
        default value and propagation from parent."""
        default_lang_info = self.env['res.lang'].get_installed()[0]
        default_lang_code = default_lang_info[0]
        self.assertNotEqual(default_lang_code, 'de_DE')  # should not be the case, just to ease test
        self.assertNotEqual(default_lang_code, 'fr_FR')  # should not be the case, just to ease test
        # default is installed lang
        partner_form = Form(self.env['res.partner'], 'base.view_partner_form')
        partner_form.name = "Test Company"
        self.assertEqual(partner_form.lang, default_lang_code, "New partner's lang should be default one")
        partner = partner_form.save()
        self.assertEqual(partner.lang, default_lang_code)
        # check propagation of parent to child
        with partner_form.child_ids.new() as child:
            child.name = "First Child"
            self.assertEqual(child.lang, default_lang_code, "Child contact's lang should have the same as its parent")
        partner = partner_form.save()
        self.assertEqual(partner.child_ids.lang, default_lang_code)
        # activate another languages to test language propagation when being in multi-lang
        self.env['res.lang']._activate_lang('de_DE')
        self.env['res.lang']._activate_lang('fr_FR')
        # default from context > default from installed
        partner_form = Form(
            self.env['res.partner'].with_context(default_lang='de_DE'),
            'base.view_partner_form'
        )
        partner_form.is_company = True
        partner_form.name = "Test Company"
        self.assertEqual(partner_form.lang, 'de_DE', "New partner's lang should take default from context")
        with partner_form.child_ids.new() as child:
            child.name = "First Child"
            self.assertEqual(child.lang, 'de_DE', "Child contact's lang should be the same as its parent.")
        partner_form.lang = 'fr_FR'
        self.assertEqual(partner_form.lang, 'fr_FR', "New partner's lang should take user input")
        with partner_form.child_ids.new() as child:
            child.name = "Second Child"
            self.assertEqual(child.lang, 'fr_FR', "Child contact's lang should be the same as its parent.")
        partner = partner_form.save()
        # check final values (kept from form input)
        self.assertEqual(partner.lang, 'fr_FR')
        self.assertEqual(partner.child_ids.filtered(lambda p: p.name == "First Child").lang, 'de_DE')
        self.assertEqual(partner.child_ids.filtered(lambda p: p.name == "Second Child").lang, 'fr_FR')
    def test_partner_merge_wizard_dst_partner_id(self):
        """ Check that dst_partner_id in merge wizard displays id along with partner name """
        test_partner = self.env['res.partner'].create({'name': 'Radu the Handsome'})
        expected_partner_name = '%s (%s)' % (test_partner.name, test_partner.id)
        partner_merge_wizard = self.env['base.partner.merge.automatic.wizard'].with_context(
            {'partner_show_db_id': True, 'default_dst_partner_id': test_partner}).new()
        self.assertEqual(partner_merge_wizard.dst_partner_id.display_name, expected_partner_name, "'Destination Contact' name should contain db ID in brackets")
    def test_display_address_missing_key(self):
        """ Check _display_address when some keys are missing. As a defaultdict is used, missing keys should be
        filled with empty strings. """
        country = self.env["res.country"].create({"name": "TestCountry", "address_format": "%(city)s %(zip)s"})
        partner = self.env["res.partner"].create({
            "name": "TestPartner",
            "country_id": country.id,
            "city": "TestCity",
            "zip": "12345",
        })
        before = partner._display_address()
        # Manually update the country address_format because placeholders are checked by create
        self.env.cr.execute(
            "UPDATE res_country SET address_format ='%%(city)s %%(zip)s %%(nothing)s' WHERE id=%s",
            [country.id]
        )
        self.env["res.country"].invalidate_cache()
        self.assertEqual(before, partner._display_address().strip())
| 59.671717 | 11,815 |
13,496 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import logging
from odoo import exceptions
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
from odoo.tests.common import TransactionCase, ADMIN_USER_ID, tagged
_logger = logging.getLogger(__name__)
def noid(seq):
    """Strip the 'id' and 'action_id' keys (irrelevant for comparisons) from
    every dict in *seq*, in place, and return *seq* for chaining."""
    for record in seq:
        for key in ('id', 'action_id'):
            record.pop(key, None)
    return seq
class FiltersCase(TransactionCaseWithUserDemo):
    """ Shared fixture for the ir.filters tests: resolves the demo user once
    and provides a helper to create records as the admin user. """

    def setUp(self):
        super().setUp()
        # name_search returns (id, display_name) pairs; keep the first match.
        self.USER_NG = self.env['res.users'].name_search('demo')[0]
        self.USER_ID = self.USER_NG[0]

    def build(self, model, *args):
        """ Create one record of ``model`` per values dict, as the admin. """
        admin_model = self.env[model].with_user(ADMIN_USER_ID)
        for values in args:
            admin_model.create(values)
class TestGetFilters(FiltersCase):
    """ get_filters() must return the user's own filters and the global
    (user_id=False) ones, but never filters owned by other users. """
    def test_own_filters(self):
        """ A user sees every filter they own. """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))
        filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
            dict(name='b', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
            dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
            dict(name='d', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
        ])
    def test_global_filters(self):
        """ Global filters (no owner) are visible to any user. """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
            dict(name='c', user_id=False, model_id='ir.filters'),
            dict(name='d', user_id=False, model_id='ir.filters'),
        )
        filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
            dict(name='b', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
            dict(name='c', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
            dict(name='d', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
        ])
    def test_no_third_party_filters(self):
        """ Filters owned by another user (here: admin) are filtered out. """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=ADMIN_USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=ADMIN_USER_ID, model_id='ir.filters') )
        filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
            dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
        ])
class TestOwnDefaults(FiltersCase):
    """ Behaviour of the is_default flag for *user-owned* filters: setting a
    new default silently moves the flag off any previous default. """
    def test_new_no_filter(self):
        """
        When creating a @is_default filter with no existing filter, that new
        filter gets the default flag
        """
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        Filters.create_or_replace({
            'name': 'a',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=self.USER_NG, is_default=True,
                 domain='[]', context='{}', sort='[]')
        ])
    def test_new_filter_not_default(self):
        """
        When creating a @is_default filter with existing non-default filters,
        the new filter gets the flag
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        Filters.create_or_replace({
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
        ])
    def test_new_filter_existing_default(self):
        """
        When creating a @is_default filter where an existing filter is already
        @is_default, the flag should be *moved* from the old to the new filter
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        Filters.create_or_replace({
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
        ])
    def test_update_filter_set_default(self):
        """
        When updating an existing filter to @is_default, if an other filter
        already has the flag the flag should be moved
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        Filters.create_or_replace({
            'name': 'a',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
            dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
        ])
class TestGlobalDefaults(FiltersCase):
    """ Behaviour of the is_default flag for *global* filters: unlike
    user-owned filters, stealing the default from another filter raises. """
    def test_new_filter_not_default(self):
        """
        When creating a @is_default filter with existing non-default filters,
        the new filter gets the flag
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        Filters.create_or_replace({
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': False,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='b', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='c', user_id=False, is_default=True, domain='[]', context='{}', sort='[]'),
        ])
    def test_new_filter_existing_default(self):
        """
        When creating a @is_default filter where an existing filter is already
        @is_default, an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        with self.assertRaises(exceptions.UserError):
            Filters.create_or_replace({
                'name': 'c',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })
    def test_update_filter_set_default(self):
        """
        When updating an existing filter to @is_default, if an other filter
        already has the flag an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        with self.assertRaises(exceptions.UserError):
            Filters.create_or_replace({
                'name': 'a',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })
    def test_update_default_filter(self):
        """
        Replacing the current default global filter should not generate any error
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.env['ir.filters'].with_user(self.USER_ID)
        context_value = "{'some_key': True}"
        Filters.create_or_replace({
            'name': 'b',
            'model_id': 'ir.filters',
            'user_id': False,
            'context': context_value,
            'is_default': True,
        })
        filters = Filters.get_filters('ir.filters')
        self.assertItemsEqual(noid(filters), [
            dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
            dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value, sort='[]'),
        ])
class TestReadGroup(TransactionCase):
    """Test function read_group with groupby on a many2one field to a model
    (in test, "user_id" to "res.users") which is ordered by an inherited not stored field (in
    test, "name" inherited from "res.partner").
    """
    def test_read_group_1(self):
        # Preconditions the test relies on: ordering by a non-stored field.
        Users = self.env['res.users']
        self.assertEqual(Users._order, "name, login", "Model res.users must be ordered by name, login")
        self.assertFalse(Users._fields['name'].store, "Field name is not stored in res.users")
        Filters = self.env['ir.filters']
        filter_a = Filters.create(dict(name="Filter_A", model_id="ir.filters"))
        filter_b = Filters.create(dict(name="Filter_B", model_id="ir.filters"))
        # One filter without owner, so a False group must appear in read_group.
        filter_b.write(dict(user_id=False))
        res = Filters.read_group([], ['name', 'user_id'], ['user_id'])
        self.assertTrue(any(val['user_id'] == False for val in res), "At least one group must contain val['user_id'] == False.")
@tagged('post_install', '-at_install', 'migration')
class TestAllFilters(TransactionCase):
    """ Sanity-check every ir.filters record in the database: its domain,
    sort and group_by must still be evaluable against its model. """
    def check_filter(self, name, model, domain, fields, groupby, order, context):
        # Prefer read_group when the filter groups records, fall back to a
        # plain search when it only has a domain; otherwise just log it.
        if groupby:
            try:
                self.env[model].with_context(context).read_group(domain, fields, groupby, orderby=order)
            except ValueError as e:
                raise self.failureException("Test filter '%s' failed: %s" % (name, e)) from None
            except KeyError as e:
                raise self.failureException("Test filter '%s' failed: field or aggregate %s does not exist"% (name, e)) from None
        elif domain:
            try:
                self.env[model].with_context(context).search(domain, order=order)
            except ValueError as e:
                raise self.failureException("Test filter '%s' failed: %s" % (name, e)) from None
        else:
            _logger.info("No domain or group by in filter %s with model %s and context %s", name, model, context)
    def test_filters(self):
        """ Run check_filter on every stored filter, each in its own subTest. """
        for filter_ in self.env['ir.filters'].search([]):
            with self.subTest(name=filter_.name):
                context = ast.literal_eval(filter_.context)
                groupby = context.get('group_by')
                self.check_filter(
                    name=filter_.name,
                    model=filter_.model_id,
                    domain=filter_._get_eval_domain(),
                    fields=[field.split(':')[0] for field in (groupby or [])],
                    groupby=groupby,
                    order=','.join(ast.literal_eval(filter_.sort)),
                    context=context,
                )
| 40.89697 | 13,496 |
12,148 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase, tagged, TagsSelector, BaseCase
@tagged('nodatabase')
class TestSetTags(TransactionCase):
    """ Checks of the @tagged class decorator: default tags, adding tags,
    removing tags with the '-' prefix, and behaviour across inheritance. """
    def test_set_tags_empty(self):
        """Test the set_tags decorator with an empty set of tags"""
        @tagged()
        class FakeClass(TransactionCase):
            pass
        fc = FakeClass()
        self.assertTrue(hasattr(fc, 'test_tags'))
        self.assertEqual(fc.test_tags, {'at_install', 'standard'})
        self.assertEqual(fc.test_module, 'base')
    def test_set_tags_not_decorated(self):
        """Test that a TransactionCase has some test_tags by default"""
        class FakeClass(TransactionCase):
            pass
        fc = FakeClass()
        self.assertTrue(hasattr(fc, 'test_tags'))
        self.assertEqual(fc.test_tags, {'at_install', 'standard'})
        self.assertEqual(fc.test_module, 'base')
    def test_set_tags_single_tag(self):
        """Test the set_tags decorator with a single tag"""
        @tagged('slow')
        class FakeClass(TransactionCase):
            pass
        fc = FakeClass()
        self.assertEqual(fc.test_tags, {'at_install', 'standard', 'slow'})
        self.assertEqual(fc.test_module, 'base')
    def test_set_tags_multiple_tags(self):
        """Test the set_tags decorator with multiple tags"""
        @tagged('slow', 'nightly')
        class FakeClass(TransactionCase):
            pass
        fc = FakeClass()
        self.assertEqual(fc.test_tags, {'at_install', 'standard', 'slow', 'nightly'})
        self.assertEqual(fc.test_module, 'base')
    def test_inheritance(self):
        """Test inheritance when using the 'tagged' decorator"""
        @tagged('slow')
        class FakeClassA(TransactionCase):
            pass
        @tagged('nightly')
        class FakeClassB(FakeClassA):
            pass
        fc = FakeClassB()
        # 'slow' from the parent decorator is not inherited by the subclass.
        self.assertEqual(fc.test_tags, {'at_install', 'standard', 'nightly'})
        self.assertEqual(fc.test_module, 'base')
        class FakeClassC(FakeClassA):
            pass
        fc = FakeClassC()
        self.assertEqual(fc.test_tags, {'at_install', 'standard'})
        self.assertEqual(fc.test_module, 'base')
    def test_untagging(self):
        """Test that one can remove the 'standard' tag"""
        @tagged('-standard')
        class FakeClassA(TransactionCase):
            pass
        fc = FakeClassA()
        self.assertEqual(fc.test_tags, {'at_install'})
        self.assertEqual(fc.test_module, 'base')
        @tagged('-standard', '-base', '-at_install', 'post_install')
        class FakeClassB(TransactionCase):
            pass
        fc = FakeClassB()
        self.assertEqual(fc.test_tags, {'post_install'})
        @tagged('-standard', '-base', 'fast')
        class FakeClassC(TransactionCase):
            pass
        fc = FakeClassC()
        self.assertEqual(fc.test_tags, {'fast', 'at_install'})
@tagged('nodatabase')
class TestSelector(TransactionCase):
    """ Parser tests for TagsSelector: each spec is parsed into include and
    exclude sets of (tag, module, class, method) tuples. """
    def test_selector_parser(self):
        """Test the parser part of the TagsSelector class"""
        tags = TagsSelector('+slow')
        self.assertEqual({('slow', None, None, None),}, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('+slow,nightly')
        self.assertEqual({('slow', None, None, None), ('nightly', None, None, None)}, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('+slow,-standard')
        self.assertEqual({('slow', None, None, None),}, tags.include)
        self.assertEqual({('standard', None, None, None),}, tags.exclude)
        # same with space after the comma
        tags = TagsSelector('+slow, -standard')
        self.assertEqual({('slow', None, None, None),}, tags.include)
        self.assertEqual({('standard', None, None, None),}, tags.exclude)
        # same with space before and after the comma
        tags = TagsSelector('+slow , -standard')
        self.assertEqual({('slow', None, None, None), }, tags.include)
        self.assertEqual({('standard', None, None, None), }, tags.exclude)
        tags = TagsSelector('+slow ,-standard,+js')
        self.assertEqual({('slow', None, None, None),('js', None, None, None)}, tags.include)
        self.assertEqual({('standard', None, None, None),}, tags.exclude)
        # without +
        tags = TagsSelector('slow, ')
        self.assertEqual({('slow', None, None, None), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        # duplicates
        tags = TagsSelector('+slow,-standard, slow,-standard ')
        self.assertEqual({('slow', None, None, None), }, tags.include)
        self.assertEqual({('standard', None, None, None), }, tags.exclude)
        tags = TagsSelector('')
        self.assertEqual(set(), tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('/module') # all standard test of a module
        self.assertEqual({('standard', 'module', None, None), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('*/module') # all tests of a module
        self.assertEqual({(None, 'module', None, None), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector(':class') # all standard test of a class
        self.assertEqual({('standard', None, 'class', None), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('.method')
        self.assertEqual({('standard', None, None, 'method'), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector(':class.method')
        self.assertEqual({('standard', None, 'class', 'method'), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('/module:class.method') # only a specific test func in a module (standard)
        self.assertEqual({('standard', 'module', 'class', 'method'), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('*/module:class.method') # only a specific test func in a module
        self.assertEqual({(None, 'module', 'class', 'method'), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('-/module:class.method') # disable a specific test func in a module
        self.assertEqual({('standard', None, None, None), }, tags.include) # all standard
        self.assertEqual({(None, 'module', 'class', 'method'), }, tags.exclude) # except the test func
        tags = TagsSelector('-*/module:class.method')
        self.assertEqual({('standard', None, None, None), }, tags.include)
        self.assertEqual({(None, 'module', 'class', 'method'), }, tags.exclude)
        tags = TagsSelector('tag/module')
        self.assertEqual({('tag', 'module', None, None), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('tag.method')
        self.assertEqual({('tag', None, None, 'method'), }, tags.include)
        self.assertEqual(set(), tags.exclude)
        tags = TagsSelector('*/module,-standard') # all non standard test of a module
        self.assertEqual({(None, 'module', None, None), }, tags.include) # all in module
        self.assertEqual({('standard', None, None, None), }, tags.exclude) # except standard ones
@tagged('nodatabase')
class TestSelectorSelection(TransactionCase):
    """ End-to-end checks of TagsSelector.check() against classes decorated
    with various tag combinations. """
    def test_selector_selection(self):
        """Test check_tags use cases"""
        class Test_A(TransactionCase):
            pass
        @tagged('stock')
        class Test_B(BaseCase):
            pass
        @tagged('stock', 'slow')
        class Test_C(BaseCase):
            pass
        @tagged('standard', 'slow')
        class Test_D(BaseCase):
            pass
        @tagged('-at_install', 'post_install')
        class Test_E(TransactionCase):
            pass
        no_tags_obj = Test_A()
        stock_tag_obj = Test_B()
        multiple_tags_obj = Test_C()
        multiple_tags_standard_obj = Test_D()
        post_install_obj = Test_E()
        # if 'standard' is not explicitly removed, tests without tags are
        # considered tagged standard and they are run by default if
        # not explicitly deselected with '-standard' or if 'standard' is not
        # selected along with another test tag
        # same as "--test-tags=''" parameters:
        tags = TagsSelector('')
        self.assertFalse(tags.check(no_tags_obj))
        # same as "--test-tags '+slow'":
        tags = TagsSelector('+slow')
        self.assertFalse(tags.check(no_tags_obj))
        # same as "--test-tags '+slow,+fake'":
        tags = TagsSelector('+slow,fake')
        self.assertFalse(tags.check(no_tags_obj))
        # same as "--test-tags '+slow,+standard'":
        tags = TagsSelector('slow,standard')
        # BUG FIX: the original line was `self.assertTrue(no_tags_obj)`, which
        # asserts the truthiness of the object itself (always true) and never
        # exercised the selector; assert the actual check() result instead.
        self.assertTrue(tags.check(no_tags_obj))
        # same as "--test-tags '+slow,-standard'":
        tags = TagsSelector('slow,-standard')
        self.assertFalse(tags.check(no_tags_obj))
        # same as "--test-tags '-slow,-standard'":
        tags = TagsSelector('-slow,-standard')
        self.assertFalse(tags.check(no_tags_obj))
        # same as "--test-tags '-slow,+standard'":
        tags = TagsSelector('-slow,+standard')
        self.assertTrue(tags.check(no_tags_obj))
        tags = TagsSelector('')
        self.assertFalse(tags.check(stock_tag_obj))
        tags = TagsSelector('slow')
        self.assertFalse(tags.check(stock_tag_obj))
        tags = TagsSelector('standard')
        self.assertTrue(tags.check(stock_tag_obj))
        tags = TagsSelector('slow,standard')
        self.assertTrue(tags.check(stock_tag_obj))
        tags = TagsSelector('slow,-standard')
        self.assertFalse(tags.check(stock_tag_obj))
        tags = TagsSelector('+stock')
        self.assertTrue(tags.check(stock_tag_obj))
        tags = TagsSelector('stock,fake')
        self.assertTrue(tags.check(stock_tag_obj))
        tags = TagsSelector('stock,standard')
        self.assertTrue(tags.check(stock_tag_obj))
        tags = TagsSelector('-stock')
        self.assertFalse(tags.check(stock_tag_obj))
        tags = TagsSelector('')
        self.assertFalse(tags.check(multiple_tags_obj))
        tags = TagsSelector('-stock')
        self.assertFalse(tags.check(multiple_tags_obj))
        tags = TagsSelector('-slow')
        self.assertFalse(tags.check(multiple_tags_obj))
        tags = TagsSelector('slow')
        self.assertTrue(tags.check(multiple_tags_obj))
        tags = TagsSelector('slow,stock')
        self.assertTrue(tags.check(multiple_tags_obj))
        tags = TagsSelector('-slow,stock')
        self.assertFalse(tags.check(multiple_tags_obj))
        tags = TagsSelector('slow,stock,-slow')
        self.assertFalse(tags.check(multiple_tags_obj))
        tags = TagsSelector('')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('standard')
        self.assertTrue(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('slow')
        self.assertTrue(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('slow,fake')
        self.assertTrue(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('-slow')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('-standard')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('-slow,-standard')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('standard,-slow')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        tags = TagsSelector('slow,-standard')
        self.assertFalse(tags.check(multiple_tags_standard_obj))
        # Mimic the real post_install use case
        # That uses a second tags selector
        tags = TagsSelector('standard')
        position = TagsSelector('post_install')
        self.assertTrue(tags.check(post_install_obj) and position.check(post_install_obj))
| 35.624633 | 12,148 |
38,071 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import AccessError, ValidationError
from odoo.tools import mute_logger
from odoo.tools.translate import quote, unquote, xml_translate, html_translate
from odoo.tests.common import TransactionCase, BaseCase, new_test_user
from psycopg2 import IntegrityError
class TranslationToolsTestCase(BaseCase):
    """ Unit tests for the translation helpers quote/unquote and
    xml_translate/html_translate. No database access is needed. """
    def assertItemsEqual(self, a, b, msg=None):
        # Order-insensitive comparison (Python 3 dropped assertItemsEqual).
        self.assertEqual(sorted(a), sorted(b), msg)
    def test_quote_unquote(self):
        def test_string(str):
            quoted = quote(str)
            #print "\n1:", repr(str)
            #print "2:", repr(quoted)
            unquoted = unquote("".join(quoted.split('"\n"')))
            #print "3:", repr(unquoted)
            self.assertEqual(str, unquoted)
        test_string("""test \nall kinds\n \n o\r
                   \\\\ nope\n\n"
                 """)
        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment
        self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
                   \\\\nope\n\n"
                 """)
    def test_translate_xml_base(self):
        """ Test xml_translate() without formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah blah blah</h1>
                        Put some more text here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah', 'Put some more text here'])
    def test_translate_xml_text(self):
        """ Test xml_translate() on plain text. """
        terms = []
        source = "Blah blah blah"
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms, [source])
    def test_translate_xml_unicode(self):
        """ Test xml_translate() on plain text with unicode characters. """
        terms = []
        source = u"Un heureux évènement"
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms, [source])
    def test_translate_xml_text_entity(self):
        """ Test xml_translate() on plain text with HTML escaped entities. """
        terms = []
        source = "Blah&nbsp;blah&nbsp;blah"
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms, [source])
    def test_translate_xml_inline1(self):
        """ Test xml_translate() with formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah <i>blah</i> blah</h1>
                        Put some <b>more text</b> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
    def test_translate_xml_inline2(self):
        """ Test xml_translate() with formatting elements embedding other elements. """
        terms = []
        source = """<form string="Form stuff">
                        <b><h1>Blah <i>blah</i> blah</h1></b>
                        Put <em>some <b>more text</b></em> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
    def test_translate_xml_inline3(self):
        """ Test xml_translate() with formatting elements without actual text. """
        terms = []
        source = """<form string="Form stuff">
                        <div>
                            <span class="before"/>
                            <h1>Blah blah blah</h1>
                            <span class="after">
                                <i class="hack"/>
                            </span>
                        </div>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah'])
    def test_translate_xml_inline4(self):
        """ Test xml_translate() with inline elements with translated attrs only. """
        terms = []
        source = """<form string="Form stuff">
                        <div>
                            <label for="stuff"/>
                            <span class="fa fa-globe" title="Title stuff"/>
                        </div>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', '<span class="fa fa-globe" title="Title stuff"/>'])
    def test_translate_xml_inline5(self):
        """ Test xml_translate() with inline elements with empty translated attrs only. """
        terms = []
        source = """<form string="Form stuff">
                        <div>
                            <label for="stuff"/>
                            <span class="fa fa-globe" title=""/>
                        </div>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms, ['Form stuff'])
    def test_translate_xml_t(self):
        """ Test xml_translate() with t-* attributes. """
        terms = []
        source = """<t t-name="stuff">
                        stuff before
                        <span t-field="o.name"/>
                        stuff after
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])
    def test_translate_xml_off(self):
        """ Test xml_translate() with attribute translate="off". """
        terms = []
        source = """<div>
                        stuff before
                        <div t-translation="off">Do not translate this</div>
                        stuff after
                    </div>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])
    def test_translate_xml_attribute(self):
        """ Test xml_translate() with <attribute> elements. """
        terms = []
        source = """<field name="foo" position="attributes">
                        <attribute name="string">Translate this</attribute>
                        <attribute name="option">Do not translate this</attribute>
                    </field>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Translate this'])
    def test_translate_xml_a(self):
        """ Test xml_translate() with <a> elements. """
        terms = []
        source = """<t t-name="stuff">
                        <ul class="nav navbar-nav">
                            <li class="nav-item">
                                <a class="nav-link oe_menu_leaf" href="/web#menu_id=42&action=54">
                                    <span class="oe_menu_text">Blah</span>
                                </a>
                            </li>
                        </ul>
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['<span class="oe_menu_text">Blah</span>'])
    def test_translate_xml_with_namespace(self):
        """ Test xml_translate() on elements with namespaces. """
        terms = []
        # do not split the long line below, otherwise the result will not match
        source = """<Invoice xmlns:cac="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2" xmlns:cbc="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2" xmlns="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2">
                        <cbc:UBLVersionID t-esc="version_id"/>
                        <t t-foreach="[1, 2, 3, 4]" t-as="value">
                            Oasis <cac:Test t-esc="value"/>
                        </t>
                    </Invoice>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms, ['Oasis'])
        result = xml_translate(lambda term: term, source)
        self.assertEqual(result, source)
    def test_translate_xml_invalid_translations(self):
        """ Test xml_translate() with invalid translations. """
        source = """<form string="Form stuff">
                        <h1>Blah <i>blah</i> blah</h1>
                        Put some <b>more text</b> here
                        <field name="foo"/>
                    </form>"""
        translations = {
            "Put some <b>more text</b> here": "Mettre <b>plus de texte</i> ici",
        }
        expect = """<form string="Form stuff">
                        <h1>Blah <i>blah</i> blah</h1>
                        Mettre <b>plus de texte ici
                        </b><field name="foo"/>
                    </form>"""
        result = xml_translate(translations.get, source)
        self.assertEqual(result, expect)
    def test_translate_html(self):
        """ Test html_translate(). """
        source = """<blockquote>A <h2>B</h2> C</blockquote>"""
        result = html_translate(lambda term: term, source)
        self.assertEqual(result, source)
    def test_translate_html_i(self):
        """ Test xml_translate() and html_translate() with <i> elements. """
        source = """<p>A <i class="fa-check"></i> B</p>"""
        result = xml_translate(lambda term: term, source)
        self.assertEqual(result, """<p>A <i class="fa-check"/> B</p>""")
        result = html_translate(lambda term: term, source)
        self.assertEqual(result, source)
class TestTranslation(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env['res.lang']._activate_lang('fr_FR')
cls.env.ref('base.module_base')._update_translations(['fr_FR'])
cls.customers = cls.env['res.partner.category'].create({'name': 'Customers'})
cls.env['ir.translation'].create({
'type': 'model',
'name': 'res.partner.category,name',
'module': 'base',
'lang': 'fr_FR',
'res_id': cls.customers.id,
'value': 'Clients',
'state': 'translated',
})
def test_101_create_translated_record(self):
category = self.customers.with_context({})
self.assertEqual(category.name, 'Customers', "Error in basic name_get")
category_fr = category.with_context({'lang': 'fr_FR'})
self.assertEqual(category_fr.name, 'Clients', "Translation not found")
def test_102_duplicate_record(self):
category = self.customers.with_context({'lang': 'fr_FR'}).copy()
category_no = category.with_context({})
self.assertEqual(category_no.name, 'Customers', "Duplication did not set untranslated value")
category_fr = category.with_context({'lang': 'fr_FR'})
self.assertEqual(category_fr.name, 'Clients', "Did not found translation for initial value")
translation_fr = self.env['ir.translation'].search([
('name', '=', 'res.partner.category,name'),
('res_id', '=', category.id),
('lang', '=', 'fr_FR'),
])
self.assertEqual(translation_fr.src, 'Customers', "Did not set English version as source")
def test_103_duplicate_record_fr(self):
category = self.customers.with_context({'lang': 'fr_FR'}).copy({'name': 'Clients (copie)'})
self.assertEqual(category.env.context.get('lang'), 'fr_FR')
category_no = category.with_context({})
self.assertEqual(category_no.name, 'Clients (copie)', "Duplication should set untranslated value")
category_fr = category.with_context({'lang': 'fr_FR'})
self.assertEqual(category_fr.name, 'Clients (copie)', "Did not used default value for translated value")
translation_fr = self.env['ir.translation'].search([
('name', '=', 'res.partner.category,name'),
('res_id', '=', category.id),
('lang', '=', 'fr_FR'),
])
self.assertEqual(translation_fr.src, 'Clients (copie)', "Did not set new name as source")
def test_104_orderby_translated_field(self):
""" Test search ordered by a translated field. """
# create a category with a French translation
padawans = self.env['res.partner.category'].create({'name': 'Padawans'})
padawans_fr = padawans.with_context(lang='fr_FR')
padawans_fr.write({'name': 'Apprentis'})
# search for categories, and sort them by (translated) name
categories = padawans_fr.search([('id', 'in', [self.customers.id, padawans.id])], order='name')
self.assertEqual(categories.ids, [padawans.id, self.customers.id],
"Search ordered by translated name should return Padawans (Apprentis) before Customers (Clients)")
def test_105_duplicated_translation(self):
""" Test synchronizing translations with duplicated source """
# create a category with a French translation
padawans = self.env['res.partner.category'].create({'name': 'Padawan'})
self.env['ir.translation'].create({
'type': 'model',
'name': 'res.partner.category,name',
'module':'base',
'lang': 'fr_FR',
'res_id': padawans.id,
'value': 'Apprenti',
'state': 'translated',
})
# change name and insert a duplicate manually
padawans.write({'name': 'Padawans'})
with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
with self.env.cr.savepoint():
self.env['ir.translation'].create({
'type': 'model',
'name': 'res.partner.category,name',
'module':'base',
'lang': 'fr_FR',
'res_id': padawans.id,
'value': 'Apprentis',
'state': 'translated',
})
self.env['ir.translation'].translate_fields('res.partner.category', padawans.id, 'name')
translations = self.env['ir.translation'].search([
('res_id', '=', padawans.id), ('name', '=', 'res.partner.category,name'), ('lang', '=', 'fr_FR'),
])
self.assertEqual(len(translations), 1, "Translations were not duplicated after `translate_fields` call")
self.assertEqual(translations.value, "Apprenti", "The first translation must stay")
    def test_106_en_us_translation(self):
        """ Test writing a new master value through en_US: every translation
        source (src) must be updated while translated values are kept. """
        # create a category without any translation yet
        cheese = self.env['res.partner.category'].create({'name': 'Cheese'})
        # generate empty translation rows for every installed language
        self.env['ir.translation'].translate_fields('res.partner.category', cheese.id, 'name')
        translations = self.env['ir.translation'].search([('name', '=', 'res.partner.category,name'), ('res_id', '=', cheese.id)], order='lang')
        self.assertEqual(len(translations), 2)
        self.assertRecordValues(translations,
            [{'lang': 'en_US', 'src': 'Cheese', 'value': ''},
             {'lang': 'fr_FR', 'src': 'Cheese', 'value': ''}])
        # Translate in both language
        translations[0].value = 'The Cheese'
        translations[1].value = 'Fromage'
        # lang=None bypass translation system
        self.assertEqual(cheese.with_context(lang=None).name, 'Cheese')
        self.assertEqual(cheese.with_context(lang='fr_FR').name, 'Fromage')
        self.assertEqual(cheese.with_context(lang='en_US').name, 'The Cheese')
        cheese.flush()
        # set a new master value
        cheese.with_context(lang='en_US').write({'name': 'Delicious Cheese'})
        # every src must be updated
        self.assertEqual(cheese.with_context(lang=None).name, 'Delicious Cheese')
        self.assertRecordValues(translations,
            [{'lang': 'en_US', 'src': 'Delicious Cheese', 'value': 'Delicious Cheese'},
             {'lang': 'fr_FR', 'src': 'Delicious Cheese', 'value': 'Fromage'}])
        self.assertEqual(cheese.with_context(lang=None).name, 'Delicious Cheese')
        self.assertEqual(cheese.with_context(lang='fr_FR').name, 'Fromage')
        self.assertEqual(cheese.with_context(lang='en_US').name, 'Delicious Cheese')
def test_107_duplicate_record_en(self):
category = self.customers.with_context({'lang': 'en_US'}).copy()
category_no = category.with_context({})
self.assertEqual(category_no.name, 'Customers', "Duplication did not set untranslated value")
category_fr = category.with_context({'lang': 'fr_FR'})
self.assertEqual(category_fr.name, 'Clients', "Did not found translation for initial value")
translation_fr = self.env['ir.translation'].search([
('name', '=', 'res.partner.category,name'),
('res_id', '=', category.id),
('lang', '=', 'fr_FR'),
])
self.assertEqual(translation_fr.src, 'Customers', "Did not set English version as source")
class TestTranslationWrite(TransactionCase):
    """Check how writing translated fields creates/updates ir.translation rows.

    Fixes applied: the assertion message "Test did not started with expected
    languages" (grammatically wrong, and inconsistent with the phrasing already
    used in test_04_fr_multi_no_en) is normalized everywhere to
    "Test did not start with the expected languages"; the docstring and
    comments of test_write were copy-pasted from test_orphan and are replaced
    with accurate ones.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.category = cls.env['res.partner.category'].create({'name': 'Reblochon'})

    def test_01_en(self):
        """Writing in the only installed language must not create a translation."""
        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('en_US', 'English (US)')], langs, "Test did not start with the expected languages")

        self.category.with_context(lang='en_US').write({'name': 'English Name'})

        name = self.category.with_context(lang=None).read(['name'])
        self.assertEqual(name[0]['name'], "English Name", "Reference field not updated")

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.partner.category,name'),
            ('res_id', '=', self.category.id),
            ('lang', '=', 'en_US'),
        ])
        self.assertEqual(len(translations), 0, "No English translation should be created when writing in English")

    def test_02_en_translated(self):
        """Writing in English updates the existing en_US translation and the source."""
        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('en_US', 'English (US)')], langs, "Test did not start with the expected languages")
        translation = self.env['ir.translation'].create({
            'type': 'model',
            'name': 'res.partner.category,name',
            'lang': 'en_US',
            'res_id': self.category.id,
            'src': 'Reblochon',
            'value': 'Translated Name',
            'state': 'translated',
        })

        self.category.with_context(lang='en_US').write({'name': 'English Name'})

        translation_value = translation.read(['value'])
        self.assertEqual(translation_value[0]['value'], "English Name", "Existing translation was not updated")

        source_name = self.category.with_context(lang=None).read(['name'])
        self.assertEqual(source_name[0]['name'], "English Name", "Reference field not updated")

    def test_03_fr_single(self):
        """Writing in the single installed (non-English) language updates the source only."""
        self.env['res.lang']._activate_lang('fr_FR')
        self.env['res.partner'].with_context(active_test=False).search([]).write({'lang': 'fr_FR'})
        self.env.ref('base.lang_en').active = False

        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('fr_FR', 'French / Français')], langs, "Test did not start with the expected languages")

        self.category.with_context(lang='fr_FR').write({'name': 'French Name'})
        source_name = self.category.with_context(lang=None).read(['name'])
        self.assertEqual(source_name[0]['name'], "French Name", "Reference field not updated")

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.partner.category,name'),
            ('res_id', '=', self.category.id),
            ('lang', '=', 'fr_FR'),
        ])
        self.assertEqual(len(translations), 0, "No French translation should be created when writing in French")

    def test_04_fr_multi(self):
        """With en_US + fr_FR installed, writes land on the right translation rows."""
        self.env['res.lang']._activate_lang('fr_FR')

        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('en_US', 'English (US)'), ('fr_FR', 'French / Français')], langs,
            "Test did not start with the expected languages")
        self.env['ir.translation'].create({
            'type': 'model',
            'name': 'res.partner.category,name',
            'lang': 'en_US',
            'res_id': self.category.id,
            'src': 'Reblochon',
            'value': 'Translated Name',
            'state': 'translated',
        })

        self.category.with_context(lang='fr_FR').write({'name': 'French Name'})
        self.category.with_context(lang='en_US').write({'name': 'English Name'})

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.partner.category,name'),
            ('res_id', '=', self.category.id),
        ], order='lang')
        self.assertRecordValues(translations, [
            {'src': 'English Name', 'value': 'English Name', 'lang': 'en_US'},
            {'src': 'English Name', 'value': 'French Name', 'lang': 'fr_FR'}
        ])

    def test_04_fr_multi_no_en(self):
        """Multiple languages, English uninstalled: lang=None writes update sources."""
        self.env['res.lang']._activate_lang('fr_FR')
        self.env['res.lang']._activate_lang('es_ES')
        self.env['res.partner'].with_context(active_test=False).search([]).write({'lang': 'fr_FR'})
        self.env.ref('base.lang_en').active = False

        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('fr_FR', 'French / Français'), ('es_ES', 'Spanish / Español')], langs,
            "Test did not start with the expected languages")

        self.category.with_context(lang='fr_FR').write({'name': 'French Name'})
        self.category.with_context(lang='es_ES').write({'name': 'Spanish Name'})
        self.category.with_context(lang=None).write({'name': 'None Name'})

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.partner.category,name'),
            ('res_id', '=', self.category.id),
        ], order='lang')
        self.assertRecordValues(translations, [
            {'src': 'None Name', 'value': 'Spanish Name', 'lang': 'es_ES'},
            {'src': 'None Name', 'value': 'French Name', 'lang': 'fr_FR'},
        ])

    def test_05_remove_multi_empty_string(self):
        self._test_05_remove_multi("")

    def test_05_remove_multi_false(self):
        self._test_05_remove_multi(False)

    def _test_05_remove_multi(self, empty_value):
        """Emptying a translated field wipes the value in every language.

        :param empty_value: the falsy value written to the field ("" or False)
        """
        self.env['res.lang']._activate_lang('fr_FR')

        langs = self.env['res.lang'].get_installed()
        self.assertEqual([('en_US', 'English (US)'), ('fr_FR', 'French / Français')], langs,
            "Test did not start with the expected languages")

        belgium = self.env.ref('base.be')
        # vat_label is translatable and not required
        belgium.with_context(lang='en_US').write({'vat_label': 'VAT'})
        belgium.with_context(lang='fr_FR').write({'vat_label': 'TVA'})

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.country,vat_label'),
            ('res_id', '=', belgium.id),
        ])
        self.assertEqual(len(translations), 2, "Translations are not created")

        # remove the value
        belgium.with_context(lang='fr_FR').write({'vat_label': empty_value})
        # should recover the initial value from db
        self.assertFalse(
            belgium.with_context(lang='fr_FR').vat_label,
            "Value was not reset"
        )
        self.assertFalse(
            belgium.with_context(lang='en_US').vat_label,
            "Value was not reset in other languages"
        )
        self.assertFalse(
            belgium.with_context(lang=None).vat_label,
            "Value was not reset on the field model"
        )

        translations = self.env['ir.translation'].search([
            ('name', '=', 'res.country,vat_label'),
            ('res_id', '=', belgium.id),
        ])
        self.assertEqual(len(translations), 0, "Translations were not removed")

        # simulate remove the English translation in the interface
        belgium.with_context(lang='fr_FR').write({'vat_label': 'TVA'})
        belgium.with_context(lang='en_US').write({'vat_label': 'VAT'})
        self.env['ir.translation'].translate_fields('res.country', belgium.id, 'vat_label')
        en_translation = self.env['ir.translation'].search([
            ('name', '=', 'res.country,vat_label'),
            ('res_id', '=', belgium.id),
            ('lang', '=', 'en_US'),
        ])
        en_translation.write({'value': ''})

        # should recover the initial value from db
        self.assertEqual(
            "TVA", belgium.with_context(lang='fr_FR').vat_label,
            "French translation was not kept"
        )
        self.assertEqual(
            "VAT", belgium.with_context(lang='en_US').vat_label,
            "Did not fallback to source when reset"
        )

    def test_orphan(self):
        """ What happens with orphan translations. """
        self.env['res.lang']._activate_lang('fr_FR')

        # create a user with access rights on partner categories
        user = new_test_user(self.env, 'deleter')
        group = self.env.ref('base.group_partner_manager')
        user.groups_id = [(4, group.id)]

        # this access rule triggers a MissingError
        self.env['ir.rule'].create({
            'model_id': self.env['ir.model']._get_id('res.partner.category'),
            'groups': [(4, group.id)],
            'domain_force': "[('name', 'ilike', 'e')]",
        })

        # create a translation, and delete the record from the database
        translation = self.env['ir.translation'].create({
            'type': 'model',
            'name': 'res.partner.category,name',
            'lang': 'fr_FR',
            'res_id': self.category.id,
            'src': 'Reblochon',
            'value': 'Parfum Exquis',
            'state': 'translated',
        })
        translation.flush()
        translation.invalidate_cache()
        self.cr.execute("DELETE FROM res_partner_category WHERE id=%s", [self.category.id])

        # deleting the translation should be possible, provided the user has
        # access rights on the translation's model
        user0 = new_test_user(self.env, 'cannot modify category')
        with self.assertRaises(AccessError):
            translation.with_user(user0).unlink()
        translation.with_user(user).unlink()

        # however, creating orphan translations should not be possible
        with self.assertRaises(ValidationError):
            translation.with_user(user).create({
                'type': 'model',
                'name': 'res.partner.category,name',
                'lang': 'fr_FR',
                'res_id': self.category.id,
                'src': 'Reblochon',
                'value': 'Parfum Exquis',
                'state': 'translated',
            })

    def test_write(self):
        """ Unlinking a translation requires write access on the translated record. """
        self.env['res.lang']._activate_lang('fr_FR')

        # create a user with access rights on window actions
        user = new_test_user(self.env, 'updater')
        group = self.env.ref('base.group_system')
        user.groups_id = [(4, group.id)]

        action = user.env["ir.actions.act_window"].create({
            "name": "Dummy Action",
            "res_model": "res.users",
            "help": "<p>Cheese</p>",
        })

        # create a translation of the action's help
        translation = user.env['ir.translation'].create({
            'type': 'model_terms',
            'name': 'ir.actions.act_window,help',
            'lang': 'fr_FR',
            'res_id': action.id,
            'src': 'Cheese',
            'value': 'Fromage',
            'state': 'translated',
        })
        translation.flush()
        translation.invalidate_cache()

        # deleting the translation should be possible, provided the user has
        # access rights on the translation's model
        user0 = new_test_user(self.env, 'cannot modify an action')
        with self.assertRaises(AccessError):
            translation.with_user(user0).unlink()
        translation.with_user(user).unlink()

    def test_field_selection(self):
        """ Test translations of field selections. """
        field = self.env['ir.model']._fields['state']
        self.assertEqual([key for key, _ in field.selection], ['manual', 'base'])

        ir_field = self.env['ir.model.fields']._get('ir.model', 'state')
        ir_field = ir_field.with_context(lang='fr_FR')
        ir_field.selection_ids[0].name = 'Custo'
        ir_field.selection_ids[1].name = 'Pas touche!'

        # without lang in context, the python-level selection is returned
        fg = self.env['ir.model'].fields_get(['state'])
        self.assertEqual(fg['state']['selection'], field.selection)

        # with lang='fr_FR', the translated labels are returned
        fg = self.env['ir.model'].with_context(lang='fr_FR').fields_get(['state'])
        self.assertEqual(fg['state']['selection'],
                         [('manual', 'Custo'), ('base', 'Pas touche!')])

    def test_fields_view_get(self):
        """ Test translations of field descriptions in fields_view_get(). """
        self.env['res.lang']._activate_lang('fr_FR')

        # add translation for the string of field ir.model.name
        ir_model_field = self.env['ir.model.fields']._get('ir.model', 'name')
        LABEL = "Description du Modèle"
        self.env['ir.translation'].create({
            'type': 'model',
            'name': 'ir.model.fields,field_description',
            'lang': 'fr_FR',
            'res_id': ir_model_field.id,
            'src': 'Name',
            'value': LABEL,
        })

        # check that fields_get() returns the expected label
        model = self.env['ir.model'].with_context(lang='fr_FR')
        info = model.fields_get(['name'])
        self.assertEqual(info['name']['string'], LABEL)

        # check that fields_view_get() also returns the expected label
        info = model.fields_view_get()['fields']
        self.assertEqual(info['name']['string'], LABEL)
class TestXMLTranslation(TransactionCase):
    """Translation handling on XML fields (ir.ui.view.arch_db): copy,
    whitespace, and synchronization of translations when source terms change.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.env['res.lang']._activate_lang('fr_FR')
        cls.env['res.lang']._activate_lang('nl_NL')
        cls.env.ref('base.module_base')._update_translations(['fr_FR', 'nl_NL'])

    def create_view(self, archf, terms, **kwargs):
        """Create a res.partner view with arch ``archf % terms`` and one
        'model_terms' translation per source term for each ``lang=translated
        terms`` pair given in ``kwargs``; return the view."""
        view = self.env['ir.ui.view'].create({
            'name': 'test',
            'model': 'res.partner',
            'arch': archf % terms,
        })
        # DLE P70: `_sync_terms_translations`, which delete translations for which there is no value, is called sooner than before
        # because it's called in `_write`, which is called by `flush`, which is called by the `search`.
        # `arch_db` is in `_write` instead of `create` because `arch_db` is the inverse of `arch`.
        # We need to flush `arch_db` before creating the translations otherwise the translation for which there is no value will be deleted,
        # while the `test_sync_update` specifically needs empty translations
        view.flush()
        for lang, trans_terms in kwargs.items():
            for src, val in zip(terms, trans_terms):
                self.env['ir.translation'].create({
                    'type': 'model_terms',
                    'name': 'ir.ui.view,arch_db',
                    'lang': lang,
                    'res_id': view.id,
                    'src': src,
                    'value': val,
                    'state': 'translated',
                })
        return view

    def test_copy(self):
        """ Create a simple view, fill in translations, and copy it. """
        archf = '<form string="%s"><div>%s</div><div>%s</div></form>'
        terms_en = ('Knife', 'Fork', 'Spoon')
        terms_fr = ('Couteau', 'Fourchette', 'Cuiller')
        view0 = self.create_view(archf, terms_en, fr_FR=terms_fr)

        env_en = self.env(context={})
        env_fr = self.env(context={'lang': 'fr_FR'})

        # check translated field
        self.assertEqual(view0.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view0.with_env(env_fr).arch_db, archf % terms_fr)

        # copy without lang
        view1 = view0.with_env(env_en).copy({})
        self.assertEqual(view1.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view1.with_env(env_fr).arch_db, archf % terms_fr)

        # copy with lang='fr_FR'
        view2 = view0.with_env(env_fr).copy({})
        self.assertEqual(view2.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view2.with_env(env_fr).arch_db, archf % terms_fr)

        # copy with lang='fr_FR' and translate=html_translate
        self.patch(type(self.env['ir.ui.view']).arch_db, 'translate', html_translate)
        view3 = view0.with_env(env_fr).copy({})
        self.assertEqual(view3.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view3.with_env(env_fr).arch_db, archf % terms_fr)

    def test_spaces(self):
        """ Create translations where value has surrounding spaces. """
        archf = '<form string="%s"><div>%s</div><div>%s</div></form>'
        terms_en = ('Knife', 'Fork', 'Spoon')
        terms_fr = (' Couteau', 'Fourchette ', ' Cuiller ')
        self.create_view(archf, terms_en, fr_FR=terms_fr)

    def test_sync(self):
        """ Check translations after minor change in source terms. """
        archf = '<form string="X">%s</form>'
        terms_en = ('Bread and cheeze',)
        terms_fr = ('Pain et fromage',)
        terms_nl = ('Brood and kaas',)
        view = self.create_view(archf, terms_en, en_US=terms_en, fr_FR=terms_fr, nl_NL=terms_nl)

        env_nolang = self.env(context={})
        env_en = self.env(context={'lang': 'en_US'})
        env_fr = self.env(context={'lang': 'fr_FR'})
        env_nl = self.env(context={'lang': 'nl_NL'})

        self.assertEqual(view.with_env(env_nolang).arch_db, archf % terms_en)
        self.assertEqual(view.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch_db, archf % terms_fr)
        self.assertEqual(view.with_env(env_nl).arch_db, archf % terms_nl)

        # modify source term in view (fixed typo in 'cheeze')
        terms_en = ('Bread and cheese',)
        view.with_env(env_en).write({'arch_db': archf % terms_en})

        # check whether translations have been synchronized
        self.assertEqual(view.with_env(env_nolang).arch_db, archf % terms_en)
        self.assertEqual(view.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch_db, archf % terms_fr)
        self.assertEqual(view.with_env(env_nl).arch_db, archf % terms_nl)

        view = self.create_view(archf, terms_fr, en_US=terms_en, fr_FR=terms_fr, nl_NL=terms_nl)
        # modify source term in view in another language with close term
        new_terms_fr = ('Pains et fromage',)
        view.with_env(env_fr).write({'arch_db': archf % new_terms_fr})

        # check whether translations have been synchronized
        self.assertEqual(view.with_env(env_nolang).arch_db, archf % new_terms_fr)
        self.assertEqual(view.with_env(env_en).arch_db, archf % terms_en)
        self.assertEqual(view.with_env(env_fr).arch_db, archf % new_terms_fr)
        self.assertEqual(view.with_env(env_nl).arch_db, archf % terms_nl)

    def test_sync_update(self):
        """ Check translations after major changes in source terms. """
        archf = '<form string="X"><div>%s</div><div>%s</div></form>'
        terms_src = ('Subtotal', 'Subtotal:')
        terms_en = ('', 'Sub total:')
        view = self.create_view(archf, terms_src, en_US=terms_en)

        translations = self.env['ir.translation'].search([
            ('type', '=', 'model_terms'),
            ('name', '=', "ir.ui.view,arch_db"),
            ('res_id', '=', view.id),
        ])
        self.assertEqual(len(translations), 2)

        # modifying the arch should sync existing translations without errors
        new_arch = archf % ('Subtotal', 'Subtotal:<br/>')
        view.write({"arch_db": new_arch})

        translations = self.env['ir.translation'].search([
            ('type', '=', 'model_terms'),
            ('name', '=', "ir.ui.view,arch_db"),
            ('res_id', '=', view.id),
        ])
        # 'Subtotal' being src==value, it will be discarded
        # 'Subtotal:' will be discarded as it matches 'Subtotal' instead of 'Subtotal:<br/>'
        self.assertEqual(len(translations), 0)

    def test_cache_consistency(self):
        """A write without lang in context must be visible in all languages
        (no stale translated value left in the cache)."""
        view = self.env["ir.ui.view"].create({
            "name": "test_translate_xml_cache_invalidation",
            "model": "res.partner",
            "arch": "<form><b>content</b></form>",
        })
        view_fr = view.with_context({"lang": "fr_FR"})
        self.assertIn("<b>", view.arch_db)
        self.assertIn("<b>", view_fr.arch_db)

        # write with no lang, and check consistency in other languages
        view.write({"arch_db": "<form><i>content</i></form>"})
        self.assertIn("<i>", view.arch_db)
        self.assertIn("<i>", view_fr.arch_db)
| 44.674883 | 38,063 |
8,430 | py | PYTHON | 15.0 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys

from odoo.tests import TransactionCase, tagged
from odoo.tools import cloc
XML_TEST = """<!-- Comment -->
<?xml version="1.0" encoding="UTF-8"?>
<odoo>
<node>Line</node>
<!-- Comment -->
<node>Line</node>
<!-- Comment
Multi
Line -->
<![CDATA[
Line
]]>
<![CDATA[
<!-- comment in CDATA -->
cdata Line
yes6]]>
<![CDATA[<!-- not a comment-->]]>
<![CDATA[<!-- not a comment
but counted as is
-->]]>
<!-- <![CDATA[ This is a valid comment ]]> -->
<!-- <![CDATA[ Multi line
comment]]> -->
<record id="my_id" model="model">
<field name="name">name</field>
</record>
<![CDATA[ <!-- no a comment]]>
<node>not a comment but found as is</node>
<!-- comment -->
<node>After closed comment back to normal</node>
</odoo>
"""
PY_TEST_NO_RETURN = '''line = 1
line = 2'''
PY_TEST = '''
# comment 1
def func(): # eol comment 3
""" docstring
"""
pass
def query():
long_query = """
SELECT *
FROM table
WHERE id = 1;
"""
return query
print(i.lineno, i, getattr(i,'s',None), getattr(i,'value',None))
'''
JS_TEST = '''
/*
comment
*/
function() {
return 1+2; // comment
}
function() {
hello = 4; /*
comment
*/
console.log(hello);
regex = /\/*h/;
legit_code_counted = 1;
regex2 = /.*/;
}
'''
CSS_TEST = '''
/*
Comment
*/
p {
text-align: center;
color: red;
text-overflow: ' /* ';
}
#content, #footer, #supplement {
position: absolute;
left: 510px;
width: 200px;
text-overflow: ' */ ';
}
'''
SCSS_TEST = '''
/*
Comment
*/
// Standalone list views
.o_content > .o_list_view > .table-responsive > .table {
// List views always have the table-sm class, maybe we should remove
// it (and consider it does not exist) and change the default table paddings
@include o-list-view-full-width-padding($base: $table-cell-padding-sm, $ratio: 2);
&:not(.o_list_table_grouped) {
@include media-breakpoint-up(xl) {
@include o-list-view-full-width-padding($base: $table-cell-padding-sm, $ratio: 2.5);
}
}
.o_optional_columns_dropdown_toggle {
padding: 8px 10px;
}
}
#content, #footer, #supplement {
text-overflow: '/*';
left: 510px;
width: 200px;
text-overflow: '*/';
}
'''
class TestClocCustomization(TransactionCase):
    """Check which database customizations (manual fields, server actions)
    are counted by cloc under the 'odoo/studio' bucket, depending on their
    external identifiers (xml_ids)."""

    def create_xml_id(self, module, name, rec):
        # attach an external identifier to ``rec``, as if it belonged to ``module``
        self.env['ir.model.data'].create({
            'name': name,
            'model': rec._name,
            'res_id': rec.id,
            'module': module,
        })

    def create_field(self, name):
        # create a manual, non-stored computed field on res.partner
        # NOTE(review): the compute source always assigns 'x_invoice_count'
        # regardless of ``name`` — presumably only the code is counted, never
        # executed; confirm.
        field = self.env['ir.model.fields'].with_context(studio=True).create({
            'name': name,
            'field_description': name,
            'model': 'res.partner',
            'model_id': self.env.ref('base.model_res_partner').id,
            'ttype': 'integer',
            'store': False,
            'compute': "for rec in self: rec['x_invoice_count'] = 10",
        })
        # Simulate the effect of https://github.com/odoo/odoo/commit/9afce4805fc8bac45fdba817488aa867fddff69b
        # Updating a module create xml_id of the module even for manual field if it's the original module
        # of the model
        self.create_xml_id('base', name, field)
        return field

    def create_server_action(self, name):
        # create a minimal python-code server action on res.partner
        return self.env['ir.actions.server'].create({
            'name': name,
            'code': """
for rec in records:
    rec['name'] = test
""",
            'state': 'code',
            'type': 'ir.actions.server',
            'model_id': self.env.ref('base.model_res_partner').id,
        })

    def test_ignore_auto_generated_computed_field(self):
        """
        Check that we count custom fields with no module or studio not auto generated
        Having an xml_id but no existing module is consider as not belonging to a module
        """
        f1 = self.create_field('x_invoice_count')
        self.create_xml_id('studio_customization', 'invoice_count', f1)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 0, 'Studio auto generated count field should not be counted in cloc')
        f2 = self.create_field('x_studio_custom_field')
        self.create_xml_id('studio_customization', 'studio_custom', f2)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 1, 'Count other studio computed field')
        self.create_field('x_custom_field')
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 2, 'Count fields without xml_id')
        f4 = self.create_field('x_custom_field_export')
        self.create_xml_id('__export__', 'studio_custom', f4)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 3, 'Count fields with xml_id but without module')

    def test_several_xml_id(self):
        # a record with several xml_ids must be counted at most once
        sa = self.create_server_action("Test double xml_id")
        self.create_xml_id("__export__", "first", sa)
        self.create_xml_id("base", "second", sa)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 2, 'Count Should count SA with a non standard xml_id')
        self.create_xml_id("__import__", "third", sa)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 2, 'SA with several xml_id should be counted only once')

    def test_cloc_exclude_xml_id(self):
        # a '__cloc_exclude__' xml_id excludes the record from the count
        sa = self.create_server_action("Test double xml_id")
        self.create_xml_id("__cloc_exclude__", "sa_first", sa)
        self.create_xml_id("__upgrade__", "sa_second", sa)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 0, 'Should not count SA with cloc_exclude xml_id')
        f1 = self.create_field('x_invoice_count')
        self.create_xml_id("__cloc_exclude__", "field_first", f1)
        self.create_xml_id("__upgrade__", "field_second", f1)
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 0, 'Should not count Field with cloc_exclude xml_id')

    def test_field_no_xml_id(self):
        # a manual field with no ir.model.data row at all is still counted
        self.env['ir.model.fields'].create({
            'name': "x_no_xml_id",
            'field_description': "no_xml_id",
            'model': 'res.partner',
            'model_id': self.env.ref('base.model_res_partner').id,
            'ttype': 'integer',
            'store': False,
            'compute': "for rec in self: rec['x_invoice_count'] = 10",
        })
        cl = cloc.Cloc()
        cl.count_customization(self.env)
        self.assertEqual(cl.code.get('odoo/studio', 0), 1, 'Should count field with no xml_id at all')
class TestClocParser(TransactionCase):
    """Unit-test each cloc parser against the module-level fixtures; every
    assertion compares the (code lines, total lines) pair returned."""

    def test_parser(self):
        cl = cloc.Cloc()
        xml_count = cl.parse_xml(XML_TEST)
        self.assertEqual(xml_count, (18, 31))
        py_count = cl.parse_py(PY_TEST_NO_RETURN)
        self.assertEqual(py_count, (2, 2))
        py_count = cl.parse_py(PY_TEST)
        # NOTE(review): the original read `self._python_version`, an attribute
        # defined nowhere in this file; `sys.version_info` is the standard,
        # equivalent way to compare interpreter versions — confirm no such
        # attribute exists on TransactionCase.
        if sys.version_info >= (3, 8, 0):
            # Multi line str lineno returns the beginning of the str
            # in python 3.8, it results in a different count for
            # multi str used in expressions
            self.assertEqual(py_count, (7, 16))
        else:
            self.assertEqual(py_count, (8, 16))
        js_count = cl.parse_js(JS_TEST)
        self.assertEqual(js_count, (10, 17))
        css_count = cl.parse_css(CSS_TEST)
        self.assertEqual(css_count, (11, 17))
        scss_count = cl.parse_scss(SCSS_TEST)
        self.assertEqual(scss_count, (17, 26))
@tagged('post_install', '-at_install')
class TestClocStdNoCusto(TransactionCase):
    def test_no_custo_install(self):
        """Verify that a standard module installation leaves nothing behind
        that cloc would count as database customization."""
        counter = cloc.Cloc()
        counter.count_customization(self.env)
        studio_line_count = counter.code.get('odoo/studio', 0)
        self.assertEqual(studio_line_count, 0, 'Module should not generate customization in database')
| 31.573034 | 8,430 |
1,338 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestMenu(TransactionCase):
    def test_00_menu_deletion(self):
        """Verify that menu deletion works properly when there are child menus, and those
        are indeed made orphans"""
        MenuModel = self.env['ir.ui.menu']

        def new_menu(name, parent=None):
            # small local factory to build the hierarchy
            values = {'name': name}
            if parent is not None:
                values['parent_id'] = parent.id
            return MenuModel.create(values)

        # three-level hierarchy: root -> (child 1, child 2) -> child 2-1
        root = new_menu('Test root')
        first_child = new_menu('Test child 1', root)
        second_child = new_menu('Test child 2', root)
        grandchild = new_menu('Test child 2-1', second_child)
        all_ids = [root.id, first_child.id, second_child.id, grandchild.id]

        # delete and check that direct children are promoted to top-level
        # cfr. explanation in menu.unlink()
        root.unlink()

        # Generic trick necessary for search() calls to avoid hidden menus
        MenuModel = self.env['ir.ui.menu'].with_context({'ir.ui.menu.full_list': True})

        survivors = MenuModel.search([('id', 'in', all_ids)], order="id")
        self.assertEqual([first_child.id, second_child.id, grandchild.id], survivors.ids)

        top_level = MenuModel.search([('id', 'in', all_ids), ('parent_id', '=', False)], order="id")
        self.assertEqual([first_child.id, second_child.id], top_level.ids)
| 44.6 | 1,338 |
1,273 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
from odoo.tools import get_cache_key_counter
class TestOrmcache(TransactionCase):
    def test_ormcache(self):
        """ Test the effectiveness of the ormcache() decorator. """
        IMD = self.env['ir.model.data']
        XMLID = 'base.group_no_one'

        # retrieve the cache, its key and stat counter
        cache, key, counter = get_cache_key_counter(IMD._xmlid_lookup, XMLID)
        hit = counter.hit
        miss = counter.miss

        # clear the caches of ir.model.data: the key must be gone
        IMD.clear_caches()
        self.assertNotIn(key, cache)

        # first lookup after clearing is a cache miss and repopulates the cache
        self.env.ref(XMLID)
        self.assertEqual(counter.hit, hit)
        self.assertEqual(counter.miss, miss + 1)
        self.assertIn(key, cache)

        # second lookup is a cache hit
        self.env.ref(XMLID)
        self.assertEqual(counter.hit, hit + 1)
        self.assertEqual(counter.miss, miss + 1)
        self.assertIn(key, cache)

        # third lookup is another cache hit, no new miss
        self.env.ref(XMLID)
        self.assertEqual(counter.hit, hit + 2)
        self.assertEqual(counter.miss, miss + 1)
        self.assertIn(key, cache)
| 32.641026 | 1,273 |
2,192 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import functools
from odoo.tests.common import BaseCase
from odoo.tools import frozendict
from odoo.tools.func import compose
from odoo import Command
class TestCompose(BaseCase):
    """Tests for odoo.tools.func.compose."""

    def test_basic(self):
        """compose(f, g)(...) forwards the arguments to g, then applies f."""
        def add(x, y):
            return x + y

        stringified_add = compose(str, add)
        self.assertEqual(stringified_add(1, 2), "3")

    def test_decorator(self):
        """ ensure compose() can be partially applied as a decorator
        """
        stringify = functools.partial(compose, str)

        @stringify
        def mul(a, b):
            return a * b

        self.assertEqual(mul(5, 42), u"210")
class TestFrozendict(BaseCase):
    """Tests for odoo.tools.frozendict."""

    def test_frozendict_immutable(self):
        """ Ensure that a frozendict is immutable. """
        frozen = frozendict({'name': 'Joe', 'age': 42})

        # every mutating dict operation must raise:
        # __setitem__, __delitem__, update, setdefault, pop, popitem, clear
        mutations = [
            lambda: frozen.__setitem__('surname', 'Jack'),
            lambda: frozen.__setitem__('name', 'Jack'),
            lambda: frozen.__delitem__('name'),
            lambda: frozen.update({'surname': 'Jack'}),
            lambda: frozen.update({'name': 'Jack'}),
            lambda: frozen.setdefault('surname', 'Jack'),
            lambda: frozen.pop('surname', 'Jack'),
            lambda: frozen.pop('name', 'Jack'),
            lambda: frozen.popitem(),
            lambda: frozen.clear(),
        ]
        for mutate in mutations:
            with self.assertRaises(Exception):
                mutate()

    def test_frozendict_hash(self):
        """ Ensure that a frozendict is hashable. """
        # dict with simple values
        hash(frozendict({'name': 'Joe', 'age': 42}))

        # dict with tuples, lists, and embedded dicts
        hash(frozendict({
            'user_id': (42, 'Joe'),
            'line_ids': [Command.create({'values': [42]})],
        }))
| 33.212121 | 2,192 |
6,033 | py | PYTHON | 15.0 | import base64
import unittest
try:
import magic
except ImportError:
magic = None
from odoo.tests.common import BaseCase
from odoo.tools.mimetypes import guess_mimetype, get_extension
PNG = b'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVQI12P4//8/AAX+Av7czFnnAAAAAElFTkSuQmCC'
GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
BMP = b"""Qk1+AAAAAAAAAHoAAABsAAAAAQAAAAEAAAABABgAAAAAAAQAAAATCwAAEwsAAAAAAAAAAAAAQkdScwAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAD///8A"""
JPG = """/9j/4AAQSkZJRgABAQEASABIAAD//gATQ3JlYXRlZCB3aXRoIEdJTVD/2wBDAP
//////////////////////////////////////////////////////////////////////////////////////2wBDAf///////
///////////////////////////////////////////////////////////////////////////////wgARCAABAAEDAREAAhEB
AxEB/8QAFAABAAAAAAAAAAAAAAAAAAAAAv/EABQBAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhADEAAAAUf/xAAUEAEAAAAAAAA
AAAAAAAAAAAAA/9oACAEBAAEFAn//xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oACAEDAQE/AX//xAAUEQEAAAAAAAAAAAAAAAAAAA
AA/9oACAECAQE/AX//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBAAY/An//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBA
AE/IX//2gAMAwEAAgADAAAAEB//xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oACAEDAQE/EH//xAAUEQEAAAAAAAAAAAAAAAAAAAAA
/9oACAECAQE/EH//xAAUEAEAAAAAAAAAAAAAAAAAAAAA/9oACAEBAAE/EH//2Q=="""
SVG = b"""PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iaXNvLTg4NTktMSI/PjwhRE9DVFlQRSBzdmcgUFVCTElDICItLy9XM0MvL0RURCBTVkcgMjAwMDExMDIvL0VOIlxuICJodHRwOi8vd3d3LnczLm9yZy9UUi8yMDAwL0NSLVNWRy0yMDAwMTEwMi9EVEQvc3ZnLTIwMDAxMTAyLmR0ZCI+PHN2ZyB3aWR0aD0iMTAwJSIgaGVpZ2h0PSIxMDAlIj48ZyB0cmFuc2Zvcm09InRyYW5zbGF0ZSg1MCw1MCkiPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxNTAiIGhlaWdodD0iNTAiIHN0eWxlPSJmaWxsOnJlZDsiIC8+PC9nPjwvc3ZnPg=="""
NAMESPACED_SVG = b"""<svg:svg xmlns:svg="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<svg:rect x="10" y="10" width="80" height="80" fill="green" />
</svg:svg>"""
# minimal zip file with an empty `t.txt` file
ZIP = b"""UEsDBBQACAAIAGFva1AAAAAAAAAAAAAAAAAFACAAdC50eHRVVA0AB5bgaF6W4GheluBoXnV4CwABBOgDAAAE6AMAAA
MAUEsHCAAAAAACAAAAAAAAAFBLAQIUAxQACAAIAGFva1AAAAAAAgAAAAAAAAAFACAAAAAAAAAAAACkgQAAAAB0LnR4dFVUDQAHlu
BoXpbgaF6W4GhedXgLAAEE6AMAAAToAwAAUEsFBgAAAAABAAEAUwAAAFUAAAAAAA=="""
XML = b"""<?xml version='1.0' encoding='utf-8'?>
<Document xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="urn:iso:std:iso:20022:tech:xsd:pain.001.001.03">
<CstmrCdtTrfInitn>
<GrpHdr>
<MsgId>123456Odoo S.A.893873733</MsgId>
<CreDtTm>2018-11-21T09:47:32</CreDtTm>
<NbOfTxs>0</NbOfTxs>
<CtrlSum>0.0</CtrlSum>
<InitgPty>
<Nm>Odoo S.A.</Nm>
<Id>
<OrgId>
<Othr>
<Id>BE0477472701</Id>
<Issr>KBO-BCE</Issr>
</Othr>
</OrgId>
</Id>
</InitgPty>
</GrpHdr>
</CstmrCdtTrfInitn>
</Document>
"""
class test_guess_mimetype(BaseCase):
    def test_default_mimetype_empty(self):
        """ Empty content falls back to the default mimetype. """
        detected = guess_mimetype(b'')
        # the odoo implementation returns application/octet-stream by default;
        # python-magic, when installed, reports application/x-empty instead
        self.assertIn(detected, ('application/octet-stream', 'application/x-empty'))
    def test_default_mimetype(self):
        """ An explicit default is returned for empty content. """
        detected = guess_mimetype(b'', default='test')
        # python-magic, when installed, reports application/x-empty
        self.assertIn(detected, ('test', 'application/x-empty'))
    def test_mimetype_octet_stream(self):
        """ A lone NUL byte is plain binary. """
        self.assertEqual(guess_mimetype(b'\0'), 'application/octet-stream')
    def test_mimetype_png(self):
        self.assertEqual(guess_mimetype(base64.b64decode(PNG), default='test'), 'image/png')
    def test_mimetype_bmp(self):
        detected = guess_mimetype(base64.b64decode(BMP), default='test')
        # accept any of image/bmp, image/x-ms-bmp, ...
        self.assertRegex(detected, r'image/.*\bbmp')
    def test_mimetype_jpg(self):
        self.assertEqual(guess_mimetype(base64.b64decode(JPG), default='test'), 'image/jpeg')
    def test_mimetype_gif(self):
        self.assertEqual(guess_mimetype(base64.b64decode(GIF), default='test'), 'image/gif')
    def test_mimetype_svg(self):
        payload = base64.b64decode(SVG)
        self.assertTrue(guess_mimetype(payload, default='test').startswith('image/svg'))
        self.assertTrue(guess_mimetype(NAMESPACED_SVG, default='test').startswith('image/svg'))
        # whitespace-padded SVG must not be detected as SVG by the odoo implementation
        if not magic:
            self.assertNotIn("svg", guess_mimetype(b" " + payload, default='test'))
    def test_mimetype_zip(self):
        self.assertEqual(guess_mimetype(base64.b64decode(ZIP), default='test'), 'application/zip')
    def test_mimetype_xml(self):
        self.assertEqual(guess_mimetype(XML, default='test'), 'application/xml')
    def test_mimetype_get_extension(self):
        """ get_extension keeps only a plausible, lowercased last suffix. """
        cases = [
            ('filename.Abc', '.abc'),
            ('filename.scss', '.scss'),
            ('filename.torrent', '.torrent'),
            ('.htaccess', '.htaccess'),
            # enough to suppose that extension is present and don't suffix the filename
            ('filename.tar.gz', '.gz'),
            ('filename', ''),
            ('filename.', ''),
            ('filename.not_alnum', ''),
            ('filename.with space', ''),
            ('filename.notAnExtension', ''),
        ]
        for filename, expected in cases:
            self.assertEqual(get_extension(filename), expected)
# Allow running this test module standalone, outside the Odoo test runner.
if __name__ == '__main__':
    unittest.main()
| 45.360902 | 6,033 |
7,843 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from xmlrpc.client import Binary
from odoo.exceptions import AccessDenied, AccessError
from odoo.http import _request_stack
import odoo.tools
from odoo.tests import common
from odoo.service import common as auth, model
from odoo.tools import DotDict
@common.tagged('post_install', '-at_install')
class TestXMLRPC(common.HttpCase):
    """ Exercise the /xmlrpc and /jsonrpc external API endpoints against the
    test HTTP server, authenticated as the admin user. """
    def setUp(self):
        super(TestXMLRPC, self).setUp()
        # uid of the admin user, used as credentials for every RPC call below
        self.admin_uid = self.env.ref('base.user_admin').id
    def xmlrpc(self, model, method, *args, **kwargs):
        # convenience wrapper: execute_kw with admin credentials
        return self.xmlrpc_object.execute_kw(
            common.get_db_name(), self.admin_uid, 'admin',
            model, method, args, kwargs
        )
    def test_01_xmlrpc_login(self):
        """ Try to login on the common service. """
        db_name = common.get_db_name()
        uid = self.xmlrpc_common.login(db_name, 'admin', 'admin')
        self.assertEqual(uid, self.admin_uid)
    def test_xmlrpc_ir_model_search(self):
        """ Try a search on the object service. """
        o = self.xmlrpc_object
        db_name = common.get_db_name()
        # `execute` must accept the call with or without a trailing kwargs
        # dict; both forms return a list of ids
        ids = o.execute(db_name, self.admin_uid, 'admin', 'ir.model', 'search', [])
        self.assertIsInstance(ids, list)
        ids = o.execute(db_name, self.admin_uid, 'admin', 'ir.model', 'search', [], {})
        self.assertIsInstance(ids, list)
    def test_xmlrpc_read_group(self):
        # smoke test: read_group results must marshal over XML-RPC without
        # error (fix: the result was bound to an unused local before)
        self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'read_group', [], ['is_company', 'color'], ['parent_id']
        )
    def test_xmlrpc_name_search(self):
        # smoke test: name_search with a plain string argument
        self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'name_search', "admin"
        )
    def test_xmlrpc_html_field(self):
        """ An html field value must round-trip unaltered through XML-RPC. """
        sig = '<p>bork bork bork <span style="font-weight: bork">bork</span><br></p>'
        r = self.env['res.users'].create({
            'name': 'bob',
            'login': 'bob',
            'signature': sig
        })
        self.assertEqual(str(r.signature), sig)
        [x] = self.xmlrpc('res.users', 'read', r.id, ['signature'])
        self.assertEqual(x['signature'], sig)
    def test_xmlrpc_frozendict_marshalling(self):
        """ Test that the marshalling of a frozendict object works properly over XMLRPC """
        ctx = self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.users', 'context_get',
        )
        self.assertEqual(ctx['lang'], 'en_US')
        self.assertEqual(ctx['tz'], 'Europe/Brussels')
    def test_jsonrpc_read_group(self):
        # same read_group smoke test as above, over the JSON-RPC endpoint
        self._json_call(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'read_group', [], ['is_company', 'color'], ['parent_id']
        )
    def test_jsonrpc_name_search(self):
        # well that's some sexy sexy call right there
        self._json_call(
            common.get_db_name(),
            self.admin_uid, 'admin',
            'res.partner', 'name_search', 'admin'
        )
    def _json_call(self, *args):
        # POST a JSON-RPC 2.0 "call" on the object service's execute method
        self.opener.post("http://%s:%s/jsonrpc" % (common.HOST, odoo.tools.config['http_port']), json={
            'jsonrpc': '2.0',
            'id': None,
            'method': 'call',
            'params': {
                'service': 'object',
                'method': 'execute',
                'args': args
            }
        })
    def test_xmlrpc_attachment_raw(self):
        """ Reading `raw` over RPC must not leak the binary content. """
        ids = self.env['ir.attachment'].create({'name': 'n', 'raw': b'\x01\02\03'}).ids
        [att] = self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'ir.attachment', 'read', ids, ['raw'])
        self.assertEqual(att['raw'], '', "actual binary data should be blanked out on read")
# HttpCase is inherited really just for the test cursor it provides
@common.tagged('post_install', '-at_install')
class TestAPIKeys(common.HttpCase):
    """ RPC authentication via password and via API keys
    (res.users.apikeys), including key lifecycle (create / remove). """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # dedicated user whose credentials are used throughout the tests
        cls._user = cls.env['res.users'].create({
            'name': "Bylan",
            'login': 'byl',
            'password': 'ananananan',
            'tz': 'Australia/Eucla',
        })
    def setUp(self):
        super().setUp()
        # needs a fake request in order to call methods protected with check_identity
        fake_req = DotDict({
            # various things go and access request items
            'httprequest': DotDict({
                'environ': {'REMOTE_ADDR': 'localhost'},
                'cookies': {},
            }),
            # bypass check_identity flow
            'session': {'identity-check-last': time.time()}
        })
        _request_stack.push(fake_req)
        self.addCleanup(_request_stack.pop)
    def test_trivial(self):
        """ Password authentication and execute_kw with that password. """
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'ananananan', {}])
        self.assertEqual(uid, self._user.id)
        ctx = model.dispatch('execute_kw', [
            self.env.cr.dbname, uid, 'ananananan',
            'res.users', 'context_get', []
        ])
        self.assertEqual(ctx['tz'], 'Australia/Eucla')
    def test_wrongpw(self):
        # User.authenticate raises but RPC.authenticate returns False
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'aws', {}])
        self.assertFalse(uid)
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, 'aws',
                'res.users', 'context_get', []
            ])
    def test_key(self):
        """ An API key must work as an RPC credential, alongside the password. """
        env = self.env(user=self._user)
        r = env['res.users.apikeys.description'].create({
            'name': 'a',
        }).make_key()
        # the generated key is handed back through the wizard action context
        k = r['context']['default_key']
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'ananananan', {}])
        self.assertEqual(uid, self._user.id)
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', k, {}])
        self.assertEqual(uid, self._user.id)
        ctx = model.dispatch('execute_kw', [
            self.env.cr.dbname, uid, k,
            'res.users', 'context_get', []
        ])
        self.assertEqual(ctx['tz'], 'Australia/Eucla')
    def test_delete(self):
        """ Key removal rights: owner yes, admin yes, other users no. """
        env = self.env(user=self._user)
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        # NOTE(review): relies on search() returning exactly the three keys
        # just created — confirm default ordering if this becomes flaky
        k0, k1, k2 = env['res.users.apikeys'].search([])
        # user can remove their own keys
        k0.remove()
        self.assertFalse(k0.exists())
        # admin can remove user keys
        k1.with_user(self.env.ref('base.user_admin')).remove()
        self.assertFalse(k1.exists())
        # other user can't remove user keys
        u = self.env['res.users'].create({
            'name': 'a',
            'login': 'a',
            'groups_id': self.env.ref('base.group_user').ids,
        })
        with self.assertRaises(AccessError):
            k2.with_user(u).remove()
    def test_disabled(self):
        """ Neither password nor API key may authenticate an archived user. """
        env = self.env(user=self._user)
        k = env['res.users.apikeys.description'].create({'name': 'b',}).make_key()['context']['default_key']
        self._user.active = False
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, 'ananananan',
                'res.users', 'context_get', []
            ])
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, k,
                'res.users', 'context_get', []
            ])
| 36.47907 | 7,843 |
11,793 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from math import log10
from odoo.tests.common import TransactionCase
from odoo.tools import float_compare, float_is_zero, float_repr, float_round, float_split, float_split_str
class TestFloatPrecision(TransactionCase):
    """ Tests on float precision. """
    def test_rounding_02(self):
        """ Test rounding methods with 2 digits. """
        currency = self.env.ref('base.EUR')
        def try_round(amount, expected):
            # derive the digit count from the currency rounding (EUR: 0.01 -> 2)
            digits = max(0, -int(log10(currency.rounding)))
            result = float_repr(currency.round(amount), precision_digits=digits)
            self.assertEqual(result, expected, 'Rounding error: got %s, expected %s' % (result, expected))
        try_round(2.674,'2.67')
        try_round(2.675,'2.68')   # in Python 2.7.2, round(2.675,2) gives 2.67
        try_round(-2.675,'-2.68') # in Python 2.7.2, round(2.675,2) gives 2.67
        try_round(0.001,'0.00')
        try_round(-0.001,'-0.00')
        try_round(0.0049,'0.00')   # 0.0049 is closer to 0 than to 0.01, so should round down
        try_round(0.005,'0.01')   # the rule is to round half away from zero
        try_round(-0.005,'-0.01') # the rule is to round half away from zero
        try_round(6.6 * 0.175, '1.16') # 6.6 * 0.175 is rounded to 1.15 with epsilon = 53
        try_round(-6.6 * 0.175, '-1.16')
        def try_zero(amount, expected):
            self.assertEqual(currency.is_zero(amount), expected,
                             "Rounding error: %s should be zero!" % amount)
        try_zero(0.01, False)
        try_zero(-0.01, False)
        try_zero(0.001, True)
        try_zero(-0.001, True)
        try_zero(0.0046, True)
        try_zero(-0.0046, True)
        try_zero(2.68-2.675, False) # 2.68 - 2.675 = 0.005 -> rounds to 0.01
        try_zero(2.68-2.676, True)  # 2.68 - 2.676 = 0.004 -> rounds to 0.0
        try_zero(2.676-2.68, True)  # 2.676 - 2.68 = -0.004 -> rounds to -0.0
        try_zero(2.675-2.68, False) # 2.675 - 2.68 = -0.005 -> rounds to -0.01
        def try_compare(amount1, amount2, expected):
            self.assertEqual(currency.compare_amounts(amount1, amount2), expected,
                             "Rounding error, compare_amounts(%s,%s) should be %s" % (amount1, amount2, expected))
        try_compare(0.001, 0.001, 0)
        try_compare(-0.001, -0.001, 0)
        try_compare(0.001, 0.002, 0)
        try_compare(-0.001, -0.002, 0)
        try_compare(2.675, 2.68, 0)
        try_compare(2.676, 2.68, 0)
        try_compare(-2.676, -2.68, 0)
        try_compare(2.674, 2.68, -1)
        try_compare(-2.674, -2.68, 1)
        try_compare(3, 2.68, 1)
        try_compare(-3, -2.68, -1)
        try_compare(0.01, 0, 1)
        try_compare(-0.01, 0, -1)
    def test_rounding_03(self):
        """ Test rounding methods with 3 digits. """
        def try_round(amount, expected, digits=3, method='HALF-UP'):
            value = float_round(amount, precision_digits=digits, rounding_method=method)
            result = float_repr(value, precision_digits=digits)
            self.assertEqual(result, expected, 'Rounding error: got %s, expected %s' % (result, expected))
        try_round(2.6745, '2.675')
        try_round(-2.6745, '-2.675')
        try_round(2.6744, '2.674')
        try_round(-2.6744, '-2.674')
        try_round(0.0004, '0.000')
        try_round(-0.0004, '-0.000')
        try_round(357.4555, '357.456')
        try_round(-357.4555, '-357.456')
        try_round(457.4554, '457.455')
        try_round(-457.4554, '-457.455')
        # Try some rounding value with rounding method UP instead of HALF-UP
        # We use 8.175 because when normalizing 8.175 with precision_digits=3 it gives
        # us 8175,0000000001234 as value, and if not handled correctly the rounding UP
        # value will be incorrect (should be 8,175 and not 8,176)
        try_round(8.175, '8.175', method='UP')
        try_round(8.1751, '8.176', method='UP')
        try_round(-8.175, '-8.175', method='UP')
        try_round(-8.1751, '-8.176', method='UP')
        try_round(-6.000, '-6.000', method='UP')
        try_round(1.8, '2', 0, method='UP')
        try_round(-1.8, '-2', 0, method='UP')
        # Try some rounding value with rounding method DOWN instead of HALF-UP
        # We use 2.425 because when normalizing 2.425 with precision_digits=3 it gives
        # us 2424.9999999999995 as value, and if not handled correctly the rounding DOWN
        # value will be incorrect (should be 2.425 and not 2.424)
        try_round(2.425, '2.425', method='DOWN')
        try_round(2.4249, '2.424', method='DOWN')
        try_round(-2.425, '-2.425', method='DOWN')
        try_round(-2.4249, '-2.424', method='DOWN')
        try_round(-2.500, '-2.500', method='DOWN')
        try_round(1.8, '1', 0, method='DOWN')
        try_round(-1.8, '-1', 0, method='DOWN')
        # Extended float range test, inspired by Cloves Almeida's test on bug #882036.
        fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
        expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
        precisions = [2, 2, 2, 2, 2, 2, 3, 4]
        # Note: max precision for double floats is 53 bits of precision or
        # 17 significant decimal digits
        for magnitude in range(7):
            for frac, exp, prec in zip(fractions, expecteds, precisions):
                for sign in [-1,1]:
                    for x in range(0, 10000, 97):
                        n = x * 10 ** magnitude
                        f = sign * (n + frac)
                        f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
                        try_round(f, f_exp, digits=prec)
        def try_zero(amount, expected):
            self.assertEqual(float_is_zero(amount, precision_digits=3), expected,
                             "Rounding error: %s should be zero!" % amount)
        try_zero(0.0002, True)
        try_zero(-0.0002, True)
        try_zero(0.00034, True)
        try_zero(0.0005, False)
        try_zero(-0.0005, False)
        try_zero(0.0008, False)
        try_zero(-0.0008, False)
        def try_compare(amount1, amount2, expected):
            self.assertEqual(float_compare(amount1, amount2, precision_digits=3), expected,
                             "Rounding error, compare_amounts(%s,%s) should be %s" % (amount1, amount2, expected))
        try_compare(0.0003, 0.0004, 0)
        try_compare(-0.0003, -0.0004, 0)
        try_compare(0.0002, 0.0005, -1)
        try_compare(-0.0002, -0.0005, 1)
        try_compare(0.0009, 0.0004, 1)
        try_compare(-0.0009, -0.0004, -1)
        try_compare(557.4555, 557.4556, 0)
        try_compare(-557.4555, -557.4556, 0)
        try_compare(657.4444, 657.445, -1)
        try_compare(-657.4444, -657.445, 1)
        # Rounding to unusual rounding units (e.g. coin values)
        def try_round(amount, expected, precision_rounding=None, method='HALF-UP'): # pylint: disable=function-redefined
            value = float_round(amount, precision_rounding=precision_rounding, rounding_method=method)
            result = float_repr(value, precision_digits=2)
            self.assertEqual(result, expected, 'Rounding error: got %s, expected %s' % (result, expected))
        try_round(-457.4554, '-457.45', precision_rounding=0.05)
        try_round(457.444, '457.50', precision_rounding=0.5)
        try_round(457.3, '455.00', precision_rounding=5)
        try_round(457.5, '460.00', precision_rounding=5)
        try_round(457.1, '456.00', precision_rounding=3)
        try_round(2.5, '2.50', precision_rounding=0.05, method='DOWN')
        try_round(-2.5, '-2.50', precision_rounding=0.05, method='DOWN')
    def test_rounding_04(self):
        """ check that proper rounding is performed for float persistence """
        currency = self.env.ref('base.EUR')
        currency_rate = self.env['res.currency.rate']
        def try_roundtrip(value, expected, date):
            # write the rate to the database and read it back
            rate = currency_rate.create({'name': date,
                                         'rate': value,
                                         'currency_id': currency.id})
            self.assertEqual(rate.rate, expected,
                             'Roundtrip error: got %s back from db, expected %s' % (rate, expected))
        # res.currency.rate no more uses 6 digits of precision by default, it now uses whatever precision it gets
        try_roundtrip(10000.999999, 10000.999999, '2000-01-03')
        #TODO re-enable those tests when tests are made on dedicated models
        # (res.currency.rate don't accept negative value anymore)
        #try_roundtrip(-2.6748955, -2.674896, '2000-01-02')
        #try_roundtrip(-10000.999999, -10000.999999, '2000-01-04')
    def test_float_split_05(self):
        """ Test split method with 2 digits. """
        currency = self.env.ref('base.EUR')
        def try_split(value, expected, split_fun, rounding=None):
            digits = max(0, -int(log10(currency.rounding))) if rounding is None else rounding
            result = split_fun(value, precision_digits=digits)
            self.assertEqual(result, expected, 'Split error: got %s, expected %s' % (result, expected))
        try_split(2.674, ('2', '67'), float_split_str)
        try_split(2.675, ('2', '68'), float_split_str) # in Python 2.7.2, round(2.675,2) gives 2.67
        try_split(-2.675, ('-2', '68'), float_split_str) # in Python 2.7.2, round(2.675,2) gives 2.67
        try_split(0.001, ('0', '00'), float_split_str)
        try_split(-0.001, ('-0', '00'), float_split_str)
        try_split(42, ('42', '00'), float_split_str)
        try_split(0.1, ('0', '10'), float_split_str)
        try_split(13.0, ('13', ''), float_split_str, rounding=0)
        try_split(2.674, (2, 67), float_split)
        try_split(2.675, (2, 68), float_split) # in Python 2.7.2, round(2.675,2) gives 2.67
        try_split(-2.675, (-2, 68), float_split) # in Python 2.7.2, round(2.675,2) gives 2.67
        try_split(0.001, (0, 0), float_split)
        try_split(-0.001, (0, 0), float_split)
        try_split(42, (42, 0), float_split)
        try_split(0.1, (0, 10), float_split)
        try_split(13.0, (13, 0), float_split, rounding=0)
    def test_rounding_invalid(self):
        """ verify that invalid parameters are forbidden """
        with self.assertRaises(AssertionError):
            float_is_zero(0.01, precision_digits=3, precision_rounding=0.01)
        with self.assertRaises(AssertionError):
            float_is_zero(0.0, precision_rounding=0.0)
        with self.assertRaises(AssertionError):
            float_is_zero(0.0, precision_rounding=-0.1)
        with self.assertRaises(AssertionError):
            float_compare(0.01, 0.02, precision_digits=3, precision_rounding=0.01)
        with self.assertRaises(AssertionError):
            float_compare(1.0, 1.0, precision_rounding=0.0)
        with self.assertRaises(AssertionError):
            float_compare(1.0, 1.0, precision_rounding=-0.1)
        with self.assertRaises(AssertionError):
            float_round(0.01, precision_digits=3, precision_rounding=0.01)
        with self.assertRaises(AssertionError):
            float_round(1.25, precision_rounding=0.0)
        with self.assertRaises(AssertionError):
            float_round(1.25, precision_rounding=-0.1)
    def test_amount_to_text_10(self):
        """ verify that amount_to_text works as expected """
        currency = self.env.ref('base.EUR')
        amount_target = currency.amount_to_text(0.29)
        amount_test = currency.amount_to_text(0.28)
        self.assertNotEqual(amount_test, amount_target,
                            "Amount in text should not depend on float representation")
| 47.361446 | 11,793 |
21,114 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo import tools
from odoo.addons.base.tests import test_mail_examples
from odoo.addons.base.tests.common import MockSmtplibCase
from odoo.tests import tagged
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
@tagged('mail_server')
class TestIrMailServer(TransactionCase, MockSmtplibCase):
def setUp(self):
self._init_mail_servers()
    def _build_email(self, mail_from, return_path=None):
        # Helper: build a minimal outgoing message with the given FROM; the
        # recipient uses a non-ASCII domain on purpose, to exercise address
        # encoding in the tests below.
        return self.env['ir.mail_server'].build_email(
            email_from=mail_from,
            email_to='dest@example-é.com',
            subject='subject', body='body',
            headers={'Return-Path': return_path} if return_path else None
        )
def test_match_from_filter(self):
"""Test the from_filter field on the "ir.mail_server"."""
match_from_filter = self.env['ir.mail_server']._match_from_filter
# Should match
tests = [
('[email protected]', 'mail.example.com'),
('[email protected]', 'mail.EXAMPLE.com'),
('[email protected]', '[email protected]'),
('[email protected]', False),
('"[email protected]" <[email protected]>', 'mail.example.com'),
('"[email protected]" <[email protected]>', 'mail.example.com'),
]
for email, from_filter in tests:
self.assertTrue(match_from_filter(email, from_filter))
# Should not match
tests = [
('[email protected]', '[email protected]'),
('[email protected]', 'test.com'),
('[email protected]', 'mail.éxample.com'),
('[email protected]', 'mail.example.com'),
('[email protected]', 'mmail.example.com'),
('"[email protected]" <[email protected]>', 'mail.example.com'),
]
for email, from_filter in tests:
self.assertFalse(match_from_filter(email, from_filter))
    def test_mail_body(self):
        """ build_email with an html body must produce a text/plain
        alternative matching the html source. """
        bodies = [
            'content',
            '<p>content</p>',
            '<head><meta content="text/html; charset=utf-8" http-equiv="Content-Type"></head><body><p>content</p></body>',
            test_mail_examples.MISC_HTML_SOURCE,
            test_mail_examples.QUOTE_THUNDERBIRD_HTML,
        ]
        # expected text/plain rendering of each body above, pairwise
        expected_list = [
            'content',
            'content',
            'content',
            "test1\n*test2*\ntest3\ntest4\ntest5\ntest6 test7\ntest8 test9\ntest10\ntest11\ntest12\ngoogle [1]\ntest link [2]\n\n\n[1] http://google.com\n[2] javascript:alert('malicious code')",
            'On 01/05/2016 10:24 AM, Raoul\nPoilvache wrote:\n\n* Test reply. The suite. *\n\n--\nRaoul Poilvache\n\nTop cool !!!\n\n--\nRaoul Poilvache',
        ]
        for body, expected in zip(bodies, expected_list):
            message = self.env['ir.mail_server'].build_email(
                '[email protected]',
                '[email protected]',
                body=body,
                subject='Subject',
                subtype='html',
            )
            # find the text/plain alternative among the MIME parts
            body_alternative = False
            for part in message.walk():
                if part.get_content_maintype() == 'multipart':
                    continue # skip container
                if part.get_content_type() == 'text/plain':
                    if not part.get_payload():
                        continue
                    body_alternative = tools.ustr(part.get_content())
                    # remove ending new lines as it just adds noise
                    body_alternative = body_alternative.strip('\n')
            self.assertEqual(body_alternative, expected)
    @mute_logger('odoo.models.unlink')
    def test_mail_server_priorities(self):
        """Test if we choose the right mail server to send an email.
        Priorities are
        1. Forced mail server (e.g.: in mass mailing)
            - If the "from_filter" of the mail server match the notification email
              use the notifications email in the "From header"
            - Otherwise spoof the "From" (because we force the mail server but we don't
              know which email use to send it)
        2. A mail server for which the "from_filter" match the "From" header
        3. A mail server for which the "from_filter" match the domain of the "From" header
        4. The mail server used for notifications
        5. A mail server without "from_filter" (and so spoof the "From" header because we
           do not know for which email address it can be used)
        """
        # sanity checks
        self.assertTrue(self.env['ir.mail_server']._get_default_from_address(), 'Notifications email must be set for testing')
        self.assertTrue(self.env['ir.mail_server']._get_default_bounce_address(), 'Bounce email must be set for testing')
        # exact "from_filter" match -> priority 2
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='[email protected]')
        self.assertEqual(mail_server, self.server_user)
        self.assertEqual(mail_from, '[email protected]')
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='"Name [email protected]" <[email protected]>')
        self.assertEqual(mail_server, self.server_user, 'Must extract email from full name')
        self.assertEqual(mail_from, '"Name [email protected]" <[email protected]>', 'Must keep the given mail from')
        # Should not be case sensitive
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='[email protected]')
        self.assertEqual(mail_server, self.server_user, 'Mail from is case insensitive')
        self.assertEqual(mail_from, '[email protected]', 'Should not change the mail from')
        # domain-level "from_filter" match -> priority 3
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='[email protected]')
        self.assertEqual(mail_server, self.server_domain)
        self.assertEqual(mail_from, '[email protected]')
        # Cover a different condition that the "email case insensitive" test
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='[email protected]')
        self.assertEqual(mail_server, self.server_domain, 'Domain is case insensitive')
        self.assertEqual(mail_from, '[email protected]', 'Domain is case insensitive')
        # no match at all -> fall back on the notification server (priority 4)
        mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='"Test" <test@unknown_domain.com>')
        self.assertEqual(mail_server, self.server_notification, 'Should take the notification email')
        self.assertEqual(mail_from, '[email protected]')
        # remove the notifications email to simulate a mis-configured Odoo database
        # so we do not have the choice, we have to spoof the FROM
        # (otherwise we can not send the email)
        self.env['ir.config_parameter'].sudo().set_param('mail.catchall.domain', False)
        with mute_logger('odoo.addons.base.models.ir_mail_server'):
            mail_server, mail_from = self.env['ir.mail_server']._find_mail_server(email_from='test@unknown_domain.com')
            self.assertEqual(mail_server.from_filter, False, 'No notifications email set, must be forced to spoof the FROM')
            self.assertEqual(mail_from, 'test@unknown_domain.com')
    @mute_logger('odoo.models.unlink')
    def test_mail_server_send_email(self):
        """ Check the SMTP envelope and From header chosen by send_email for
        each mail-server configuration. """
        IrMailServer = self.env['ir.mail_server']
        default_bounce_adress = self.env['ir.mail_server']._get_default_bounce_address()
        # A mail server is configured for the email
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='[email protected]')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            message_from='[email protected]',
            from_filter='[email protected]',
        )
        # No mail server are configured for the email address,
        # so it will use the notifications email instead and encapsulate the old email
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            message_from='"Name" <[email protected]>',
            from_filter='[email protected]',
        )
        # Same situation, but the original email has no name part
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='test@unknown_domain.com')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            message_from='"test" <[email protected]>',
            from_filter='[email protected]',
        )
        # A mail server is configured for the entire domain name, so we can use the bounce
        # email address because the mail server supports it
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='[email protected]')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from=default_bounce_adress,
            message_from='[email protected]',
            from_filter='test.com',
        )
        # remove the notification server
        # so <[email protected]> will use the <test.com> mail server
        self.server_notification.unlink()
        # The mail server configured for the notifications email has been removed
        # but we can still use the mail server configured for test.com
        # and so we will be able to use the bounce address
        # because we use the mail server for "test.com"
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from=default_bounce_adress,
            message_from='"Name" <[email protected]>',
            from_filter='test.com',
        )
        # Test that the mail from / recipient envelop are encoded using IDNA
        self.env['ir.config_parameter'].sudo().set_param('mail.catchall.domain', 'ééééééé.com')
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='test@ééééééé.com')
            IrMailServer.send_email(message)
        self.assertEqual(len(self.emails), 1)
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            smtp_to_list=['[email protected]'],
            message_from='test@=?utf-8?b?w6nDqcOpw6nDqcOpw6k=?=.com',
            from_filter=False,
        )
        # Test the case when the "mail.default.from" contains a full email address and not just the local part
        # the domain of this default email address can be different than the catchall domain
        self.env['ir.config_parameter'].sudo().set_param('mail.default.from', 'test@custom_domain.com')
        self.server_default.from_filter = 'custom_domain.com'
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
            IrMailServer.send_email(message)
        self.assert_email_sent_smtp(
            smtp_from='test@custom_domain.com',
            smtp_to_list=['[email protected]'],
            message_from='"Name" <test@custom_domain.com>',
            from_filter='custom_domain.com',
        )
        # Test when forcing the mail server and when smtp_encryption is "starttls"
        self.server_domain.smtp_encryption = "starttls"
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='[email protected]')
            IrMailServer.send_email(message, mail_server_id=self.server_domain.id)
        self.connect_mocked.assert_called_once()
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            message_from='[email protected]',
            from_filter='test.com',
        )
@mute_logger('odoo.models.unlink')
def test_mail_server_send_email_smtp_session(self):
"""Test all the cases when we provide the SMTP session.
The results must be the same as passing directly the parameter to "send_email".
"""
IrMailServer = self.env['ir.mail_server']
default_bounce_adress = self.env['ir.mail_server']._get_default_bounce_address()
# A mail server is configured for the email
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='[email protected]')
message = self._build_email(mail_from='[email protected]')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from='[email protected]',
message_from='[email protected]',
from_filter='[email protected]',
)
# No mail server are configured for the email address,
# so it will use the notifications email instead and encapsulate the old email
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='"Name" <test@unknown_domain.com>')
message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from='[email protected]',
message_from='"Name" <[email protected]>',
from_filter='[email protected]',
)
# A mail server is configured for the entire domain name, so we can use the bounce
# email address because the mail server supports it
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='[email protected]')
message = self._build_email(mail_from='[email protected]')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='[email protected]',
from_filter='test.com',
)
# remove the notification server
# so <[email protected]> will use the <test.com> mail server
self.server_notification.unlink()
# The mail server configured for the notifications email has been removed
# but we can still use the mail server configured for test.com
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='"Name" <test@unknown_domain.com>')
message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='"Name" <[email protected]>',
from_filter='test.com',
)
@mute_logger('odoo.models.unlink')
@patch.dict("odoo.tools.config.options", {"from_filter": "test.com"})
def test_mail_server_binary_arguments_domain(self):
"""Test the configuration provided in the odoo-bin arguments.
This config is used when no mail server exists.
"""
IrMailServer = self.env['ir.mail_server']
default_bounce_adress = self.env['ir.mail_server']._get_default_bounce_address()
# Remove all mail server so we will use the odoo-bin arguments
self.env['ir.mail_server'].search([]).unlink()
self.assertFalse(self.env['ir.mail_server'].search([]))
# Use an email in the domain of the "from_filter"
with self.mock_smtplib_connection():
message = self._build_email(mail_from='[email protected]')
IrMailServer.send_email(message)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='[email protected]',
from_filter='test.com',
)
# Test if the domain name is normalized before comparison
with self.mock_smtplib_connection():
message = self._build_email(mail_from='[email protected]')
IrMailServer.send_email(message)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='[email protected]',
from_filter='test.com',
)
# Use an email outside of the domain of the "from_filter"
# So we will use the notifications email in the headers and the bounce address
# in the envelop because the "from_filter" allows to use the entire domain
with self.mock_smtplib_connection():
message = self._build_email(mail_from='test@unknown_domain.com')
IrMailServer.send_email(message)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='"test" <[email protected]>',
from_filter='test.com',
)
@mute_logger('odoo.models.unlink')
@patch.dict("odoo.tools.config.options", {"from_filter": "test.com"})
def test_mail_server_binary_arguments_domain_smtp_session(self):
"""Test the configuration provided in the odoo-bin arguments.
This config is used when no mail server exists.
Use a pre-configured SMTP session.
"""
IrMailServer = self.env['ir.mail_server']
default_bounce_adress = self.env['ir.mail_server']._get_default_bounce_address()
# Remove all mail server so we will use the odoo-bin arguments
self.env['ir.mail_server'].search([]).unlink()
self.assertFalse(self.env['ir.mail_server'].search([]))
# Use an email in the domain of the "from_filter"
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='[email protected]')
message = self._build_email(mail_from='[email protected]')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='[email protected]',
from_filter='test.com',
)
# Use an email outside of the domain of the "from_filter"
# So we will use the notifications email in the headers and the bounce address
# in the envelop because the "from_filter" allows to use the entire domain
with self.mock_smtplib_connection():
smtp_session = IrMailServer.connect(smtp_from='test@unknown_domain.com')
message = self._build_email(mail_from='test@unknown_domain.com')
IrMailServer.send_email(message, smtp_session=smtp_session)
self.connect_mocked.assert_called_once()
self.assert_email_sent_smtp(
smtp_from=default_bounce_adress,
message_from='"test" <[email protected]>',
from_filter='test.com',
)
    @mute_logger('odoo.models.unlink')
    @patch.dict('odoo.tools.config.options', {'from_filter': 'test.com'})
    def test_mail_server_mail_default_from_filter(self):
        """Test that the config parameter "mail.default.from_filter" overwrite the odoo-bin
        argument "--from-filter"
        """
        self.env['ir.config_parameter'].sudo().set_param('mail.default.from_filter', 'example.com')
        IrMailServer = self.env['ir.mail_server']

        # Remove all mail server so we will use the odoo-bin arguments
        IrMailServer.search([]).unlink()
        self.assertFalse(IrMailServer.search([]))

        # Use an email in the domain of the config parameter "mail.default.from_filter"
        with self.mock_smtplib_connection():
            message = self._build_email(mail_from='[email protected]')
            IrMailServer.send_email(message)

        # the ICP value must win over the patched CLI option: the envelope keeps
        # the original sender and the connection uses the 'example.com' filter
        self.assert_email_sent_smtp(
            smtp_from='[email protected]',
            message_from='[email protected]',
            from_filter='example.com',
        )
| 46.267544 | 21,098 |
21,815 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, Command
from odoo.addons.base.tests.common import SavepointCaseWithUserDemo
from odoo.tools import mute_logger
from odoo.exceptions import AccessError
class TestAPI(SavepointCaseWithUserDemo):
    """ test the new API of the ORM """

    @classmethod
    def setUpClass(cls):
        # load the shared demo partner fixtures (cls.partners) used by the tests below
        super(TestAPI, cls).setUpClass()
        cls._load_partners_set()

    def assertIsRecordset(self, value, model):
        # `value` must be a recordset of exactly the given model name
        self.assertIsInstance(value, models.BaseModel)
        self.assertEqual(value._name, model)

    def assertIsRecord(self, value, model):
        # a "record" is a recordset of cardinality 0 or 1
        self.assertIsRecordset(value, model)
        self.assertTrue(len(value) <= 1)

    def assertIsNull(self, value, model):
        # an empty (falsy) recordset of the given model
        self.assertIsRecordset(value, model)
        self.assertFalse(value)
@mute_logger('odoo.models')
def test_00_query(self):
""" Build a recordset, and check its contents. """
domain = [('name', 'ilike', 'j'), ('id', 'in', self.partners.ids)]
partners = self.env['res.partner'].search(domain)
# partners is a collection of browse records
self.assertTrue(partners)
# partners and its contents are instance of the model
self.assertIsRecordset(partners, 'res.partner')
for p in partners:
self.assertIsRecord(p, 'res.partner')
@mute_logger('odoo.models')
def test_01_query_offset(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([('id', 'in', self.partners.ids)], offset=5)
partners2 = self.env['res.partner'].search([('id', 'in', self.partners.ids)])[5:]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('odoo.models')
def test_02_query_limit(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([('id', 'in', self.partners.ids)], order='id asc', limit=5)
partners2 = self.env['res.partner'].search([('id', 'in', self.partners.ids)], order='id asc')[:5]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('odoo.models')
def test_03_query_offset_limit(self):
""" Build a recordset with offset and limit, and check equivalence. """
partners1 = self.env['res.partner'].search([('id', 'in', self.partners.ids)], order='id asc', offset=3, limit=7)
partners2 = self.env['res.partner'].search([('id', 'in', self.partners.ids)], order='id asc')[3:10]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('odoo.models')
def test_04_query_count(self):
""" Test the search method with count=True. """
self.cr.execute("SELECT COUNT(*) FROM res_partner WHERE active")
count1 = self.cr.fetchone()[0]
count2 = self.env['res.partner'].search([], count=True)
self.assertIsInstance(count1, int)
self.assertIsInstance(count2, int)
self.assertEqual(count1, count2)
@mute_logger('odoo.models')
def test_05_immutable(self):
""" Check that a recordset remains the same, even after updates. """
domain = [('name', 'ilike', 'g'), ('id', 'in', self.partners.ids)]
partners = self.env['res.partner'].search(domain)
self.assertTrue(partners)
ids = partners.ids
# modify those partners, and check that partners has not changed
partners.write({'active': False})
self.assertEqual(ids, partners.ids)
# redo the search, and check that the result is now empty
partners2 = self.env['res.partner'].search(domain)
self.assertFalse(partners2)
    @mute_logger('odoo.models')
    def test_06_fields(self):
        """ Check that relation fields return records, recordsets or nulls. """
        user = self.env.user
        self.assertIsRecord(user, 'res.users')
        self.assertIsRecord(user.partner_id, 'res.partner')
        self.assertIsRecordset(user.groups_id, 'res.groups')

        # every relational field must yield recordsets of its comodel
        for name, field in self.partners._fields.items():
            if field.type == 'many2one':
                for p in self.partners:
                    self.assertIsRecord(p[name], field.comodel_name)
            elif field.type == 'reference':
                # reference fields may be falsy; only check when a value is set
                for p in self.partners:
                    if p[name]:
                        self.assertIsRecord(p[name], field.comodel_name)
            elif field.type in ('one2many', 'many2many'):
                for p in self.partners:
                    self.assertIsRecordset(p[name], field.comodel_name)
    @mute_logger('odoo.models')
    def test_07_null(self):
        """ Check behavior of null instances. """
        # select a partner without a parent
        partner = self.env['res.partner'].search([('parent_id', '=', False), ('id', 'in', self.partners.ids)])[0]

        # check partner and related null instances
        self.assertTrue(partner)
        self.assertIsRecord(partner, 'res.partner')

        self.assertFalse(partner.parent_id)
        self.assertIsNull(partner.parent_id, 'res.partner')

        # traversing a null many2one yields falsy fields, not errors
        self.assertIs(partner.parent_id.id, False)

        self.assertFalse(partner.parent_id.user_id)
        self.assertIsNull(partner.parent_id.user_id, 'res.users')

        self.assertIs(partner.parent_id.user_id.name, False)

        # x2many fields on a null record come back as empty recordsets
        self.assertFalse(partner.parent_id.user_id.groups_id)
        self.assertIsRecordset(partner.parent_id.user_id.groups_id, 'res.groups')
@mute_logger('odoo.models')
def test_40_new_new(self):
""" Call new-style methods in the new API style. """
partners = self.env['res.partner'].search([('name', 'ilike', 'g'), ('id', 'in', self.partners.ids)])
self.assertTrue(partners)
# call method write on partners itself, and check its effect
partners.write({'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('odoo.models')
def test_45_new_new(self):
""" Call new-style methods on records (new API style). """
partners = self.env['res.partner'].search([('name', 'ilike', 'g'), ('id', 'in', self.partners.ids)])
self.assertTrue(partners)
# call method write on partner records, and check its effects
for p in partners:
p.write({'active': False})
for p in partners:
self.assertFalse(p.active)
    @mute_logger('odoo.models')
    @mute_logger('odoo.addons.base.models.ir_model')
    def test_50_environment(self):
        """ Test environment on records. """
        # partners and reachable records are attached to self.env
        partners = self.env['res.partner'].search([('name', 'ilike', 'j'), ('id', 'in', self.partners.ids)])
        self.assertEqual(partners.env, self.env)
        for x in (partners, partners[0], partners[0].company_id):
            self.assertEqual(x.env, self.env)
        for p in partners:
            self.assertEqual(p.env, self.env)

        # check that the current user can read and modify company data
        # (bare attribute access would raise if reading were forbidden)
        partners[0].company_id.name
        partners[0].company_id.write({'name': 'Fools'})

        # create an environment with a demo user
        demo = self.env['res.users'].create({
            'name': 'test_environment_demo',
            'login': 'test_environment_demo',
            'password': 'test_environment_demo',
        })
        demo_env = self.env(user=demo)
        self.assertNotEqual(demo_env, self.env)

        # partners and related records are still attached to self.env
        self.assertEqual(partners.env, self.env)
        for x in (partners, partners[0], partners[0].company_id):
            self.assertEqual(x.env, self.env)
        for p in partners:
            self.assertEqual(p.env, self.env)

        # create record instances attached to demo_env
        demo_partners = partners.with_user(demo)
        self.assertEqual(demo_partners.env, demo_env)
        for x in (demo_partners, demo_partners[0], demo_partners[0].company_id):
            self.assertEqual(x.env, demo_env)
        for p in demo_partners:
            self.assertEqual(p.env, demo_env)

        # demo user can read but not modify company data
        demo_partner = self.env['res.partner'].search([('name', '=', 'Landon Roberts')]).with_user(demo)
        self.assertTrue(demo_partner.company_id, 'This partner is supposed to be linked to a company')
        demo_partner.company_id.name
        with self.assertRaises(AccessError):
            demo_partner.company_id.write({'name': 'Pricks'})

        # remove demo user from all groups
        demo.write({'groups_id': [Command.clear()]})

        # demo user can no longer access partner data: even reading raises
        with self.assertRaises(AccessError):
            demo_partner.company_id.name
    @mute_logger('odoo.models')
    def test_60_cache(self):
        """ Check the record cache behavior """
        Partners = self.env['res.partner']
        pids = []
        # parent name -> list of child names to create
        data = {
            'partner One': ['Partner One - One', 'Partner One - Two'],
            'Partner Two': ['Partner Two - One'],
            'Partner Three': ['Partner Three - One'],
        }
        for p in data:
            pids.append(Partners.create({
                'name': p,
                'child_ids': [Command.create({'name': c}) for c in data[p]],
            }).id)

        partners = Partners.search([('id', 'in', pids)])
        partner1, partner2 = partners[0], partners[1]
        children1, children2 = partner1.child_ids, partner2.child_ids
        self.assertTrue(children1)
        self.assertTrue(children2)

        # take a child contact
        child = children1[0]
        self.assertEqual(child.parent_id, partner1)
        self.assertIn(child, partner1.child_ids)
        self.assertNotIn(child, partner2.child_ids)

        # fetch data in the cache (bare attribute access warms it up)
        for p in partners:
            p.name, p.company_id.name, p.user_id.name, p.contact_address
        self.env.cache.check(self.env)

        # change its parent; cache.check() after each mutation verifies
        # the cache stayed consistent with the database
        child.write({'parent_id': partner2.id})
        self.env.cache.check(self.env)

        # check recordsets
        self.assertEqual(child.parent_id, partner2)
        self.assertNotIn(child, partner1.child_ids)
        self.assertIn(child, partner2.child_ids)
        self.assertEqual(set(partner1.child_ids + child), set(children1))
        self.assertEqual(set(partner2.child_ids), set(children2 + child))
        self.env.cache.check(self.env)

        # delete it
        child.unlink()
        self.env.cache.check(self.env)

        # check recordsets
        self.assertEqual(set(partner1.child_ids), set(children1) - set([child]))
        self.assertEqual(set(partner2.child_ids), set(children2))
        self.env.cache.check(self.env)

        # convert from the cache format to the write format
        partner = partner1
        partner.country_id, partner.child_ids
        data = partner._convert_to_write(partner._cache)
        self.assertEqual(data['country_id'], partner.country_id.id)
        self.assertEqual(data['child_ids'], [Command.set(partner.child_ids.ids)])
    @mute_logger('odoo.models')
    def test_60_prefetch(self):
        """ Check the record cache prefetching """
        partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=models.PREFETCH_MAX)
        self.assertTrue(len(partners) > 1)

        # all the records in partners are ready for prefetching
        self.assertItemsEqual(partners.ids, partners._prefetch_ids)

        # reading ONE partner should fetch them ALL
        # NOTE: `state` deliberately leaks out of this loop; it is reused below
        for partner in partners:
            state = partner.state_id
            break
        partner_ids_with_field = [partner.id
                                  for partner in partners
                                  if 'state_id' in partner._cache]
        self.assertItemsEqual(partner_ids_with_field, partners.ids)

        # partners' states are ready for prefetching
        state_ids = {
            partner._cache['state_id']
            for partner in partners
            if partner._cache['state_id'] is not None
        }
        self.assertTrue(len(state_ids) > 1)
        self.assertItemsEqual(state_ids, state._prefetch_ids)

        # reading ONE partner country should fetch ALL partners' countries
        for partner in partners:
            if partner.state_id:
                partner.state_id.name
                break
        state_ids_with_field = [st.id for st in partners.state_id if 'name' in st._cache]
        self.assertItemsEqual(state_ids_with_field, state_ids)
    @mute_logger('odoo.models')
    def test_60_prefetch_model(self):
        """ Check the prefetching model. """
        partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=models.PREFETCH_MAX)
        self.assertTrue(partners)

        # local helpers comparing the prefetch id sets of two recordsets
        def same_prefetch(a, b):
            self.assertEqual(set(a._prefetch_ids), set(b._prefetch_ids))

        def diff_prefetch(a, b):
            self.assertNotEqual(set(a._prefetch_ids), set(b._prefetch_ids))

        # the recordset operations below use different prefetch sets
        diff_prefetch(partners, partners.browse())
        diff_prefetch(partners, partners[0])
        diff_prefetch(partners, partners[:5])

        # the recordset operations below share the prefetch set
        same_prefetch(partners, partners.browse(partners.ids))
        same_prefetch(partners, partners.with_user(self.user_demo))
        same_prefetch(partners, partners.with_context(active_test=False))
        same_prefetch(partners, partners[:10].with_prefetch(partners._prefetch_ids))

        # iteration and relational fields should use the same prefetch set
        self.assertEqual(type(partners).country_id.type, 'many2one')
        self.assertEqual(type(partners).bank_ids.type, 'one2many')
        self.assertEqual(type(partners).category_id.type, 'many2many')

        # one record with empty and one with non-empty relational fields
        vals0 = {
            'name': 'Empty relational fields',
            'country_id': False,
            'bank_ids': [],
            'category_id': [],
        }
        vals1 = {
            'name': 'Non-empty relational fields',
            'country_id': self.ref('base.be'),
            'bank_ids': [Command.create({'acc_number': 'FOO42'})],
            'category_id': [Command.link(self.partner_category.id)],
        }
        partners = partners.create(vals0) + partners.create(vals1)
        for partner in partners:
            same_prefetch(partner, partners)
            same_prefetch(partner.country_id, partners.country_id)
            same_prefetch(partner.bank_ids, partners.bank_ids)
            same_prefetch(partner.category_id, partners.category_id)
    @mute_logger('odoo.models')
    def test_60_prefetch_read(self):
        """ Check that reading a field computes it on self only. """
        Partner = self.env['res.partner']
        # 'company_type' is computed and not stored, per the assertion below
        field = type(Partner).company_type
        self.assertTrue(field.compute and not field.store)

        partner1 = Partner.create({'name': 'Foo'})
        partner2 = Partner.create({'name': 'Bar', 'parent_id': partner1.id})
        self.assertEqual(partner1.child_ids, partner2)

        # reading partner1 should not prefetch 'company_type' on partner2
        self.env.clear()
        partner1 = partner1.with_prefetch()
        partner1.read(['company_type'])
        self.assertIn('company_type', partner1._cache)
        self.assertNotIn('company_type', partner2._cache)

        # same, even when a relational field is read alongside the computed one
        self.env.clear()
        partner1 = partner1.with_prefetch()
        partner1.read(['child_ids', 'company_type'])
        self.assertIn('company_type', partner1._cache)
        self.assertNotIn('company_type', partner2._cache)
@mute_logger('odoo.models')
def test_70_one(self):
""" Check method one(). """
# check with many records
ps = self.env['res.partner'].search([('name', 'ilike', 'a'), ('id', 'in', self.partners.ids)])
self.assertTrue(len(ps) > 1)
with self.assertRaises(ValueError):
ps.ensure_one()
p1 = ps[0]
self.assertEqual(len(p1), 1)
self.assertEqual(p1.ensure_one(), p1)
p0 = self.env['res.partner'].browse()
self.assertEqual(len(p0), 0)
with self.assertRaises(ValueError):
p0.ensure_one()
@mute_logger('odoo.models')
def test_80_contains(self):
""" Test membership on recordset. """
p1 = self.env['res.partner'].search([('name', 'ilike', 'a'), ('id', 'in', self.partners.ids)], limit=1).ensure_one()
ps = self.env['res.partner'].search([('name', 'ilike', 'a'), ('id', 'in', self.partners.ids)])
self.assertTrue(p1 in ps)
    @mute_logger('odoo.models')
    def test_80_set_operations(self):
        """ Check set operations on recordsets. """
        pa = self.env['res.partner'].search([('name', 'ilike', 'a'), ('id', 'in', self.partners.ids)])
        pb = self.env['res.partner'].search([('name', 'ilike', 'b'), ('id', 'in', self.partners.ids)])
        self.assertTrue(pa)
        self.assertTrue(pb)
        # the two result sets must overlap for the checks below to be meaningful
        self.assertTrue(set(pa) & set(pb))

        # + concatenates, keeping order and duplicates
        concat = pa + pb
        self.assertEqual(list(concat), list(pa) + list(pb))
        self.assertEqual(len(concat), len(pa) + len(pb))

        # - removes records of pb from pa (no duplicates in the result)
        difference = pa - pb
        self.assertEqual(len(difference), len(set(difference)))
        self.assertEqual(set(difference), set(pa) - set(pb))
        self.assertLessEqual(difference, pa)

        # & behaves like set intersection
        intersection = pa & pb
        self.assertEqual(len(intersection), len(set(intersection)))
        self.assertEqual(set(intersection), set(pa) & set(pb))
        self.assertLessEqual(intersection, pa)
        self.assertLessEqual(intersection, pb)

        # | behaves like set union
        union = pa | pb
        self.assertEqual(len(union), len(set(union)))
        self.assertEqual(set(union), set(pa) | set(pb))
        self.assertGreaterEqual(union, pa)
        self.assertGreaterEqual(union, pb)

        # one cannot mix different models with set operations
        ps = pa
        ms = self.env['ir.ui.menu'].search([])
        self.assertNotEqual(ps._name, ms._name)
        self.assertNotEqual(ps, ms)
        with self.assertRaises(TypeError):
            res = ps + ms
        with self.assertRaises(TypeError):
            res = ps - ms
        with self.assertRaises(TypeError):
            res = ps & ms
        with self.assertRaises(TypeError):
            res = ps | ms
        with self.assertRaises(TypeError):
            res = ps < ms
        with self.assertRaises(TypeError):
            res = ps <= ms
        with self.assertRaises(TypeError):
            res = ps > ms
        with self.assertRaises(TypeError):
            res = ps >= ms
@mute_logger('odoo.models')
def test_80_filter(self):
""" Check filter on recordsets. """
ps = self.partners
customers = ps.browse([p.id for p in ps if p.employee])
# filter on a single field
self.assertEqual(ps.filtered(lambda p: p.employee), customers)
self.assertEqual(ps.filtered('employee'), customers)
# filter on a sequence of fields
self.assertEqual(
ps.filtered(lambda p: p.parent_id.employee),
ps.filtered('parent_id.employee')
)
    @mute_logger('odoo.models')
    def test_80_map(self):
        """ Check map on recordsets. """
        ps = self.partners
        # union of all parents, without duplicates
        parents = ps.browse()
        for p in ps:
            parents |= p.parent_id

        # map a single field: function, field name and attribute access agree
        self.assertEqual(ps.mapped(lambda p: p.parent_id), parents)
        self.assertEqual(ps.mapped('parent_id'), parents)
        self.assertEqual(ps.parent_id, parents)

        # map a sequence of fields; a function keeps one value per record,
        # a dotted path deduplicates through the intermediate recordset
        self.assertEqual(
            ps.mapped(lambda p: p.parent_id.name),
            [p.parent_id.name for p in ps]
        )
        self.assertEqual(
            ps.mapped('parent_id.name'),
            [p.name for p in parents]
        )
        self.assertEqual(
            ps.parent_id.mapped('name'),
            [p.name for p in parents]
        )

        # map an empty sequence of fields
        self.assertEqual(ps.mapped(''), ps)
@mute_logger('odoo.models')
def test_80_sorted(self):
""" Check sorted on recordsets. """
ps = self.env['res.partner'].search([('id', 'in', self.partners.ids)])
# sort by model order
qs = ps[:len(ps) // 2] + ps[len(ps) // 2:]
self.assertEqual(qs.sorted().ids, ps.ids)
# sort by name, with a function or a field name
by_name_ids = [p.id for p in sorted(ps, key=lambda p: p.name)]
self.assertEqual(ps.sorted(lambda p: p.name).ids, by_name_ids)
self.assertEqual(ps.sorted('name').ids, by_name_ids)
# sort by inverse name, with a field name
by_name_ids = [p.id for p in sorted(ps, key=lambda p: p.name, reverse=True)]
self.assertEqual(ps.sorted('name', reverse=True).ids, by_name_ids)
class TestExternalAPI(SavepointCaseWithUserDemo):

    def test_call_kw(self):
        """api.call_kw() must leave the caller's kwargs dict unmodified."""
        Partner = self.env['res.partner']
        record = Partner.create({'name': 'MyPartner1'})
        call_args = (record.ids, ['name'])
        call_kwargs = {'context': {'test': True}}
        api.call_kw(Partner, 'read', call_args, call_kwargs)
        # the dict passed in must still equal its original value
        self.assertEqual(call_kwargs, {'context': {'test': True}})
| 40.699627 | 21,815 |
13,408 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from odoo.exceptions import AccessError
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
from odoo.tests.common import TransactionCase
from odoo.tools.misc import mute_logger
from odoo import Command
# test group that demo user should not have
GROUP_SYSTEM = 'base.group_system'
class TestACL(TransactionCaseWithUserDemo):

    def setUp(self):
        super(TestACL, self).setUp()
        # the test group (base.group_system) that the demo user must NOT have
        self.erp_system_group = self.env.ref(GROUP_SYSTEM)

    def _set_field_groups(self, model, field_name, groups):
        # temporarily restrict `field_name` of `model` to `groups`;
        # self.patch() reverts the change when the test ends
        field = model._fields[field_name]
        self.patch(field, 'groups', groups)
    def test_field_visibility_restriction(self):
        """Check that model-level ``groups`` parameter effectively restricts access to that
        field for users who do not belong to one of the explicitly allowed groups"""
        currency = self.env['res.currency'].with_user(self.user_demo)

        # Add a view that adds a label for the field we are going to check
        extension = self.env["ir.ui.view"].create({
            "name": "Add separate label for decimal_places",
            "model": "res.currency",
            "inherit_id": self.env.ref("base.view_currency_form").id,
            "arch": """
                <data>
                    <field name="decimal_places" position="attributes">
                        <attribute name="nolabel">1</attribute>
                    </field>
                    <field name="decimal_places" position="before">
                        <label for="decimal_places"/>
                    </field>
                </data>
            """,
        })
        currency = currency.with_context(check_view_ids=extension.ids)

        # Verify the test environment first
        original_fields = currency.fields_get([])
        form_view = currency.fields_view_get(False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        has_group_system = self.user_demo.has_group(GROUP_SYSTEM)
        self.assertFalse(has_group_system, "`demo` user should not belong to the restricted group before the test")
        self.assertIn('decimal_places', original_fields, "'decimal_places' field must be properly visible before the test")
        self.assertNotEqual(view_arch.xpath("//field[@name='decimal_places'][@nolabel='1']"), [],
                            "Field 'decimal_places' must be found in view definition before the test")
        self.assertNotEqual(view_arch.xpath("//label[@for='decimal_places']"), [],
                            "Label for 'decimal_places' must be found in view definition before the test")

        # restrict access to the field and check it's gone from both
        # fields_get() and the rendered form view (field AND its label)
        self._set_field_groups(currency, 'decimal_places', GROUP_SYSTEM)
        fields = currency.fields_get([])
        form_view = currency.fields_view_get(False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        self.assertNotIn('decimal_places', fields, "'decimal_places' field should be gone")
        self.assertEqual(view_arch.xpath("//field[@name='decimal_places']"), [],
                         "Field 'decimal_places' must not be found in view definition")
        self.assertEqual(view_arch.xpath("//label[@for='decimal_places']"), [],
                         "Label for 'decimal_places' must not be found in view definition")

        # Make demo user a member of the restricted group and check that the field is back
        self.erp_system_group.users += self.user_demo
        has_group_system = self.user_demo.has_group(GROUP_SYSTEM)
        fields = currency.fields_get([])
        form_view = currency.fields_view_get(False, 'form')
        view_arch = etree.fromstring(form_view.get('arch'))
        self.assertTrue(has_group_system, "`demo` user should now belong to the restricted group")
        self.assertIn('decimal_places', fields, "'decimal_places' field must be properly visible again")
        self.assertNotEqual(view_arch.xpath("//field[@name='decimal_places']"), [],
                            "Field 'decimal_places' must be found in view definition again")
        self.assertNotEqual(view_arch.xpath("//label[@for='decimal_places']"), [],
                            "Label for 'decimal_places' must be found in view definition again")
    @mute_logger('odoo.models')
    def test_field_crud_restriction(self):
        "Read/Write RPC access to restricted field should be forbidden"
        partner = self.env['res.partner'].browse(1).with_user(self.user_demo)

        # Verify the test environment first: demo can read/write 'bank_ids'
        has_group_system = self.user_demo.has_group(GROUP_SYSTEM)
        self.assertFalse(has_group_system, "`demo` user should not belong to the restricted group")
        self.assertTrue(partner.read(['bank_ids']))
        self.assertTrue(partner.write({'bank_ids': []}))

        # Now restrict access to the field and check it's forbidden
        self._set_field_groups(partner, 'bank_ids', GROUP_SYSTEM)
        with self.assertRaises(AccessError):
            partner.read(['bank_ids'])
        with self.assertRaises(AccessError):
            partner.write({'bank_ids': []})

        # Add the restricted group, and check that it works again
        self.erp_system_group.users += self.user_demo
        has_group_system = self.user_demo.has_group(GROUP_SYSTEM)
        self.assertTrue(has_group_system, "`demo` user should now belong to the restricted group")
        self.assertTrue(partner.read(['bank_ids']))
        self.assertTrue(partner.write({'bank_ids': []}))
    @mute_logger('odoo.models')
    def test_fields_browse_restriction(self):
        """Test access to records having restricted fields"""
        # Invalidate cache to avoid restricted value to be available
        # in the cache
        self.user_demo.invalidate_cache()
        partner = self.env['res.partner'].with_user(self.user_demo)
        self._set_field_groups(partner, 'email', GROUP_SYSTEM)

        # accessing fields must no raise exceptions...
        partner = partner.search([], limit=1)
        partner.name

        # ... except if they are restricted: reading 'email' must raise
        with self.assertRaises(AccessError):
            with mute_logger('odoo.models'):
                partner.email
def test_view_create_edit_button_invisibility(self):
""" Test form view Create, Edit, Delete button visibility based on access right of model"""
methods = ['create', 'edit', 'delete']
company = self.env['res.company'].with_user(self.user_demo)
company_view = company.fields_view_get(False, 'form')
view_arch = etree.fromstring(company_view['arch'])
for method in methods:
self.assertEqual(view_arch.get(method), 'false')
def test_view_create_edit_button_visibility(self):
""" Test form view Create, Edit, Delete button visibility based on access right of model"""
self.erp_system_group.users += self.user_demo
methods = ['create', 'edit', 'delete']
company = self.env['res.company'].with_user(self.user_demo)
company_view = company.fields_view_get(False, 'form')
view_arch = etree.fromstring(company_view['arch'])
for method in methods:
self.assertIsNone(view_arch.get(method))
def test_m2o_field_create_edit_invisibility(self):
""" Test many2one field Create and Edit option visibility based on access rights of relation field"""
methods = ['create', 'write']
company = self.env['res.company'].with_user(self.user_demo)
company_view = company.fields_view_get(False, 'form')
view_arch = etree.fromstring(company_view['arch'])
field_node = view_arch.xpath("//field[@name='currency_id']")
self.assertTrue(len(field_node), "currency_id field should be in company from view")
for method in methods:
self.assertEqual(field_node[0].get('can_' + method), 'false')
def test_m2o_field_create_edit_visibility(self):
""" Test many2one field Create and Edit option visibility based on access rights of relation field"""
self.erp_system_group.users += self.user_demo
methods = ['create', 'write']
company = self.env['res.company'].with_user(self.user_demo)
company_view = company.fields_view_get(False, 'form')
view_arch = etree.fromstring(company_view['arch'])
field_node = view_arch.xpath("//field[@name='currency_id']")
self.assertTrue(len(field_node), "currency_id field should be in company from view")
for method in methods:
self.assertEqual(field_node[0].get('can_' + method), 'true')
class TestIrRule(TransactionCaseWithUserDemo):
    """Record rules (ir.rule) combination semantics: rules attached to groups
    are OR'ed together, while global rules (no group) are AND'ed with them."""

    def test_ir_rule(self):
        """Check that demo keeps read access to partners under successive
        combinations of blank, trivial and restrictive rule domains."""
        model_res_partner = self.env.ref('base.model_res_partner')
        group_user = self.env.ref('base.group_user')
        # create an ir_rule for the Employee group with a blank domain
        rule1 = self.env['ir.rule'].create({
            'name': 'test_rule1',
            'model_id': model_res_partner.id,
            'domain_force': False,
            'groups': [Command.set(group_user.ids)],
        })
        # read as demo user the partners (one blank domain)
        partners_demo = self.env['res.partner'].with_user(self.user_demo)
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # same with domain 1=1 (trivially true)
        rule1.domain_force = "[(1,'=',1)]"
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # same with domain []
        rule1.domain_force = "[]"
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # create another ir_rule for the Employee group (to test multiple rules)
        rule2 = self.env['ir.rule'].create({
            'name': 'test_rule2',
            'model_id': model_res_partner.id,
            'domain_force': False,
            'groups': [Command.set(group_user.ids)],
        })
        # read as demo user with domains [] and blank
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # same with domains 1=1 and blank
        rule1.domain_force = "[(1,'=',1)]"
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # same with domains 1=1 and 1=1
        rule2.domain_force = "[(1,'=',1)]"
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # create another ir_rule for the Employee group (to test multiple rules)
        rule3 = self.env['ir.rule'].create({
            'name': 'test_rule3',
            'model_id': model_res_partner.id,
            'domain_force': False,
            'groups': [Command.set(group_user.ids)],
        })
        # read the partners as demo user
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # same with domains 1=1, 1=1 and 1=1
        rule3.domain_force = "[(1,'=',1)]"
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # modify the global rule on res_company which triggers a recursive check
        # of the rules on company
        global_rule = self.env.ref('base.res_company_rule_employee')
        global_rule.domain_force = "[('id','in', company_ids)]"
        # read as demo user (exercising the global company rule)
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # Modify the ir_rule for employee to have a rule that forbids seeing any
        # record. We use a domain with implicit AND operator for later tests on
        # normalization.
        rule2.domain_force = "[('id','=',False),('name','=',False)]"
        # check that demo user still sees partners, because group-rules are OR'ed
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partner.")
        # create a new group with demo user in it, and a complex rule
        group_test = self.env['res.groups'].create({
            'name': 'Test Group',
            'users': [Command.set(self.user_demo.ids)],
        })
        # add the rule to the new group, with a domain containing an implicit
        # AND operator, which is more tricky because it will have to be
        # normalized before combining it
        rule3.write({
            'domain_force': "[('name','!=',False),('id','!=',False)]",
            'groups': [Command.set(group_test.ids)],
        })
        # read the partners again as demo user, which should give results
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see partners even with the combined rules.")
        # delete global domains (to combine only group domains)
        self.env['ir.rule'].search([('groups', '=', False)]).unlink()
        # read the partners as demo user (several group domains, no global domain)
        partners = partners_demo.search([])
        self.assertTrue(partners, "Demo user should see some partners.")
| 47.715302 | 13,408 |
9,075 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from functools import partial
import odoo
from odoo.sql_db import db_connect, TestCursor
from odoo.tests import common
from odoo.tests.common import BaseCase
from odoo.tools.misc import config, mute_logger
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry():
    """Return the Odoo model registry bound to the database under test."""
    db_name = common.get_db_name()
    return odoo.registry(db_name)
class TestExecute(BaseCase):
    """ Try cr.execute with wrong parameters """

    @mute_logger('odoo.sql_db')
    def test_execute_bad_params(self):
        """
        Try to use iterable but non-list or int params in query parameters.
        """
        # each call passes params not wrapped in a list/tuple/dict,
        # which cr.execute() must reject with a ValueError
        bad_calls = (
            ("SELECT id FROM res_users WHERE login=%s", 'admin'),
            ("SELECT id FROM res_users WHERE id=%s", 1),
            ("SELECT id FROM res_users WHERE id=%s", '1'),
        )
        with registry().cursor() as cr:
            for query, params in bad_calls:
                with self.assertRaises(ValueError):
                    cr.execute(query, params)
class TestTestCursor(common.TransactionCase):
    """Behavior of TestCursor: emulated commit/rollback, nested 'sub-cursors'
    sharing the same transaction, and connection-pool borrowing."""

    def setUp(self):
        super().setUp()
        # make the registry in test mode
        self.registry.enter_test_mode(self.cr)
        self.addCleanup(self.registry.leave_test_mode)
        # now we make a test cursor for self.cr
        self.cr = self.registry.cursor()
        self.addCleanup(self.cr.close)
        self.env = odoo.api.Environment(self.cr, odoo.SUPERUSER_ID, {})
        self.record = self.env['res.partner'].create({'name': 'Foo'})

    def write(self, record, value):
        # helper: assign the 'ref' field (written to DB on flush)
        record.ref = value

    def flush(self, record):
        # helper: push the pending 'ref' assignment to the database
        record.flush(['ref'])

    def check(self, record, value):
        # helper: compare the value of 'ref' as returned by read()
        self.assertEqual(record.read(['ref'])[0]['ref'], value)

    def test_single_cursor(self):
        """ Check the behavior of a single test cursor. """
        self.assertIsInstance(self.cr, TestCursor)
        # committed value 'A' survives later rollbacks
        self.write(self.record, 'A')
        self.cr.commit()
        self.write(self.record, 'B')
        self.cr.rollback()
        self.check(self.record, 'A')
        self.write(self.record, 'C')
        self.cr.rollback()
        self.check(self.record, 'A')

    def test_sub_commit(self):
        """ Check the behavior of a subcursor that commits. """
        self.assertIsInstance(self.cr, TestCursor)
        self.write(self.record, 'A')
        self.cr.commit()
        self.write(self.record, 'B')
        self.flush(self.record)
        # check behavior of a "sub-cursor" that commits
        with self.registry.cursor() as cr:
            self.assertIsInstance(cr, TestCursor)
            record = self.record.with_env(self.env(cr=cr))
            # the sub-cursor sees the flushed (uncommitted) value 'B'
            self.check(record, 'B')
            self.write(record, 'C')
        # the sub-cursor's commit made 'C' visible to the outer cursor ...
        self.check(self.record, 'C')
        # ... but rolling back the outer cursor still returns to the last
        # commit of the outer cursor ('A')
        self.cr.rollback()
        self.check(self.record, 'A')

    def test_sub_rollback(self):
        """ Check the behavior of a subcursor that rollbacks. """
        self.assertIsInstance(self.cr, TestCursor)
        self.write(self.record, 'A')
        self.cr.commit()
        self.write(self.record, 'B')
        self.flush(self.record)
        # check behavior of a "sub-cursor" that rollbacks
        with self.assertRaises(ValueError):
            with self.registry.cursor() as cr:
                self.assertIsInstance(cr, TestCursor)
                record = self.record.with_env(self.env(cr=cr))
                self.check(record, 'B')
                self.write(record, 'C')
                # raising aborts the sub-cursor, discarding 'C'
                raise ValueError(42)
        self.check(self.record, 'B')
        self.cr.rollback()
        self.check(self.record, 'A')

    def test_borrow_connection(self):
        """Tests the behavior of the postgresql connection pool recycling/borrowing"""
        origin_db_port = config['db_port']
        if not origin_db_port and hasattr(self.env.cr._cnx, 'info'):
            # Check the edge case of the db port set,
            # which is set as an integer in our DSN/connection_info
            # but as string in the DSN of psycopg2
            # The connections must be recycled/borrowed when the db_port is set
            # e.g
            # `connection.dsn`
            # {'database': '14.0', 'port': 5432, 'sslmode': 'prefer'}
            # must match
            # `cr._cnx.dsn`
            # 'port=5432 sslmode=prefer dbname=14.0'
            config['db_port'] = self.env.cr._cnx.info.port
        cursors = []
        try:
            connection = db_connect(self.cr.dbname)
            # Case #1: 2 cursors, both opened/used, do not recycle/borrow.
            # The 2nd cursor must not use the connection of the 1st cursor as it's used (not closed).
            cursors.append(connection.cursor())
            cursors.append(connection.cursor())
            # Ensure the port is within psycopg's dsn, as explained in an above comment,
            # we want to test the behavior of the connections borrowing including the port provided in the dsn.
            if config['db_port']:
                self.assertTrue('port=' in cursors[0]._cnx.dsn)
            # Check the connection of the 1st cursor is different than the connection of the 2nd cursor.
            self.assertNotEqual(id(cursors[0]._cnx), id(cursors[1]._cnx))
            # Case #2: Close 1st cursor, open 3rd cursor, must recycle/borrow.
            # The 3rd must recycle/borrow the connection of the 1st one.
            cursors[0].close()
            cursors.append(connection.cursor())
            # Check the connection of this 3rd cursor uses the connection of the 1st cursor that has been closed.
            self.assertEqual(id(cursors[0]._cnx), id(cursors[2]._cnx))
        finally:
            # Cleanups:
            # - Close the cursors which have been left opened
            # - Reset the config `db_port`
            for cursor in cursors:
                if not cursor.closed:
                    cursor.close()
            config['db_port'] = origin_db_port
class TestCursorHooks(common.TransactionCase):
    """Check that pre/post commit and rollback hooks fire at the right time,
    on both real cursors and test cursors."""

    def setUp(self):
        super().setUp()
        # each hook appends a marker here so tests can assert ordering
        self.log = []

    def prepare_hooks(self, cr):
        """Reset the log and register one marker hook per callback list."""
        self.log.clear()
        cr.precommit.add(partial(self.log.append, 'preC'))
        cr.postcommit.add(partial(self.log.append, 'postC'))
        cr.prerollback.add(partial(self.log.append, 'preR'))
        cr.postrollback.add(partial(self.log.append, 'postR'))
        self.assertEqual(self.log, [])

    def test_hooks_on_cursor(self):
        cr = self.registry.cursor()
        # check hook on commit()
        self.prepare_hooks(cr)
        cr.commit()
        self.assertEqual(self.log, ['preC', 'postC'])
        # check hook on flush(), then on rollback()
        self.prepare_hooks(cr)
        cr.flush()
        # flush() runs precommit hooks only
        self.assertEqual(self.log, ['preC'])
        cr.rollback()
        self.assertEqual(self.log, ['preC', 'preR', 'postR'])
        # check hook on close(): closing rolls back pending work
        self.prepare_hooks(cr)
        cr.close()
        self.assertEqual(self.log, ['preR', 'postR'])

    def test_hooks_on_testcursor(self):
        self.registry.enter_test_mode(self.cr)
        self.addCleanup(self.registry.leave_test_mode)
        cr = self.registry.cursor()
        # check hook on commit(); post-commit hooks are ignored
        self.prepare_hooks(cr)
        cr.commit()
        self.assertEqual(self.log, ['preC'])
        # check hook on flush(), then on rollback()
        self.prepare_hooks(cr)
        cr.flush()
        self.assertEqual(self.log, ['preC'])
        cr.rollback()
        self.assertEqual(self.log, ['preC', 'preR', 'postR'])
        # check hook on close()
        self.prepare_hooks(cr)
        cr.close()
        self.assertEqual(self.log, ['preR', 'postR'])
class TestCursorHooksTransactionCaseCleanup(common.TransactionCase):
    """Check savepoint cases handle commit hooks properly."""

    def test_isolation_first(self):
        # Register a precommit hook that, if it leaked into the next test,
        # would pop an element from the reference lists set up there.
        def mutate_second_test_ref():
            for name in ['precommit', 'postcommit', 'prerollback', 'postrollback']:
                del self.env.cr.precommit.data.get(f'test_cursor_hooks_savepoint_case_cleanup_test_second_{name}', [''])[0]
        self.env.cr.precommit.add(mutate_second_test_ref)

    def test_isolation_second(self):
        # NOTE(review): `[['not_empty']] * 4` makes all four references alias
        # the SAME inner list — presumably intentional so that a single leaked
        # mutation trips every assertion below; confirm before changing.
        references = [['not_empty']] * 4
        cr = self.env.cr
        commit_callbacks = [cr.precommit, cr.postcommit, cr.prerollback, cr.postrollback]
        callback_names = ['precommit', 'postcommit', 'prerollback', 'postrollback']
        # seed each callback list's data dict with a sentinel entry
        for callback_name, callbacks, reference in zip(callback_names, commit_callbacks, references):
            callbacks.data.setdefault(f"test_cursor_hooks_savepoint_case_cleanup_test_second_{callback_name}", reference)
        for callback in commit_callbacks:
            callback.run()
        # if hooks from test_isolation_first leaked across tests, the
        # sentinels would have been emptied by mutate_second_test_ref()
        for callback_name, reference in zip(callback_names, references):
            self.assertTrue(bool(reference), f"{callback_name} failed to clean up between transaction tests")
            self.assertTrue(reference[0] == 'not_empty', f"{callback_name} failed to clean up between transaction tests")
| 37.8125 | 9,075 |
8,013 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
class TestIrDefault(TransactionCase):
    """Tests for user-defined default field values (model 'ir.default'):
    per-user/per-company resolution, conditions, validation and removal."""

    def test_defaults(self):
        """ check the mechanism of user-defined defaults """
        companyA = self.env.company
        companyB = companyA.create({'name': 'CompanyB'})
        user1 = self.env.user
        user2 = user1.create({'name': 'u2', 'login': 'u2'})
        # user3 belongs to another company than user1/user2
        user3 = user1.create({'name': 'u3', 'login': 'u3',
                              'company_id': companyB.id,
                              'company_ids': companyB.ids})
        # create some default value for some model
        IrDefault1 = self.env['ir.default']
        IrDefault2 = IrDefault1.with_user(user2)
        IrDefault3 = IrDefault1.with_user(user3)
        # set a default value for all users
        IrDefault1.search([('field_id.model', '=', 'res.partner')]).unlink()
        IrDefault1.set('res.partner', 'ref', 'GLOBAL', user_id=False, company_id=False)
        self.assertEqual(IrDefault1.get_model_defaults('res.partner'), {'ref': 'GLOBAL'},
                         "Can't retrieve the created default value for all users.")
        self.assertEqual(IrDefault2.get_model_defaults('res.partner'), {'ref': 'GLOBAL'},
                         "Can't retrieve the created default value for all users.")
        self.assertEqual(IrDefault3.get_model_defaults('res.partner'), {'ref': 'GLOBAL'},
                         "Can't retrieve the created default value for all users.")
        # set a default value for current company (behavior of 'set default' from debug mode)
        IrDefault1.set('res.partner', 'ref', 'COMPANY', user_id=False, company_id=True)
        self.assertEqual(IrDefault1.get_model_defaults('res.partner'), {'ref': 'COMPANY'},
                         "Can't retrieve the created default value for company.")
        self.assertEqual(IrDefault2.get_model_defaults('res.partner'), {'ref': 'COMPANY'},
                         "Can't retrieve the created default value for company.")
        # user3 is in companyB, so the companyA default does not apply to it
        self.assertEqual(IrDefault3.get_model_defaults('res.partner'), {'ref': 'GLOBAL'},
                         "Unexpected default value for company.")
        # set a default value for current user (behavior of 'set default' from debug mode)
        IrDefault2.set('res.partner', 'ref', 'USER', user_id=True, company_id=True)
        self.assertEqual(IrDefault1.get_model_defaults('res.partner'), {'ref': 'COMPANY'},
                         "Can't retrieve the created default value for user.")
        self.assertEqual(IrDefault2.get_model_defaults('res.partner'), {'ref': 'USER'},
                         "Unexpected default value for user.")
        self.assertEqual(IrDefault3.get_model_defaults('res.partner'), {'ref': 'GLOBAL'},
                         "Unexpected default value for company.")
        # check default values on partners (default_get resolves through ir.default)
        default1 = IrDefault1.env['res.partner'].default_get(['ref']).get('ref')
        self.assertEqual(default1, 'COMPANY', "Wrong default value.")
        default2 = IrDefault2.env['res.partner'].default_get(['ref']).get('ref')
        self.assertEqual(default2, 'USER', "Wrong default value.")
        default3 = IrDefault3.env['res.partner'].default_get(['ref']).get('ref')
        self.assertEqual(default3, 'GLOBAL', "Wrong default value.")

    def test_conditions(self):
        """ check user-defined defaults with condition """
        IrDefault = self.env['ir.default']
        # default without condition
        IrDefault.search([('field_id.model', '=', 'res.partner')]).unlink()
        IrDefault.set('res.partner', 'ref', 'X')
        self.assertEqual(IrDefault.get_model_defaults('res.partner'),
                         {'ref': 'X'})
        # an unconditional default is NOT returned for a conditional lookup
        self.assertEqual(IrDefault.get_model_defaults('res.partner', condition='name=Agrolait'),
                         {})
        # default with a condition
        IrDefault.search([('field_id.model', '=', 'res.partner.title')]).unlink()
        IrDefault.set('res.partner.title', 'shortcut', 'X')
        IrDefault.set('res.partner.title', 'shortcut', 'Mr', condition='name=Mister')
        self.assertEqual(IrDefault.get_model_defaults('res.partner.title'),
                         {'shortcut': 'X'})
        self.assertEqual(IrDefault.get_model_defaults('res.partner.title', condition='name=Miss'),
                         {})
        self.assertEqual(IrDefault.get_model_defaults('res.partner.title', condition='name=Mister'),
                         {'shortcut': 'Mr'})

    def test_invalid(self):
        """ check error cases with 'ir.default' """
        IrDefault = self.env['ir.default']
        # unknown model
        with self.assertRaises(ValidationError):
            IrDefault.set('unknown_model', 'unknown_field', 42)
        # unknown field on a known model
        with self.assertRaises(ValidationError):
            IrDefault.set('res.partner', 'unknown_field', 42)
        # value invalid for a selection field
        with self.assertRaises(ValidationError):
            IrDefault.set('res.partner', 'lang', 'some_LANG')
        # value of the wrong type for a float field
        with self.assertRaises(ValidationError):
            IrDefault.set('res.partner', 'credit_limit', 'foo')

    def test_removal(self):
        """ check defaults for many2one with their value being removed """
        IrDefault = self.env['ir.default']
        IrDefault.search([('field_id.model', '=', 'res.partner')]).unlink()
        # set a record as a default value
        title = self.env['res.partner.title'].create({'name': 'President'})
        IrDefault.set('res.partner', 'title', title.id)
        self.assertEqual(IrDefault.get_model_defaults('res.partner'), {'title': title.id})
        # delete the record, and check the presence of the default value
        title.unlink()
        self.assertEqual(IrDefault.get_model_defaults('res.partner'), {})

    def test_multi_company_defaults(self):
        """Check defaults in multi-company environment."""
        company_a = self.env["res.company"].create({"name": "C_A"})
        company_b = self.env["res.company"].create({"name": "C_B"})
        company_a_b = (company_a + company_b)
        company_b_a = (company_b + company_a)
        multi_company_user = self.env['res.users'].create({
            'name': 'u2', 'login': 'u2',
            'company_id': company_a.id,
            'company_ids': company_a_b.ids,
        })
        IrDefault = self.env["ir.default"].with_user(multi_company_user)
        # set one per-company default for each company
        IrDefault.with_context(allowed_company_ids=company_a.ids).set(
            'res.partner', 'ref', 'CADefault', user_id=True, company_id=True)
        IrDefault.with_context(allowed_company_ids=company_b.ids).set(
            'res.partner', 'ref', 'CBDefault', user_id=True, company_id=True)
        # without context, the user's main company (A) wins
        self.assertEqual(
            IrDefault.get_model_defaults('res.partner')['ref'],
            'CADefault',
        )
        self.assertEqual(
            IrDefault.with_context(allowed_company_ids=company_a.ids).get_model_defaults('res.partner')['ref'],
            'CADefault',
        )
        self.assertEqual(
            IrDefault.with_context(allowed_company_ids=company_b.ids).get_model_defaults('res.partner')['ref'],
            'CBDefault',
        )
        # with several allowed companies, the first one in the context wins
        self.assertEqual(
            IrDefault.with_context(allowed_company_ids=company_a_b.ids).get_model_defaults('res.partner')['ref'],
            'CADefault',
        )
        self.assertEqual(
            IrDefault.with_context(allowed_company_ids=company_b_a.ids).get_model_defaults('res.partner')['ref'],
            'CBDefault',
        )

    def test_json_format_invalid(self):
        """ check the _check_json_format constraint """
        IrDefault = self.env['ir.default']
        field_id = self.env['ir.model.fields'].search([('model', '=', 'res.partner'), ('name', '=', 'ref')])
        # trailing comma makes the value invalid JSON
        with self.assertRaises(ValidationError):
            IrDefault.create({
                'field_id': field_id.id,
                'json_value': '{"name":"John", }',
            })
| 51.696774 | 8,013 |
39,900 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from odoo import SUPERUSER_ID, Command
from odoo.exceptions import RedirectWarning, UserError, ValidationError
from odoo.tests.common import TransactionCase, BaseCase
from odoo.tools import mute_logger
from odoo.tools.safe_eval import safe_eval, const_eval, expr_eval
class TestSafeEval(BaseCase):
    """Sanity checks for odoo.tools.safe_eval helpers (const_eval, expr_eval,
    safe_eval) against ast.literal_eval."""

    def test_const(self):
        # NB: True and False are names in Python 2 not consts
        expected = (1, {"a": {2.5}}, [None, u"foo"])
        actual = const_eval('(1, {"a": {2.5}}, [None, u"foo"])')
        self.assertEqual(actual, expected)

    def test_expr(self):
        # NB: True and False are names in Python 2 not consts
        # expr_eval allows arithmetic, unlike const_eval/literal_eval
        expected = 3 * 4
        actual = expr_eval('3 * 4')
        self.assertEqual(actual, expected)

    def test_01_safe_eval(self):
        """ Try a few common expressions to verify they work with safe_eval """
        expected = (1, {"a": 9 * 2}, (True, False, None))
        actual = safe_eval('(1, {"a": 9 * 2}, (True, False, None))')
        self.assertEqual(actual, expected, "Simple python expressions are not working with safe_eval")

    def test_02_literal_eval(self):
        """ Try simple literal definition to verify it works with literal_eval """
        expected = (1, {"a": 9}, (True, False, None))
        actual = ast.literal_eval('(1, {"a": 9}, (True, False, None))')
        self.assertEqual(actual, expected, "Simple python expressions are not working with literal_eval")

    def test_03_literal_eval_arithmetic(self):
        """ Try arithmetic expression in literal_eval to verify it does not work """
        with self.assertRaises(ValueError):
            ast.literal_eval('(1, {"a": 2*9}, (True, False, None))')

    def test_04_literal_eval_forbidden(self):
        """ Try forbidden expressions in literal_eval to verify they are not allowed """
        with self.assertRaises(ValueError):
            ast.literal_eval('{"a": True.__class__}')

    @mute_logger('odoo.tools.safe_eval')
    def test_05_safe_eval_forbiddon(self):
        # (sic: "forbiddon" typo kept — renaming would change the test identifier)
        """ Try forbidden expressions in safe_eval to verify they are not allowed"""
        # no forbidden builtin expression
        with self.assertRaises(ValueError):
            safe_eval('open("/etc/passwd","r")')
        # no forbidden opcodes
        with self.assertRaises(ValueError):
            safe_eval("import odoo", mode="exec")
        # no dunder
        with self.assertRaises(NameError):
            safe_eval("self.__name__", {'self': self}, mode="exec")
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
# Each entry: (raw input string, expected parsed name, expected parsed email).
SAMPLES = [
    ('"Raoul Grosbedon" <[email protected]> ', 'Raoul Grosbedon', '[email protected]'),
    ('[email protected]', '', '[email protected]'),
    ('Raoul chirurgiens-dentistes.fr', 'Raoul chirurgiens-dentistes.fr', ''),
    (" Raoul O'hara <[email protected]>", "Raoul O'hara", '[email protected]'),
    ('Raoul Grosbedon <[email protected]> ', 'Raoul Grosbedon', '[email protected]'),
    ('Raoul [email protected]', 'Raoul', '[email protected]'),
]
class TestBase(TransactionCase):
def _check_find_or_create(self, test_string, expected_name, expected_email, check_partner=False, should_create=False):
partner = self.env['res.partner'].find_or_create(test_string)
if should_create and check_partner:
self.assertTrue(partner.id > check_partner.id, 'find_or_create failed - should have found existing')
elif check_partner:
self.assertEqual(partner, check_partner, 'find_or_create failed - should have found existing')
self.assertEqual(partner.name, expected_name)
self.assertEqual(partner.email or '', expected_email)
return partner
    def test_00_res_partner_name_create(self):
        """Check that name_create() parses 'Name <email>' inputs and honours
        the default_email context fallback."""
        res_partner = self.env['res.partner']
        parse = res_partner._parse_partner_name
        for text, name, mail in SAMPLES:
            # the parser lowercases the email part
            self.assertEqual((name, mail.lower()), parse(text))
            partner_id, dummy = res_partner.name_create(text)
            partner = res_partner.browse(partner_id)
            # when no name could be parsed, the email becomes the name
            self.assertEqual(name or mail.lower(), partner.name)
            self.assertEqual(mail.lower() or False, partner.email)
        # name_create supports default_email fallback
        partner = self.env['res.partner'].browse(
            self.env['res.partner'].with_context(
                default_email='[email protected]'
            ).name_create('"Raoulette Vachette" <[email protected]>')[0]
        )
        self.assertEqual(partner.name, 'Raoulette Vachette')
        self.assertEqual(partner.email, '[email protected]')
        partner = self.env['res.partner'].browse(
            self.env['res.partner'].with_context(
                default_email='[email protected]'
            ).name_create('Raoulette Vachette')[0]
        )
        self.assertEqual(partner.name, 'Raoulette Vachette')
        self.assertEqual(partner.email, '[email protected]')
def test_10_res_partner_find_or_create(self):
res_partner = self.env['res.partner']
partner = res_partner.browse(res_partner.name_create(SAMPLES[0][0])[0])
self._check_find_or_create(
SAMPLES[0][0], SAMPLES[0][1], SAMPLES[0][2],
check_partner=partner, should_create=False
)
partner_2 = res_partner.browse(res_partner.name_create('[email protected]')[0])
found_2 = self._check_find_or_create(
'[email protected]', '[email protected]', '[email protected]',
check_partner=partner_2, should_create=True
)
new = self._check_find_or_create(
SAMPLES[1][0], SAMPLES[1][2].lower(), SAMPLES[1][2].lower(),
check_partner=found_2, should_create=True
)
new2 = self._check_find_or_create(
SAMPLES[2][0], SAMPLES[2][1], SAMPLES[2][2],
check_partner=new, should_create=True
)
new3 = self._check_find_or_create(
SAMPLES[3][0], SAMPLES[3][1], SAMPLES[3][2],
check_partner=new2, should_create=True
)
new4 = self._check_find_or_create(
SAMPLES[4][0], SAMPLES[0][1], SAMPLES[0][2],
check_partner=partner, should_create=False
)
new5 = self._check_find_or_create(
SAMPLES[5][0], SAMPLES[5][1], SAMPLES[5][2],
check_partner=new4, should_create=True
)
    def test_15_res_partner_name_search(self):
        """Check name_search() results and limit handling; inactive partners
        must be excluded from the matches."""
        res_partner = self.env['res.partner']
        # (name, active) pairs — the first partner is created inactive
        DATA = [
            ('"A Raoul Grosbedon" <[email protected]>', False),
            ('B Raoul chirurgiens-dentistes.fr', True),
            ("C Raoul O'hara <[email protected]>", True),
            ('[email protected]', True),
        ]
        for name, active in DATA:
            partner_id, dummy = res_partner.with_context(default_active=active).name_create(name)
        # only the two active partners whose display name matches 'Raoul'
        partners = res_partner.name_search('Raoul')
        self.assertEqual(len(partners), 2, 'Incorrect search number result for name_search')
        partners = res_partner.name_search('Raoul', limit=1)
        self.assertEqual(len(partners), 1, 'Incorrect search number result for name_search with a limit')
        self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
    def test_20_res_partner_address_sync(self):
        """Address fields of a 'contact'-type child must follow its parent
        company; phone/email/type are never synced."""
        res_partner = self.env['res.partner']
        ghoststep = res_partner.create({
            'name': 'GhostStep',
            'is_company': True,
            'street': 'Main Street, 10',
            'phone': '123456789',
            'email': '[email protected]',
            'vat': 'BE0477472701',
            'type': 'contact',
        })
        p1 = res_partner.browse(res_partner.name_create('Denis Bladesmith <[email protected]>')[0])
        self.assertEqual(p1.type, 'contact', 'Default type must be "contact"')
        p1phone = '123456789#34'
        # attaching the contact to the company triggers the address sync
        p1.write({'phone': p1phone,
                  'parent_id': ghoststep.id})
        self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced')
        self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
        self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
        self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
        # turn off sync: a non-'contact' type keeps its own address
        p1street = 'Different street, 42'
        p1.write({'street': p1street,
                  'type': 'invoice'})
        self.assertEqual(p1.street, p1street, 'Address fields must not be synced after turning sync off')
        self.assertNotEqual(ghoststep.street, p1street, 'Parent address must never be touched')
        # turn on sync again
        p1.write({'type': 'contact'})
        self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced again')
        self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
        self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
        self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
        # Modify parent, sync to children
        ghoststreet = 'South Street, 25'
        ghoststep.write({'street': ghoststreet})
        self.assertEqual(p1.street, ghoststreet, 'Address fields must be synced automatically')
        self.assertEqual(p1.phone, p1phone, 'Phone should not be synced')
        self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
        # sync is one-way: writing on the child must not touch the parent
        p1street = 'My Street, 11'
        p1.write({'street': p1street})
        self.assertEqual(ghoststep.street, ghoststreet, 'Touching contact should never alter parent')
    def test_30_res_partner_first_contact_sync(self):
        """ Test initial creation of company/contact pair where contact address gets copied to
        company """
        res_partner = self.env['res.partner']
        ironshield = res_partner.browse(res_partner.name_create('IronShield')[0])
        self.assertFalse(ironshield.is_company, 'Partners are not companies by default')
        self.assertEqual(ironshield.type, 'contact', 'Default type must be "contact"')
        ironshield.write({'type': 'contact'})
        # creating a child with an address while the parent has none:
        # the child's address must be copied up to the parent
        p1 = res_partner.create({
            'name': 'Isen Hardearth',
            'street': 'Strongarm Avenue, 12',
            'parent_id': ironshield.id,
        })
        self.assertEqual(p1.type, 'contact', 'Default type must be "contact", not the copied parent type')
        self.assertEqual(ironshield.street, p1.street, 'Address fields should be copied to company')
    def test_40_res_partner_address_get(self):
        """ Test address_get address resolution mechanism: it should first go down through descendants,
        stopping when encountering another is_company entity, then go up, stopping again at the first
        is_company entity or the root ancestor and if nothing matches, it should use the provided partner
        itself """
        res_partner = self.env['res.partner']
        # build a tree: elmtree -> (branch1 -> leaf10, branch11 -> leaf111), branch2 -> leaves
        elmtree = res_partner.browse(res_partner.name_create('Elmtree')[0])
        branch1 = res_partner.create({'name': 'Branch 1',
                                      'parent_id': elmtree.id,
                                      'is_company': True})
        leaf10 = res_partner.create({'name': 'Leaf 10',
                                     'parent_id': branch1.id,
                                     'type': 'invoice'})
        branch11 = res_partner.create({'name': 'Branch 11',
                                       'parent_id': branch1.id,
                                       'type': 'other'})
        leaf111 = res_partner.create({'name': 'Leaf 111',
                                      'parent_id': branch11.id,
                                      'type': 'delivery'})
        branch11.write({'is_company': False})  # force is_company after creating 1rst child
        branch2 = res_partner.create({'name': 'Branch 2',
                                      'parent_id': elmtree.id,
                                      'is_company': True})
        leaf21 = res_partner.create({'name': 'Leaf 21',
                                     'parent_id': branch2.id,
                                     'type': 'delivery'})
        leaf22 = res_partner.create({'name': 'Leaf 22',
                                     'parent_id': branch2.id})
        leaf23 = res_partner.create({'name': 'Leaf 23',
                                     'parent_id': branch2.id,
                                     'type': 'contact'})
        # go up, stop at branch1
        self.assertEqual(leaf111.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf111.id,
                          'invoice': leaf10.id,
                          'contact': branch1.id,
                          'other': branch11.id}, 'Invalid address resolution')
        self.assertEqual(branch11.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf111.id,
                          'invoice': leaf10.id,
                          'contact': branch1.id,
                          'other': branch11.id}, 'Invalid address resolution')
        # go down, stop at at all child companies
        self.assertEqual(elmtree.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': elmtree.id,
                          'invoice': elmtree.id,
                          'contact': elmtree.id,
                          'other': elmtree.id}, 'Invalid address resolution')
        # go down through children
        self.assertEqual(branch1.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf111.id,
                          'invoice': leaf10.id,
                          'contact': branch1.id,
                          'other': branch11.id}, 'Invalid address resolution')
        self.assertEqual(branch2.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf21.id,
                          'invoice': branch2.id,
                          'contact': branch2.id,
                          'other': branch2.id}, 'Invalid address resolution. Company is the first encountered contact, therefore default for unfound addresses.')
        # go up then down through siblings
        self.assertEqual(leaf21.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf21.id,
                          'invoice': branch2.id,
                          'contact': branch2.id,
                          'other': branch2.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
        self.assertEqual(leaf22.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf21.id,
                          'invoice': leaf22.id,
                          'contact': leaf22.id,
                          'other': leaf22.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
        self.assertEqual(leaf23.address_get(['delivery', 'invoice', 'contact', 'other']),
                         {'delivery': leaf21.id,
                          'invoice': leaf23.id,
                          'contact': leaf23.id,
                          'other': leaf23.id}, 'Invalid address resolution, `default` should only override if no partner with specific type exists')
        # empty adr_pref means only 'contact'
        self.assertEqual(elmtree.address_get([]),
                         {'contact': elmtree.id}, 'Invalid address resolution, no contact means commercial entity ancestor')
        self.assertEqual(leaf111.address_get([]),
                         {'contact': branch1.id}, 'Invalid address resolution, no contact means finding contact in ancestors')
        branch11.write({'type': 'contact'})
        self.assertEqual(leaf111.address_get([]),
                         {'contact': branch11.id}, 'Invalid address resolution, branch11 should now be contact')
def test_commercial_partner_nullcompany(self):
""" The commercial partner is the first/nearest ancestor-or-self which
is a company or doesn't have a parent
"""
P = self.env['res.partner']
p0 = P.create({'name': '0', 'email': '0'})
self.assertEqual(p0.commercial_partner_id, p0, "partner without a parent is their own commercial partner")
p1 = P.create({'name': '1', 'email': '1', 'parent_id': p0.id})
self.assertEqual(p1.commercial_partner_id, p0, "partner's parent is their commercial partner")
p12 = P.create({'name': '12', 'email': '12', 'parent_id': p1.id})
self.assertEqual(p12.commercial_partner_id, p0, "partner's GP is their commercial partner")
p2 = P.create({'name': '2', 'email': '2', 'parent_id': p0.id, 'is_company': True})
self.assertEqual(p2.commercial_partner_id, p2, "partner flagged as company is their own commercial partner")
p21 = P.create({'name': '21', 'email': '21', 'parent_id': p2.id})
self.assertEqual(p21.commercial_partner_id, p2, "commercial partner is closest ancestor with themselves as commercial partner")
p3 = P.create({'name': '3', 'email': '3', 'is_company': True})
self.assertEqual(p3.commercial_partner_id, p3, "being both parent-less and company should be the same as either")
notcompanies = p0 | p1 | p12 | p21
self.env.cr.execute('update res_partner set is_company=null where id = any(%s)', [notcompanies.ids])
for parent in notcompanies:
p = P.create({
'name': parent.name + '_sub',
'email': parent.email + '_sub',
'parent_id': parent.id,
})
self.assertEqual(
p.commercial_partner_id,
parent.commercial_partner_id,
"check that is_company=null is properly handled when looking for ancestor"
)
    def test_50_res_partner_commercial_sync(self):
        """ Commercial fields (here: vat) must be synchronized from the
        commercial entity down to its non-company descendants, and only
        downstream. """
        res_partner = self.env['res.partner']
        p0 = res_partner.create({'name': 'Sigurd Sunknife',
                                 'email': '[email protected]'})
        # company with one linked child (p0) and one child created inline
        sunhelm = res_partner.create({'name': 'Sunhelm',
                                      'is_company': True,
                                      'street': 'Rainbow Street, 13',
                                      'phone': '1122334455',
                                      'email': '[email protected]',
                                      'vat': 'BE0477472701',
                                      'child_ids': [Command.link(p0.id),
                                                    Command.create({'name': 'Alrik Greenthorn',
                                                                    'email': '[email protected]'})]})
        p1 = res_partner.create({'name': 'Otto Blackwood',
                                 'email': '[email protected]',
                                 'parent_id': sunhelm.id})
        p11 = res_partner.create({'name': 'Gini Graywool',
                                  'email': '[email protected]',
                                  'parent_id': p1.id})
        # retrieve the inline-created children through search
        p2 = res_partner.search([('email', '=', '[email protected]')], limit=1)
        sunhelm.write({'child_ids': [Command.create({'name': 'Ulrik Greenthorn',
                                                     'email': '[email protected]'})]})
        p3 = res_partner.search([('email', '=', '[email protected]')], limit=1)
        # all contacts resolve to the company and received its vat
        for p in (p0, p1, p11, p2, p3):
            self.assertEqual(p.commercial_partner_id, sunhelm, 'Incorrect commercial entity resolution')
            self.assertEqual(p.vat, sunhelm.vat, 'Commercial fields must be automatically synced')
        # writing vat on the commercial entity propagates recursively
        sunhelmvat = 'BE0123456749'
        sunhelm.write({'vat': sunhelmvat})
        for p in (p0, p1, p11, p2, p3):
            self.assertEqual(p.vat, sunhelmvat, 'Commercial fields must be automatically and recursively synced')
        # writing vat on a mere contact must not propagate to anyone else
        p1vat = 'BE0987654394'
        p1.write({'vat': p1vat})
        for p in (sunhelm, p0, p11, p2, p3):
            self.assertEqual(p.vat, sunhelmvat, 'Sync to children should only work downstream and on commercial entities')
        # promote p1 to commercial entity
        p1.write({'parent_id': sunhelm.id,
                  'is_company': True,
                  'name': 'Sunhelm Subsidiary'})
        self.assertEqual(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
        self.assertEqual(p1.commercial_partner_id, p1, 'Incorrect commercial entity resolution after setting is_company')
        # writing on parent should not touch child commercial entities
        sunhelmvat2 = 'BE0112233453'
        sunhelm.write({'vat': sunhelmvat2})
        self.assertEqual(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
        self.assertEqual(p0.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
    def test_60_read_group(self):
        """ Exercise read_group() on res.users: grouping on char, int, date
        and many2one columns (both local and inherited from res.partner),
        with various orderings, limit/offset and aggregations. """
        title_sir = self.env['res.partner.title'].create({'name': 'Sir...'})
        title_lady = self.env['res.partner.title'].create({'name': 'Lady...'})
        # fixture: duplicate names/functions so grouping yields multi-record groups
        test_users = [
            {'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady.id},
            {'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady.id},
            {'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir.id},
            {'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady.id},
            {'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir.id},
            {'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady.id},
        ]
        res_users = self.env['res.users']
        user_ids = [res_users.create(vals).id for vals in test_users]
        domain = [('id', 'in', user_ids)]
        # group on local char field without domain and without active_test (-> empty WHERE clause)
        groups_data = res_users.with_context(active_test=False).read_group([], fields=['login'], groupby=['login'], orderby='login DESC')
        self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
        # group on local char field with limit
        groups_data = res_users.read_group(domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
        self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
        self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
        # group on inherited char field, aggregate on int field (second groupby ignored on purpose)
        groups_data = res_users.read_group(domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
        self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
        self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
        for group_data in groups_data:
            self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
            # all three function groups happen to sum their colors to 3
            self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
        # group on inherited char field, reverse order
        groups_data = res_users.read_group(domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
        self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
        # group on int field, default ordering
        groups_data = res_users.read_group(domain, fields=['color'], groupby='color')
        self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
        # multi group, second level is int field, should still be summed in first level grouping
        groups_data = res_users.read_group(domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
        self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
        # group on inherited char field, multiple orders with directions
        groups_data = res_users.read_group(domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
        self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
        self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
        # group on inherited date column (res_partner.date) -> Year-Month, default ordering
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'date'], groupby=['date'])
        self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
        self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
        # group on inherited date column (res_partner.date) -> Year-Month, custom order
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
        self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
        self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
        # group on inherited many2one (res_partner.title), default order
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'title'], groupby=['title'])
        self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
        # m2o is returned as a (id, label) pair
        self.assertEqual([(title_lady.id, 'Lady...'), (title_sir.id, 'Sir...')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
        self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
        # group on inherited many2one (res_partner.title), reversed natural order
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
        self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
        # m2o is returned as a (id, label) pair
        self.assertEqual([(title_sir.id, 'Sir...'), (title_lady.id, 'Lady...')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
        self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
        # group on inherited many2one (res_partner.title), multiple orders with m2o in second position
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
        self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
        # m2o is returned as a (id, label) pair
        self.assertEqual([(title_lady.id, 'Lady...'), (title_sir.id, 'Sir...')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
        self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
        self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
        # group on inherited many2one (res_partner.title), ordered by other inherited field (color)
        groups_data = res_users.read_group(domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
        self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
        # m2o is returned as a (id, label) pair
        self.assertEqual([(title_sir.id, 'Sir...'), (title_lady.id, 'Lady...')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
        self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
        self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
def test_70_archive_internal_partners(self):
test_partner = self.env['res.partner'].create({'name':'test partner'})
test_user = self.env['res.users'].create({
'login': '[email protected]',
'partner_id': test_partner.id,
})
# Cannot archive the partner
with self.assertRaises(RedirectWarning):
test_partner.with_user(self.env.ref('base.user_admin')).toggle_active()
with self.assertRaises(ValidationError):
test_partner.with_user(self.env.ref('base.user_demo')).toggle_active()
# Can archive the user but the partner stays active
test_user.toggle_active()
self.assertTrue(test_partner.active, 'Parter related to user should remain active')
# Now we can archive the partner
test_partner.toggle_active()
# Activate the user should reactivate the partner
test_user.toggle_active()
self.assertTrue(test_partner.active, 'Activating user must active related partner')
class TestPartnerRecursion(TransactionCase):
    """ Ensure cycles in the partner parent/child hierarchy are rejected. """

    def setUp(self):
        super().setUp()
        Partner = self.env['res.partner']
        # three-level hierarchy: p1 -> p2 -> p3
        self.p1 = Partner.browse(Partner.name_create('Elmtree')[0])
        self.p2 = Partner.create({'name': 'Elmtree Child 1', 'parent_id': self.p1.id})
        self.p3 = Partner.create({'name': 'Elmtree Grand-Child 1.1', 'parent_id': self.p2.id})

    def test_100_res_partner_recursion(self):
        # a healthy hierarchy passes the recursion check, alone or batched
        self.assertTrue(self.p3._check_recursion())
        self.assertTrue((self.p1 + self.p2 + self.p3)._check_recursion())

    # split 101, 102, 103 tests to force SQL rollback between them
    def test_101_res_partner_recursion(self):
        # root cannot become a descendant of its own grand-child
        with self.assertRaises(ValidationError):
            self.p1.write({'parent_id': self.p3.id})

    def test_102_res_partner_recursion(self):
        # middle node cannot become a descendant of its own child
        with self.assertRaises(ValidationError):
            self.p2.write({'parent_id': self.p3.id})

    def test_103_res_partner_recursion(self):
        # a node cannot be its own parent
        with self.assertRaises(ValidationError):
            self.p3.write({'parent_id': self.p3.id})

    def test_104_res_partner_recursion_indirect_cycle(self):
        """ Indirect hacky write to create cycle in children """
        p3b = self.p1.create({'name': 'Elmtree Grand-Child 1.2', 'parent_id': self.p2.id})
        with self.assertRaises(ValidationError):
            self.p2.write({'child_ids': [Command.update(self.p3.id, {'parent_id': p3b.id}),
                                         Command.update(p3b.id, {'parent_id': self.p3.id})]})

    def test_110_res_partner_recursion_multi_update(self):
        """ multi-write on several partners in same hierarchy must not trigger a false cycle detection """
        hierarchy = self.p1 + self.p2 + self.p3
        self.assertTrue(hierarchy.write({'phone': '123456'}))
class TestParentStore(TransactionCase):
    """ Verify that parent_store computation is done right """

    def setUp(self):
        super().setUp()
        # force res_partner_category.copy() to copy children
        Category = self.env['res.partner.category']
        self.patch(Category._fields['child_ids'], 'copy', True)
        # setup categories: root -> cat0 -> (cat1, cat2 -> cat21)
        self.root = Category.create({'name': 'Root category'})
        self.cat0 = Category.create({'name': 'Parent category', 'parent_id': self.root.id})
        self.cat1 = Category.create({'name': 'Child 1', 'parent_id': self.cat0.id})
        self.cat2 = Category.create({'name': 'Child 2', 'parent_id': self.cat0.id})
        self.cat21 = Category.create({'name': 'Child 2-1', 'parent_id': self.cat2.id})

    def _assert_duplicated(self, new_cat0):
        """ Shared assertions: both the copied subtree and the original one
        contain four nodes and share no record. """
        new_struct = new_cat0.search([('parent_id', 'child_of', new_cat0.id)])
        self.assertEqual(len(new_struct), 4, "After duplication, the new object must have the childs records")
        old_struct = new_cat0.search([('parent_id', 'child_of', self.cat0.id)])
        self.assertEqual(len(old_struct), 4, "After duplication, previous record must have old childs records only")
        self.assertFalse(new_struct & old_struct, "After duplication, nodes should not be mixed")

    def test_duplicate_parent(self):
        """ Duplicate the parent category and verify that the children have been duplicated too """
        self._assert_duplicated(self.cat0.copy())

    def test_duplicate_children_01(self):
        """ Duplicate the children then reassign them to the new parent (1st method). """
        new_cat1 = self.cat1.copy()
        new_cat2 = self.cat2.copy()
        new_cat0 = self.cat0.copy({'child_ids': []})
        (new_cat1 + new_cat2).write({'parent_id': new_cat0.id})
        self._assert_duplicated(new_cat0)

    def test_duplicate_children_02(self):
        """ Duplicate the children then reassign them to the new parent (2nd method). """
        new_cat1 = self.cat1.copy()
        new_cat2 = self.cat2.copy()
        new_cat0 = self.cat0.copy({'child_ids': [Command.set((new_cat1 + new_cat2).ids)]})
        self._assert_duplicated(new_cat0)

    def test_duplicate_children_03(self):
        """ Duplicate the children then reassign them to the new parent (3rd method). """
        new_cat1 = self.cat1.copy()
        new_cat2 = self.cat2.copy()
        new_cat0 = self.cat0.copy({'child_ids': []})
        new_cat0.write({'child_ids': [Command.link(new_cat1.id), Command.link(new_cat2.id)]})
        self._assert_duplicated(new_cat0)
class TestGroups(TransactionCase):
    """ Tests for res.groups: full_name search, implied-groups recursion,
    copy, and implied-groups add/remove propagation. """

    def test_res_groups_fullname_search(self):
        """ Searching on the computed full_name must match an in-memory filter. """
        all_groups = self.env['res.groups'].search([])

        groups = all_groups.search([('full_name', 'like', '%Sale%')])
        self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Sale' in g.full_name],
                              "did not match search for 'Sale'")

        groups = all_groups.search([('full_name', 'like', '%Technical%')])
        self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Technical' in g.full_name],
                              "did not match search for 'Technical'")

        groups = all_groups.search([('full_name', 'like', '%Sales /%')])
        self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Sales /' in g.full_name],
                              "did not match search for 'Sales /'")

        groups = all_groups.search([('full_name', 'in', ['Administration / Access Rights', 'Contact Creation'])])
        self.assertTrue(groups, "did not match search for 'Administration / Access Rights' and 'Contact Creation'")

    def test_res_group_recursion(self):
        # four groups with no cycle, check them all together
        a = self.env['res.groups'].create({'name': 'A'})
        b = self.env['res.groups'].create({'name': 'B'})
        # fixed: group was named 'G' while the variable and the A/B/C/D
        # naming pattern clearly intend 'C'
        c = self.env['res.groups'].create({'name': 'C', 'implied_ids': [Command.set((a + b).ids)]})
        d = self.env['res.groups'].create({'name': 'D', 'implied_ids': [Command.set(c.ids)]})
        self.assertTrue((a + b + c + d)._check_m2m_recursion('implied_ids'))

        # create a cycle (a -> d -> c -> a) and check it is detected
        a.implied_ids = d
        self.assertFalse(a._check_m2m_recursion('implied_ids'))

    def test_res_group_copy(self):
        a = self.env['res.groups'].with_context(lang='en_US').create({'name': 'A'})
        b = a.copy()
        # assertNotEqual gives a useful failure message, unlike
        # assertFalse(a.name == b.name)
        self.assertNotEqual(a.name, b.name)

    def test_apply_groups(self):
        a = self.env['res.groups'].create({'name': 'A'})
        b = self.env['res.groups'].create({'name': 'B'})
        c = self.env['res.groups'].create({'name': 'C', 'implied_ids': [Command.set(a.ids)]})

        # C already implies A, we want both B+C to imply A
        (b + c)._apply_group(a)
        self.assertIn(a, b.implied_ids)
        self.assertIn(a, c.implied_ids)

    def test_remove_groups(self):
        u = self.env['res.users'].create({'login': 'u', 'name': 'U'})
        a = self.env['res.groups'].create({'name': 'A', 'users': [Command.set(u.ids)]})
        b = self.env['res.groups'].create({'name': 'B', 'users': [Command.set(u.ids)]})
        c = self.env['res.groups'].create({'name': 'C', 'implied_ids': [Command.set(a.ids)]})

        # C already implies A, we want none of B+C to imply A
        (b + c)._remove_group(a)
        self.assertNotIn(a, b.implied_ids)
        self.assertNotIn(a, c.implied_ids)

        # Since B didn't imply A, removing A from the implied groups of (B+C)
        # should not remove user U from A, even though C implied A, since C does
        # not have U as a user
        self.assertIn(u, a.users)
class TestUsers(TransactionCase):

    def test_superuser(self):
        """ The superuser is inactive and must remain as such. """
        superuser = self.env['res.users'].browse(SUPERUSER_ID)
        self.assertFalse(superuser.active)
        # any attempt to reactivate the superuser must be rejected
        with self.assertRaises(UserError):
            superuser.write({'active': True})
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from unittest.mock import patch
from freezegun import freeze_time
from odoo import fields
from odoo.tests.common import TransactionCase, RecordCapturer
class CronMixinCase:
    def capture_triggers(self, cron_id=None):
        """
        Return a context manager capturing every cron trigger created
        during its lifetime. Inside the context, the triggers created so
        far are exposed; once the context exits, no further trigger is
        captured. They are accessible on the `records` attribute of the
        returned object.

        :param cron_id: An optional cron record id (int) or xmlid (str)
                        to only capture triggers for that cron.
        """
        if isinstance(cron_id, str):
            # an xmlid was given instead of a database id: resolve it first
            cron_id = self.env.ref(cron_id).id
        domain = [('cron_id', '=', cron_id)] if cron_id else []
        return RecordCapturer(model=self.env['ir.cron.trigger'].sudo(), domain=domain)
class TestIrCron(TransactionCase, CronMixinCase):
    """ Tests for ir.cron: direct triggering, and the interaction between
    triggers and the cron's active flag. """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # freeze "now" at the cursor's transaction timestamp so scheduling
        # computations are deterministic for the whole class
        freezer = freeze_time(cls.cr.now())
        cls.frozen_datetime = freezer.start()
        cls.addClassCleanup(freezer.stop)

    def setUp(self):
        super().setUp()
        # daily server-action cron that renames the matching test partner
        self.cron = self.env['ir.cron'].create({
            'name': 'TestCron',
            'model_id': self.env.ref('base.model_res_partner').id,
            'state': 'code',
            'code': 'model.search([("name", "=", "TestCronRecord")]).write({"name": "You have been CRONWNED"})',
            'interval_number': 1,
            'interval_type': 'days',
            'numbercall': -1,
            'doall': False,
        })
        # one partner targeted by the cron code, one control partner
        self.test_partner = self.env['res.partner'].create({
            'name': 'TestCronRecord'
        })
        self.test_partner2 = self.env['res.partner'].create({
            'name': 'NotTestCronRecord'
        })

    def test_cron_direct_trigger(self):
        """ method_direct_trigger() runs the cron code immediately, records
        lastcall, and leaves unrelated records untouched. """
        self.assertFalse(self.cron.lastcall)
        self.assertEqual(self.test_partner.name, 'TestCronRecord')
        self.assertEqual(self.test_partner2.name, 'NotTestCronRecord')

        def patched_now(*args, **kwargs):
            # fixed timestamp so lastcall can be asserted exactly
            return '2020-10-22 08:00:00'

        with patch('odoo.fields.Datetime.now', patched_now):
            self.cron.method_direct_trigger()

        self.assertEqual(fields.Datetime.to_string(self.cron.lastcall), '2020-10-22 08:00:00')
        self.assertEqual(self.test_partner.name, 'You have been CRONWNED')
        self.assertEqual(self.test_partner2.name, 'NotTestCronRecord')

    def test_cron_skip_unactive_triggers(self):
        # Situation: an admin disable the cron and another user triggers
        # the cron to be executed *now*, the cron shouldn't be ready and
        # the trigger should not be stored.
        self.cron.active = False
        self.cron.nextcall = fields.Datetime.now() + timedelta(days=2)
        self.cron.flush()

        with self.capture_triggers() as capture:
            self.cron._trigger()  # trigger the cron to run now

        ready_jobs = self.registry['ir.cron']._get_all_ready_jobs(self.cr)
        self.assertNotIn(self.cron.id, [job['id'] for job in ready_jobs],
                         "the cron shouldn't be ready")
        # grammar fixed in message: 'should has been' -> 'should have been'
        self.assertFalse(capture.records, "trigger should have been skipped")

    def test_cron_keep_future_triggers(self):
        # Situation: yesterday an admin disabled the cron, while the
        # cron was disabled, another user triggered it to run today.
        # In case the cron has been re-enabled before "today", it should
        # run.

        # go yesterday
        self.frozen_datetime.tick(delta=timedelta(days=-1))

        # admin disables the cron
        self.cron.active = False
        self.cron.nextcall = fields.Datetime.now() + timedelta(days=10)
        self.cron.flush()

        # user triggers the cron to run tomorrow-of-yesterday, i.e. today
        with self.capture_triggers() as capture:
            self.cron._trigger(at=fields.Datetime.now() + timedelta(days=1))

        # admin re-enables the cron
        self.cron.active = True
        self.cron.flush()

        # go today, check the cron should run
        self.frozen_datetime.tick(delta=timedelta(days=1))
        ready_jobs = self.registry['ir.cron']._get_all_ready_jobs(self.cr)
        self.assertIn(self.cron.id, [job['id'] for job in ready_jobs],
                      "cron should be ready")
        # grammar fixed in message: 'should has been' -> 'should have been'
        self.assertTrue(capture.records, "trigger should have been kept")
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import os
import platform
import psutil
import unittest
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
from odoo.exceptions import CacheMiss
from odoo.tests.common import TransactionCase
class TestRecordCache(TransactionCaseWithUserDemo):

    def test_cache(self):
        """ Check the record cache object: contains/get/set/remove and the
        two invalidation modes, across two environments (admin and demo). """
        Model = self.env['res.partner']
        # field objects, looked up on the model class
        name = type(Model).name
        ref = type(Model).ref
        cache = self.env.cache

        def check1(record, field, value):
            # value is None means no value in cache
            self.assertEqual(cache.contains(record, field), value is not None)
            try:
                self.assertEqual(cache.get(record, field), value)
                self.assertIsNotNone(value)
            except CacheMiss:
                self.assertIsNone(value)
            self.assertEqual(field in cache.get_fields(record), value is not None)
            self.assertEqual(record in cache.get_records(record, field), value is not None)

        def check(record, name_val, ref_val):
            """ check the values of fields 'name' and 'ref' on record. """
            check1(record, name, name_val)
            check1(record, ref, ref_val)

        # same two records seen from two different environments (uids)
        foo1, bar1 = Model.browse([1, 2])
        foo2, bar2 = Model.with_user(self.user_demo).browse([1, 2])
        self.assertNotEqual(foo1.env.uid, foo2.env.uid)

        # cache is empty
        cache.invalidate()
        check(foo1, None, None)
        check(foo2, None, None)
        check(bar1, None, None)
        check(bar2, None, None)
        self.assertCountEqual(cache.get_missing_ids(foo1 + bar1, name), [1, 2])
        self.assertCountEqual(cache.get_missing_ids(foo2 + bar2, name), [1, 2])

        # set values in one environment only; both environments see them
        cache.set(foo1, name, 'FOO1_NAME')
        cache.set(foo1, ref, 'FOO1_REF')
        cache.set(bar1, name, 'BAR1_NAME')
        cache.set(bar1, ref, 'BAR1_REF')
        check(foo1, 'FOO1_NAME', 'FOO1_REF')
        check(foo2, 'FOO1_NAME', 'FOO1_REF')
        check(bar1, 'BAR1_NAME', 'BAR1_REF')
        check(bar2, 'BAR1_NAME', 'BAR1_REF')
        self.assertCountEqual(cache.get_missing_ids(foo1 + bar1, name), [])
        self.assertCountEqual(cache.get_missing_ids(foo2 + bar2, name), [])

        # set values in both environments; the latest write wins everywhere
        cache.set(foo2, name, 'FOO2_NAME')
        cache.set(foo2, ref, 'FOO2_REF')
        cache.set(bar2, name, 'BAR2_NAME')
        cache.set(bar2, ref, 'BAR2_REF')
        check(foo1, 'FOO2_NAME', 'FOO2_REF')
        check(foo2, 'FOO2_NAME', 'FOO2_REF')
        check(bar1, 'BAR2_NAME', 'BAR2_REF')
        check(bar2, 'BAR2_NAME', 'BAR2_REF')
        self.assertCountEqual(cache.get_missing_ids(foo1 + bar1, name), [])
        self.assertCountEqual(cache.get_missing_ids(foo2 + bar2, name), [])

        # remove value in one environment; removed for both
        cache.remove(foo1, name)
        check(foo1, None, 'FOO2_REF')
        check(foo2, None, 'FOO2_REF')
        check(bar1, 'BAR2_NAME', 'BAR2_REF')
        check(bar2, 'BAR2_NAME', 'BAR2_REF')
        self.assertCountEqual(cache.get_missing_ids(foo1 + bar1, name), [1])
        self.assertCountEqual(cache.get_missing_ids(foo2 + bar2, name), [1])

        # partial invalidation: 'name' for all ids, 'ref' for foo1 only
        cache.invalidate([(name, None), (ref, foo1.ids)])
        check(foo1, None, None)
        check(foo2, None, None)
        check(bar1, None, 'BAR2_REF')
        check(bar2, None, 'BAR2_REF')

        # total invalidation
        cache.invalidate()
        check(foo1, None, None)
        check(foo2, None, None)
        check(bar1, None, None)
        check(bar2, None, None)

    @unittest.skipIf(
        not(platform.system() == 'Linux' and platform.machine() == 'x86_64'),
        "This test only makes sense on 64-bit Linux-like systems",
    )
    def test_memory(self):
        """ Check memory consumption of the cache. """
        NB_RECORDS = 100000
        MAX_MEMORY = 100  # upper bound, in MiB
        cache = self.env.cache
        model = self.env['res.partner']
        records = [model.new() for index in range(NB_RECORDS)]

        # RSS baseline before filling the cache
        process = psutil.Process(os.getpid())
        rss0 = process.memory_info().rss

        char_names = [
            'name', 'display_name', 'email', 'website', 'phone', 'mobile',
            'street', 'street2', 'city', 'zip', 'vat', 'ref',
        ]
        # fill 12 char fields on every record
        for name in char_names:
            field = model._fields[name]
            for record in records:
                cache.set(record, field, 'test')

        mem_usage = process.memory_info().rss - rss0
        self.assertLess(
            mem_usage, MAX_MEMORY * 1024 * 1024,
            "Caching %s records must take less than %sMB of memory" % (NB_RECORDS, MAX_MEMORY),
        )
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.osv.query import Query
from odoo.tests.common import BaseCase
class QueryTestCase(BaseCase):
    def test_basic_query(self):
        """ Base table plus an implicit second table, then one inner and one
        left join; check the aliases and the generated SQL. """
        query = Query(None, 'product_product')
        query.add_table('product_template')
        query.add_where("product_product.template_id = product_template.id")
        # add inner join
        alias = query.join("product_template", "categ_id", "product_category", "id", "categ_id")
        self.assertEqual(alias, 'product_template__categ_id')
        # add left join
        alias = query.left_join("product_product", "user_id", "res_user", "id", "user_id")
        self.assertEqual(alias, 'product_product__user_id')
        from_clause, where_clause, where_params = query.get_sql()
        self.assertEqual(from_clause,
            '"product_product", "product_template" JOIN "product_category" AS "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" AS "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id")')
        self.assertEqual(where_clause, "product_product.template_id = product_template.id")
    def test_query_chained_explicit_joins(self):
        """ A join taken from a previously-created join alias must produce a
        chained alias and chain in the generated SQL. """
        query = Query(None, 'product_product')
        query.add_table('product_template')
        query.add_where("product_product.template_id = product_template.id")
        # add inner join
        alias = query.join("product_template", "categ_id", "product_category", "id", "categ_id")
        self.assertEqual(alias, 'product_template__categ_id')
        # add CHAINED left join (lhs is the alias created just above)
        alias = query.left_join("product_template__categ_id", "user_id", "res_user", "id", "user_id")
        self.assertEqual(alias, 'product_template__categ_id__user_id')
        from_clause, where_clause, where_params = query.get_sql()
        self.assertEqual(from_clause,
            '"product_product", "product_template" JOIN "product_category" AS "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" AS "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")')
        self.assertEqual(where_clause, "product_product.template_id = product_template.id")
    def test_mixed_query_chained_explicit_implicit_joins(self):
        """ Explicit chained joins may be mixed with additional implicit
        tables (comma-joined with their own WHERE conditions). """
        query = Query(None, 'product_product')
        query.add_table('product_template')
        query.add_where("product_product.template_id = product_template.id")
        # add inner join
        alias = query.join("product_template", "categ_id", "product_category", "id", "categ_id")
        self.assertEqual(alias, 'product_template__categ_id')
        # add CHAINED left join
        alias = query.left_join("product_template__categ_id", "user_id", "res_user", "id", "user_id")
        self.assertEqual(alias, 'product_template__categ_id__user_id')
        # additional implicit join; the dotted table name is kept verbatim,
        # as the expected from_clause below shows
        query.add_table('account.account')
        query.add_where("product_category.expense_account_id = account_account.id")
        from_clause, where_clause, where_params = query.get_sql()
        self.assertEqual(from_clause,
            '"product_product", "product_template", "account.account" JOIN "product_category" AS "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" AS "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")')
        self.assertEqual(where_clause, "product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id")
    def test_raise_missing_lhs(self):
        """ Joining from a table that was never added to the query must
        fail with an AssertionError. """
        query = Query(None, 'product_product')
        with self.assertRaises(AssertionError):
            query.join("product_template", "categ_id", "product_category", "id", "categ_id")
def test_long_aliases(self):
query = Query(None, 'product_product')
tmp = query.join('product_product', 'product_tmpl_id', 'product_template', 'id', 'product_tmpl_id')
self.assertEqual(tmp, 'product_product__product_tmpl_id')
# no hashing
tmp_cat = query.join(tmp, 'product_category_id', 'product_category', 'id', 'product_category_id')
self.assertEqual(tmp_cat, 'product_product__product_tmpl_id__product_category_id')
# hashing to limit identifier length
tmp_cat_cmp = query.join(tmp_cat, 'company_id', 'res_company', 'id', 'company_id')
self.assertEqual(tmp_cat_cmp, 'product_product__product_tmpl_id__product_category_id__9f0ddff7')
tmp_cat_stm = query.join(tmp_cat, 'salesteam_id', 'res_company', 'id', 'salesteam_id')
self.assertEqual(tmp_cat_stm, 'product_product__product_tmpl_id__product_category_id__953a466f')
# extend hashed identifiers
tmp_cat_cmp_par = query.join(tmp_cat_cmp, 'partner_id', 'res_partner', 'id', 'partner_id')
self.assertEqual(tmp_cat_cmp_par, 'product_product__product_tmpl_id__product_category_id__56d55687')
tmp_cat_stm_par = query.join(tmp_cat_stm, 'partner_id', 'res_partner', 'id', 'partner_id')
self.assertEqual(tmp_cat_stm_par, 'product_product__product_tmpl_id__product_category_id__00363fdd')
def test_table_expression(self):
query = Query(None, 'foo')
from_clause, where_clause, where_params = query.get_sql()
self.assertEqual(from_clause, '"foo"')
query = Query(None, 'bar', 'SELECT id FROM foo')
from_clause, where_clause, where_params = query.get_sql()
self.assertEqual(from_clause, '(SELECT id FROM foo) AS "bar"')
query = Query(None, 'foo')
query.add_table('bar', 'SELECT id FROM foo')
from_clause, where_clause, where_params = query.get_sql()
self.assertEqual(from_clause, '"foo", (SELECT id FROM foo) AS "bar"')
query = Query(None, 'foo')
query.join('foo', 'bar_id', 'SELECT id FROM foo', 'id', 'bar')
from_clause, where_clause, where_params = query.get_sql()
self.assertEqual(from_clause, '"foo" JOIN (SELECT id FROM foo) AS "foo__bar" ON ("foo"."bar_id" = "foo__bar"."id")')
| 62.29703 | 6,292 |
27,407 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from collections import defaultdict
import functools
import itertools
import logging
import psycopg2
import datetime
from odoo import api, fields, models, Command
from odoo import SUPERUSER_ID, _
from odoo.exceptions import ValidationError, UserError
from odoo.tools import mute_logger
_logger = logging.getLogger('odoo.addons.base.partner.merge')
class MergePartnerLine(models.TransientModel):
    """ One group of duplicate partners proposed for merging by the wizard. """
    _name = 'base.partner.merge.line'
    _description = 'Merge Partner Line'
    _order = 'min_id asc'

    # wizard this group belongs to
    wizard_id = fields.Many2one('base.partner.merge.automatic.wizard', 'Wizard')
    # smallest partner id of the group; used for stable ordering (_order)
    min_id = fields.Integer('MinID')
    # stringified list of partner ids (parsed back with literal_eval by the wizard)
    aggr_ids = fields.Char('Ids', required=True)
class MergePartnerAutomatic(models.TransientModel):
    """
    The idea behind this wizard is to create a list of potential partners to
    merge. We use two objects, the first one is the wizard for the end-user.
    And the second will contain the partner list to merge.
    """

    _name = 'base.partner.merge.automatic.wizard'
    _description = 'Merge Partner Wizard'

    @api.model
    def default_get(self, fields):
        # Pre-fill the wizard from the partners selected in the list view:
        # jump straight to the 'selection' step with the selected partners.
        res = super(MergePartnerAutomatic, self).default_get(fields)
        active_ids = self.env.context.get('active_ids')
        if self.env.context.get('active_model') == 'res.partner' and active_ids:
            if 'state' in fields:
                res['state'] = 'selection'
            if 'partner_ids' in fields:
                res['partner_ids'] = [Command.set(active_ids)]
            if 'dst_partner_id' in fields:
                # default destination: last record of the ordered selection
                # (see _get_ordered_partner: active partners sort last)
                res['dst_partner_id'] = self._get_ordered_partner(active_ids)[-1].id
        return res

    # Group by
    group_by_email = fields.Boolean('Email')
    group_by_name = fields.Boolean('Name')
    group_by_is_company = fields.Boolean('Is Company')
    group_by_vat = fields.Boolean('VAT')
    group_by_parent_id = fields.Boolean('Parent Company')

    # wizard workflow step
    state = fields.Selection([
        ('option', 'Option'),
        ('selection', 'Selection'),
        ('finished', 'Finished')
    ], readonly=True, required=True, string='State', default='option')

    number_group = fields.Integer('Group of Contacts', readonly=True)
    # line currently being treated (one line per group of duplicates)
    current_line_id = fields.Many2one('base.partner.merge.line', string='Current Line')
    line_ids = fields.One2many('base.partner.merge.line', 'wizard_id', string='Lines')
    partner_ids = fields.Many2many('res.partner', string='Contacts', context={'active_test': False})
    dst_partner_id = fields.Many2one('res.partner', string='Destination Contact')

    # exclusion options: skip groups where a partner is used in these models
    exclude_contact = fields.Boolean('A user associated to the contact')
    exclude_journal_item = fields.Boolean('Journal Items associated to the contact')
    maximum_group = fields.Integer('Maximum of Group of Contacts')

    # ----------------------------------------
    # Update method. Core methods to merge steps
    # ----------------------------------------

    def _get_fk_on(self, table):
        """ return a list of many2one relation with the given table.
            :param table : the name of the sql table to return relations
            :returns a list of tuple 'table name', 'column name'.
        """
        # inspect pg_constraint for single-column foreign keys pointing at
        # `table`.id
        query = """
            SELECT cl1.relname as table, att1.attname as column
            FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, pg_attribute as att1, pg_attribute as att2
            WHERE con.conrelid = cl1.oid
                AND con.confrelid = cl2.oid
                AND array_lower(con.conkey, 1) = 1
                AND con.conkey[1] = att1.attnum
                AND att1.attrelid = cl1.oid
                AND cl2.relname = %s
                AND att2.attname = 'id'
                AND array_lower(con.confkey, 1) = 1
                AND con.confkey[1] = att2.attnum
                AND att2.attrelid = cl2.oid
                AND con.contype = 'f'
        """
        self._cr.execute(query, (table,))
        return self._cr.fetchall()

    @api.model
    def _update_foreign_keys(self, src_partners, dst_partner):
        """ Update all foreign key from the src_partner to dst_partner. All many2one fields will be updated.
            :param src_partners : merge source res.partner recordset (does not include destination one)
            :param dst_partner : record of destination res.partner
        """
        _logger.debug('_update_foreign_keys for dst_partner: %s for src_partners: %s', dst_partner.id, str(src_partners.ids))

        # find the many2one relation to a partner
        Partner = self.env['res.partner']
        relations = self._get_fk_on('res_partner')

        # flush pending ORM writes before raw SQL updates
        self.flush()
        for table, column in relations:
            if 'base_partner_merge_' in table:  # ignore two tables
                continue

            # get list of columns of current table (exept the current fk column)
            # NOTE: table name comes from the pg catalog (_get_fk_on), not
            # from user input
            query = "SELECT column_name FROM information_schema.columns WHERE table_name LIKE '%s'" % (table)
            self._cr.execute(query, ())
            columns = []
            for data in self._cr.fetchall():
                if data[0] != column:
                    columns.append(data[0])

            # do the update for the current table/column in SQL
            query_dic = {
                'table': table,
                'column': column,
                'value': columns[0],
            }
            if len(columns) <= 1:
                # unique key treated: re-point the fk only when it would not
                # create a duplicate row for the destination partner
                query = """
                    UPDATE "%(table)s" as ___tu
                    SET "%(column)s" = %%s
                    WHERE
                        "%(column)s" = %%s AND
                        NOT EXISTS (
                            SELECT 1
                            FROM "%(table)s" as ___tw
                            WHERE
                                "%(column)s" = %%s AND
                                ___tu.%(value)s = ___tw.%(value)s
                        )""" % query_dic
                for partner in src_partners:
                    self._cr.execute(query, (dst_partner.id, partner.id, dst_partner.id))
            else:
                try:
                    with mute_logger('odoo.sql_db'), self._cr.savepoint():
                        query = 'UPDATE "%(table)s" SET "%(column)s" = %%s WHERE "%(column)s" IN %%s' % query_dic
                        self._cr.execute(query, (dst_partner.id, tuple(src_partners.ids),))

                        # handle the recursivity with parent relation
                        if column == Partner._parent_name and table == 'res_partner':
                            query = """
                                WITH RECURSIVE cycle(id, parent_id) AS (
                                        SELECT id, parent_id FROM res_partner
                                    UNION
                                        SELECT cycle.id, res_partner.parent_id
                                        FROM res_partner, cycle
                                        WHERE res_partner.id = cycle.parent_id AND
                                            cycle.id != cycle.parent_id
                                )
                                SELECT id FROM cycle WHERE id = parent_id AND id = %s
                            """
                            self._cr.execute(query, (dst_partner.id,))
                            # NOTE JEM : shouldn't we fetch the data ?
                except psycopg2.Error:
                    # updating fails, most likely due to a violated unique constraint
                    # keeping record with nonexistent partner_id is useless, better delete it
                    query = 'DELETE FROM "%(table)s" WHERE "%(column)s" IN %%s' % query_dic
                    self._cr.execute(query, (tuple(src_partners.ids),))

        # raw SQL bypassed the ORM cache: drop it
        self.invalidate_cache()

    @api.model
    def _update_reference_fields(self, src_partners, dst_partner):
        """ Update all reference fields from the src_partner to dst_partner.
            :param src_partners : merge source res.partner recordset (does not include destination one)
            :param dst_partner : record of destination res.partner
        """
        _logger.debug('_update_reference_fields for dst_partner: %s for src_partners: %r', dst_partner.id, src_partners.ids)

        def update_records(model, src, field_model='model', field_id='res_id'):
            # re-point (field_model, field_id) pseudo-references from src to
            # dst; skip silently when the model is not installed
            Model = self.env[model] if model in self.env else None
            if Model is None:
                return
            records = Model.sudo().search([(field_model, '=', 'res.partner'), (field_id, '=', src.id)])
            try:
                with mute_logger('odoo.sql_db'), self._cr.savepoint():
                    records.sudo().write({field_id: dst_partner.id})
                    records.flush()
            except psycopg2.Error:
                # updating fails, most likely due to a violated unique constraint
                # keeping record with nonexistent partner_id is useless, better delete it
                records.sudo().unlink()

        update_records = functools.partial(update_records)

        for partner in src_partners:
            update_records('calendar', src=partner, field_model='model_id.model')
            update_records('ir.attachment', src=partner, field_model='res_model')
            update_records('mail.followers', src=partner, field_model='res_model')
            update_records('mail.activity', src=partner, field_model='res_model')
            update_records('mail.message', src=partner)
            update_records('ir.model.data', src=partner)

        # also re-point genuine Reference-type fields storing 'res.partner,<id>'
        records = self.env['ir.model.fields'].sudo().search([('ttype', '=', 'reference')])
        for record in records:
            try:
                Model = self.env[record.model]
                field = Model._fields[record.name]
            except KeyError:
                # unknown model or field => skip
                continue

            # computed fields are derived, not stored user data => skip
            if field.compute is not None:
                continue

            for partner in src_partners:
                records_ref = Model.sudo().search([(record.name, '=', 'res.partner,%d' % partner.id)])
                values = {
                    record.name: 'res.partner,%d' % dst_partner.id,
                }
                records_ref.sudo().write(values)

        self.flush()

    def _get_summable_fields(self):
        """ Returns the list of fields that should be summed when merging partners
        """
        # hook for other modules to extend (empty in base)
        return []

    @api.model
    def _update_values(self, src_partners, dst_partner):
        """ Update values of dst_partner with the ones from the src_partners.
            :param src_partners : recordset of source res.partner
            :param dst_partner : record of destination res.partner
        """
        _logger.debug('_update_values for dst_partner: %s for src_partners: %r', dst_partner.id, src_partners.ids)

        model_fields = dst_partner.fields_get().keys()
        summable_fields = self._get_summable_fields()

        def write_serializer(item):
            # many2one values come back as records: store the id instead
            if isinstance(item, models.BaseModel):
                return item.id
            else:
                return item

        # get all fields that are not computed or x2many
        values = dict()
        values_by_company = defaultdict(dict)  # {company: vals}
        for column in model_fields:
            field = dst_partner._fields[column]
            if field.type not in ('many2many', 'one2many') and field.compute is None:
                # last non-empty value wins (dst_partner is iterated last),
                # except summable fields which are accumulated
                for item in itertools.chain(src_partners, [dst_partner]):
                    if item[column]:
                        if column in summable_fields and values.get(column):
                            values[column] += write_serializer(item[column])
                        else:
                            values[column] = write_serializer(item[column])
            elif field.company_dependent and column in summable_fields:
                # sum the values of partners for each company; use sudo() to
                # compute the sum on all companies, including forbidden ones
                partners = (src_partners + dst_partner).sudo()
                for company in self.env['res.company'].sudo().search([]):
                    values_by_company[company][column] = sum(
                        partners.with_company(company).mapped(column)
                    )

        # remove fields that can not be updated (id and parent_id)
        values.pop('id', None)
        parent_id = values.pop('parent_id', None)
        dst_partner.write(values)
        for company, vals in values_by_company.items():
            dst_partner.with_company(company).sudo().write(vals)
        # try to update the parent_id
        if parent_id and parent_id != dst_partner.id:
            try:
                dst_partner.write({'parent_id': parent_id})
            except ValidationError:
                _logger.info('Skip recursive partner hierarchies for parent_id %s of partner: %s', parent_id, dst_partner.id)

    def _merge(self, partner_ids, dst_partner=None, extra_checks=True):
        """ private implementation of merge partner
            :param partner_ids : ids of partner to merge
            :param dst_partner : record of destination res.partner
            :param extra_checks: pass False to bypass extra sanity check (e.g. email address)
        """
        # super-admin can be used to bypass extra checks
        if self.env.is_admin():
            extra_checks = False

        Partner = self.env['res.partner']
        partner_ids = Partner.browse(partner_ids).exists()

        # nothing to merge with fewer than two partners
        if len(partner_ids) < 2:
            return

        if len(partner_ids) > 3:
            raise UserError(_("For safety reasons, you cannot merge more than 3 contacts together. You can re-open the wizard several times if needed."))

        # check if the list of partners to merge contains child/parent relation
        child_ids = self.env['res.partner']
        for partner_id in partner_ids:
            child_ids |= Partner.search([('id', 'child_of', [partner_id.id])]) - partner_id
        if partner_ids & child_ids:
            raise UserError(_("You cannot merge a contact with one of his parent."))

        if extra_checks and len(set(partner.email for partner in partner_ids)) > 1:
            raise UserError(_("All contacts must have the same email. Only the Administrator can merge contacts with different emails."))

        # remove dst_partner from partners to merge
        if dst_partner and dst_partner in partner_ids:
            src_partners = partner_ids - dst_partner
        else:
            # no explicit destination: pick the last of the ordered set
            ordered_partners = self._get_ordered_partner(partner_ids.ids)
            dst_partner = ordered_partners[-1]
            src_partners = ordered_partners[:-1]
        _logger.info("dst_partner: %s", dst_partner.id)

        # FIXME: is it still required to make and exception for account.move.line since accounting v9.0 ?
        if extra_checks and 'account.move.line' in self.env and self.env['account.move.line'].sudo().search([('partner_id', 'in', [partner.id for partner in src_partners])]):
            raise UserError(_("Only the destination contact may be linked to existing Journal Items. Please ask the Administrator if you need to merge several contacts linked to existing Journal Items."))

        # Make the company of all related users consistent with destination partner company
        if dst_partner.company_id:
            partner_ids.mapped('user_ids').sudo().write({
                'company_ids': [Command.link(dst_partner.company_id.id)],
                'company_id': dst_partner.company_id.id
            })

        # call sub methods to do the merge
        self._update_foreign_keys(src_partners, dst_partner)
        self._update_reference_fields(src_partners, dst_partner)
        self._update_values(src_partners, dst_partner)

        self._log_merge_operation(src_partners, dst_partner)

        # delete source partner, since they are merged
        src_partners.unlink()

    def _log_merge_operation(self, src_partners, dst_partner):
        # audit trail of who merged what into what
        _logger.info('(uid = %s) merged the partners %r with %s', self._uid, src_partners.ids, dst_partner.id)

    # ----------------------------------------
    # Helpers
    # ----------------------------------------

    @api.model
    def _generate_query(self, fields, maximum_group=100):
        """ Build the SQL query on res.partner table to group them according to given criteria
            :param fields : list of column names to group by the partners
            :param maximum_group : limit of the query
        """
        # make the list of column to group by in sql query
        sql_fields = []
        for field in fields:
            if field in ['email', 'name']:
                # case-insensitive grouping
                sql_fields.append('lower(%s)' % field)
            elif field in ['vat']:
                # ignore spacing differences in VAT numbers
                sql_fields.append("replace(%s, ' ', '')" % field)
            else:
                sql_fields.append(field)
        group_fields = ', '.join(sql_fields)

        # where clause : for given group by columns, only keep the 'not null' record
        filters = []
        for field in fields:
            if field in ['email', 'name', 'vat']:
                filters.append((field, 'IS NOT', 'NULL'))
        criteria = ' AND '.join('%s %s %s' % (field, operator, value) for field, operator, value in filters)

        # build the query
        text = [
            "SELECT min(id), array_agg(id)",
            "FROM res_partner",
        ]

        if criteria:
            text.append('WHERE %s' % criteria)

        text.extend([
            "GROUP BY %s" % group_fields,
            "HAVING COUNT(*) >= 2",
            "ORDER BY min(id)",
        ])

        if maximum_group:
            text.append("LIMIT %s" % maximum_group,)

        return ' '.join(text)

    @api.model
    def _compute_selected_groupby(self):
        """ Returns the list of field names the partner can be grouped (as merge
            criteria) according to the option checked on the wizard
        """
        groups = []
        group_by_prefix = 'group_by_'

        # every checked 'group_by_*' boolean becomes a grouping field name
        for field_name in self._fields:
            if field_name.startswith(group_by_prefix):
                if getattr(self, field_name, False):
                    groups.append(field_name[len(group_by_prefix):])

        if not groups:
            raise UserError(_("You have to specify a filter for your selection."))

        return groups

    @api.model
    def _partner_use_in(self, aggr_ids, models):
        """ Check if there is no occurence of this group of partner in the selected model
            :param aggr_ids : stringified list of partner ids separated with a comma (sql array_agg)
            :param models : dict mapping a model name with its foreign key with res_partner table
        """
        return any(
            self.env[model].search_count([(field, 'in', aggr_ids)])
            for model, field in models.items()
        )

    @api.model
    def _get_ordered_partner(self, partner_ids):
        """ Helper : returns a `res.partner` recordset ordered by create_date/active fields
            :param partner_ids : list of partner ids to sort
        """
        # reverse sort on (inactive, create_date): inactive/newest first, so
        # the LAST element is the oldest active partner
        return self.env['res.partner'].browse(partner_ids).sorted(
            key=lambda p: (not p.active, (p.create_date or datetime.datetime(1970, 1, 1))),
            reverse=True,
        )

    def _compute_models(self):
        """ Compute the different models needed by the system if you want to exclude some partners. """
        model_mapping = {}
        if self.exclude_contact:
            model_mapping['res.users'] = 'partner_id'
        # only meaningful when the accounting module is installed
        if 'account.move.line' in self.env and self.exclude_journal_item:
            model_mapping['account.move.line'] = 'partner_id'
        return model_mapping

    # ----------------------------------------
    # Actions
    # ----------------------------------------

    def action_skip(self):
        """ Skip this wizard line. Don't compute any thing, and simply redirect to the new step."""
        if self.current_line_id:
            self.current_line_id.unlink()
        return self._action_next_screen()

    def _action_next_screen(self):
        """ return the action of the next screen ; this means the wizard is set to treat the
            next wizard line. Each line is a subset of partner that can be merged together.
            If no line left, the end screen will be displayed (but an action is still returned).
        """
        self.invalidate_cache()  # FIXME: is this still necessary?
        values = {}
        if self.line_ids:
            # in this case, we try to find the next record.
            current_line = self.line_ids[0]
            # aggr_ids is a Char holding a stringified list of ids
            current_partner_ids = literal_eval(current_line.aggr_ids)
            values.update({
                'current_line_id': current_line.id,
                'partner_ids': [Command.set(current_partner_ids)],
                'dst_partner_id': self._get_ordered_partner(current_partner_ids)[-1].id,
                'state': 'selection',
            })
        else:
            values.update({
                'current_line_id': False,
                'partner_ids': [],
                'state': 'finished',
            })

        self.write(values)

        # reopen the same wizard form on the updated record
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'res_id': self.id,
            'view_mode': 'form',
            'target': 'new',
        }

    def _process_query(self, query):
        """ Execute the select request and write the result in this wizard
            :param query : the SQL query used to fill the wizard line
        """
        self.ensure_one()
        model_mapping = self._compute_models()

        # group partner query
        self._cr.execute(query)  # pylint: disable=sql-injection

        counter = 0
        for min_id, aggr_ids in self._cr.fetchall():
            # To ensure that the used partners are accessible by the user
            partners = self.env['res.partner'].search([('id', 'in', aggr_ids)])
            if len(partners) < 2:
                continue

            # exclude partner according to options
            if model_mapping and self._partner_use_in(partners.ids, model_mapping):
                continue

            self.env['base.partner.merge.line'].create({
                'wizard_id': self.id,
                'min_id': min_id,
                # list stored into the Char field, parsed back via literal_eval
                'aggr_ids': partners.ids,
            })
            counter += 1

        self.write({
            'state': 'selection',
            'number_group': counter,
        })

        _logger.info("counter: %s", counter)

    def action_start_manual_process(self):
        """ Start the process 'Merge with Manual Check'. Fill the wizard according to the group_by and exclude
            options, and redirect to the first step (treatment of first wizard line). After, for each subset of
            partner to merge, the wizard will be actualized.
                - Compute the selected groups (with duplication)
                - If the user has selected the 'exclude_xxx' fields, avoid the partners
        """
        self.ensure_one()
        groups = self._compute_selected_groupby()
        query = self._generate_query(groups, self.maximum_group)
        self._process_query(query)
        return self._action_next_screen()

    def action_start_automatic_process(self):
        """ Start the process 'Merge Automatically'. This will fill the wizard with the same mechanism as 'Merge
            with Manual Check', but instead of refreshing wizard with the current line, it will automatically process
            all lines by merging partner grouped according to the checked options.
        """
        self.ensure_one()
        self.action_start_manual_process()  # here we don't redirect to the next screen, since it is automatic process
        self.invalidate_cache()  # FIXME: is this still necessary?

        for line in self.line_ids:
            partner_ids = literal_eval(line.aggr_ids)
            self._merge(partner_ids)
            line.unlink()
            self._cr.commit()  # TODO JEM : explain why

        self.write({'state': 'finished'})
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'res_id': self.id,
            'view_mode': 'form',
            'target': 'new',
        }

    def parent_migration_process_cb(self):
        """ Merge partners that duplicate their own parent/child (same email
        and name, related through parent_id), then break any remaining
        self-referencing parent links. """
        self.ensure_one()

        # group partners sharing email+name that are each other's parent/child
        query = """
            SELECT
                min(p1.id),
                array_agg(DISTINCT p1.id)
            FROM
                res_partner as p1
            INNER join
                res_partner as p2
            ON
                p1.email = p2.email AND
                p1.name = p2.name AND
                (p1.parent_id = p2.id OR p1.id = p2.parent_id)
            WHERE
                p2.id IS NOT NULL
            GROUP BY
                p1.email,
                p1.name,
                CASE WHEN p1.parent_id = p2.id THEN p2.id
                    ELSE p1.id
                END
            HAVING COUNT(*) >= 2
            ORDER BY
                min(p1.id)
        """

        self._process_query(query)

        for line in self.line_ids:
            partner_ids = literal_eval(line.aggr_ids)
            self._merge(partner_ids)
            line.unlink()
            self._cr.commit()

        self.write({'state': 'finished'})

        # clean up partners that became their own parent after the merge
        self._cr.execute("""
            UPDATE
                res_partner
            SET
                is_company = NULL,
                parent_id = NULL
            WHERE
                parent_id = id
        """)

        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'res_id': self.id,
            'view_mode': 'form',
            'target': 'new',
        }

    def action_update_all_process(self):
        """ Run the parent-duplicate migration, then an automatic merge on
        vat/email/name, then normalize is_company on child partners. """
        self.ensure_one()
        self.parent_migration_process_cb()

        # NOTE JEM : seems louche to create a new wizard instead of reuse the current one with updated options.
        # since it is like this from the initial commit of this wizard, I don't change it. yet ...
        wizard = self.create({'group_by_vat': True, 'group_by_email': True, 'group_by_name': True})
        wizard.action_start_automatic_process()

        # NOTE JEM : no idea if this query is usefull
        self._cr.execute("""
            UPDATE
                res_partner
            SET
                is_company = NULL
            WHERE
                parent_id IS NOT NULL AND
                is_company IS NOT NULL
        """)

        return self._action_next_screen()

    def action_merge(self):
        """ Merge Contact button. Merge the selected partners, and redirect to
            the end screen (since there is no other wizard line to process.
        """
        if not self.partner_ids:
            self.write({'state': 'finished'})
            return {
                'type': 'ir.actions.act_window',
                'res_model': self._name,
                'res_id': self.id,
                'view_mode': 'form',
                'target': 'new',
            }

        self._merge(self.partner_ids.ids, self.dst_partner_id)

        if self.current_line_id:
            self.current_line_id.unlink()

        return self._action_next_screen()
| 41.588771 | 27,407 |
2,346 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class BaseModuleUninstall(models.TransientModel):
    """ Confirmation wizard showing the modules and data models impacted by
    uninstalling a module. """
    _name = "base.module.uninstall"
    _description = "Module Uninstall"

    # when False, only 'application' modules are listed among the impacted ones
    show_all = fields.Boolean()
    module_id = fields.Many2one(
        'ir.module.module', string="Module", required=True,
        domain=[('state', 'in', ['installed', 'to upgrade', 'to install'])],
        ondelete='cascade', readonly=True,
    )
    module_ids = fields.Many2many('ir.module.module', string="Impacted modules",
                                  compute='_compute_module_ids')
    model_ids = fields.Many2many('ir.model', string="Impacted data models",
                                 compute='_compute_model_ids')

    def _get_modules(self):
        """ Return all the modules impacted by self. """
        return self.module_id.downstream_dependencies(self.module_id)

    @api.depends('module_id', 'show_all')
    def _compute_module_ids(self):
        for wizard in self:
            modules = wizard._get_modules()
            wizard.module_ids = modules if wizard.show_all else modules.filtered('application')

    def _get_models(self):
        """ Return the models (ir.model) to consider for the impact. """
        return self.env['ir.model'].search([('transient', '=', False)])

    @api.depends('module_ids')
    def _compute_model_ids(self):
        ir_models = self._get_models()
        ir_models_xids = ir_models._get_external_ids()
        for wizard in self:
            if wizard.module_id:
                module_names = set(wizard._get_modules().mapped('name'))

                def lost(model):
                    xids = ir_models_xids.get(model.id, ())
                    return xids and all(xid.split('.')[0] in module_names for xid in xids)

                # find the models that have all their XIDs in the given modules.
                # FIX: assign on `wizard`, not `self` — writing to `self` inside
                # the loop would set the same value on every record of a
                # multi-record set.
                wizard.model_ids = ir_models.filtered(lost).sorted('name')
            else:
                # a compute method must assign a value for every record,
                # otherwise the ORM raises on cache access
                wizard.model_ids = False

    @api.onchange('module_id')
    def _onchange_module_id(self):
        # if we select a technical module, show technical modules by default
        if not self.module_id.application:
            self.show_all = True

    def action_uninstall(self):
        """ Trigger the actual uninstallation of the selected module. """
        modules = self.module_id
        return modules.button_immediate_uninstall()
| 39.762712 | 2,346 |
2,874 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import logging
import os
from tempfile import TemporaryFile
from psycopg2 import ProgrammingError
from contextlib import closing
from odoo import api, fields, models, tools, sql_db, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class BaseLanguageImport(models.TransientModel):
    """ Wizard importing a translation file (.csv/.po/.pot) for a language,
    activating or creating the language on the fly. """
    _name = "base.language.import"
    _description = "Language Import"

    name = fields.Char('Language Name', required=True)
    code = fields.Char('ISO Code', size=6, required=True,
                       help="ISO Language and Country code, e.g. en_US")
    data = fields.Binary('File', required=True, attachment=False)
    filename = fields.Char('File Name', required=True)
    overwrite = fields.Boolean('Overwrite Existing Terms',
                               default=True,
                               help="If you enable this option, existing translations (including custom ones) "
                                    "will be overwritten and replaced by those in this file")

    def import_lang(self):
        """ Import the uploaded translation file for the first record's language.

        :return: True on success
        :raise UserError: when the file is malformed or cannot be parsed
        """
        this = self[0]
        with TemporaryFile('wb+') as buf:
            try:
                buf.write(base64.decodebytes(this.data))

                # now we determine the file format from the file extension
                buf.seek(0)
                fileformat = os.path.splitext(this.filename)[-1][1:].lower()

                Lang = self.env["res.lang"]
                # FIX: consistently use `this` instead of `self` — `self` may
                # hold several records (hence `this = self[0]` above) and
                # accessing `self.code` would raise a singleton error then
                lang = Lang._activate_lang(this.code) or Lang._create_lang(
                    this.code, lang_name=this.name
                )
                tools.trans_load_data(
                    this._cr, buf, fileformat, this.code, overwrite=this.overwrite
                )
            except ProgrammingError as e:
                _logger.exception('Could not import the file due to a format mismatch or it being malformed.')
                # NOTE(review): this extra cursor is opened and closed without
                # being used; presumably kept to ensure a live connection while
                # the main transaction is aborted — confirm before removing
                with closing(sql_db.db_connect(self._cr.dbname).cursor()) as cr:
                    raise UserError(_('File %r not imported due to a malformed file.\n\n'
                                      'This issue can be caused by duplicates entries who are referring to the same field. '
                                      'Please check the content of the file you are trying to import.\n\n'
                                      'Technical Details:\n%s') % (this.filename, tools.ustr(e)))
            except Exception as e:
                _logger.warning('Could not import the file due to a format mismatch or it being malformed.')
                raise UserError(
                    _('File %r not imported due to format mismatch or a malformed file.'
                      ' (Valid formats are .csv, .po, .pot)\n\nTechnical Details:\n%s') %
                    (this.filename, tools.ustr(e))
                )
        return True
| 44.90625 | 2,874 |
1,260 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import tarfile
import tempfile
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
class BaseUpdateTranslations(models.TransientModel):
    """ Wizard re-synchronizing all translation terms for one installed
    language. """
    _name = 'base.update.translations'
    _description = 'Update Translations'

    @api.model
    def _get_languages(self):
        """ Selection provider: installed languages as (code, name) pairs. """
        return self.env['res.lang'].get_installed()

    lang = fields.Selection(_get_languages, 'Language', required=True)

    @api.model
    def _get_lang_name(self, lang_code):
        """ Return the display name of the language with the given ISO code.

        :param lang_code: ISO language code, e.g. ``en_US``
        :raise UserError: when no such language exists
        """
        lang = self.env['res.lang']._lang_get(lang_code)
        if not lang:
            raise UserError(_('No language with code "%s" exists', lang_code))
        return lang.name

    def act_update(self):
        """ Export all module terms for the selected language to a temporary
        tgz archive, then re-import each contained PO file, creating empty
        entries for new source terms.

        :return: a window-close client action
        """
        with tempfile.NamedTemporaryFile() as buf:
            tools.trans_export(self.lang, ['all'], buf, 'tgz', self._cr)
            buf.seek(0)
            # FIX: use the context manager so the archive is closed even when
            # an import raises (the previous explicit close() was skipped on
            # exception)
            with tarfile.open(fileobj=buf) as tar:
                for file_info in tar:
                    module_file = tar.extractfile(file_info)
                    tools.trans_load_data(self._cr, module_file, 'po', self.lang, create_empty_translation=True)
        return {'type': 'ir.actions.act_window_close'}
| 33.157895 | 1,260 |
3,391 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class BaseModuleUpgrade(models.TransientModel):
    """ Wizard applying all pending module installs/upgrades/removals. """
    _name = "base.module.upgrade"
    _description = "Upgrade Module"

    @api.model
    @api.returns('ir.module.module')
    def get_module_list(self):
        # modules currently scheduled for an operation
        states = ['to upgrade', 'to remove', 'to install']
        return self.env['ir.module.module'].search([('state', 'in', states)])

    @api.model
    def _default_module_info(self):
        # one "name: state" line per pending module, displayed in the wizard
        return "\n".join("%s: %s" % (mod.name, mod.state) for mod in self.get_module_list())

    module_info = fields.Text('Apps to Update', readonly=True, default=_default_module_info)

    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        # when nothing is pending, replace the form with a "completed" screen
        res = super(BaseModuleUpgrade, self).fields_view_get(view_id, view_type, toolbar=toolbar,submenu=False)
        if view_type != 'form':
            return res

        if not(self._context.get('active_model') and self._context.get('active_id')):
            return res

        if not self.get_module_list():
            res['arch'] = '''<form string="Upgrade Completed">
                                <separator string="Upgrade Completed" colspan="4"/>
                                <footer>
                                    <button name="config" string="Start Configuration" type="object" class="btn-primary" data-hotkey="q"/>
                                    <button special="cancel" data-hotkey="z" string="Close" class="btn-secondary"/>
                                </footer>
                             </form>'''

        return res

    def upgrade_module_cancel(self):
        # revert every scheduled operation back to its stable state
        Module = self.env['ir.module.module']
        to_install = Module.search([('state', 'in', ['to upgrade', 'to remove'])])
        to_install.write({'state': 'installed'})
        to_uninstall = Module.search([('state', '=', 'to install')])
        to_uninstall.write({'state': 'uninstalled'})
        return {'type': 'ir.actions.act_window_close'}

    def upgrade_module(self):
        """ Apply all scheduled module operations by rebuilding the registry.

        :raise UserError: when a scheduled module has unmet dependencies
        """
        Module = self.env['ir.module.module']

        # install/upgrade: double-check preconditions
        mods = Module.search([('state', 'in', ['to upgrade', 'to install'])])
        if mods:
            # find dependencies that are missing or still uninstalled
            query = """ SELECT d.name
                        FROM ir_module_module m
                        JOIN ir_module_module_dependency d ON (m.id = d.module_id)
                        LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
                        WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s) """
            self._cr.execute(query, (tuple(mods.ids), ('uninstalled',)))
            unmet_packages = [row[0] for row in self._cr.fetchall()]
            if unmet_packages:
                raise UserError(_('The following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
            mods.download()

        # terminate transaction before re-creating cursor below
        self._cr.commit()
        # rebuild the registry: this is what actually runs install/upgrade
        odoo.modules.registry.Registry.new(self._cr.dbname, update_module=True)
        self._cr.reset()

        return {'type': 'ir.actions.act_window_close'}

    def config(self):
        # pylint: disable=next-method-called
        return self.env['res.config'].next()
| 42.3875 | 3,391 |
957 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class BaseModuleUpdate(models.TransientModel):
    """ Wizard rescanning the addons paths to refresh the module list. """
    _name = "base.module.update"
    _description = "Update Module"

    updated = fields.Integer('Number of modules updated', readonly=True)
    added = fields.Integer('Number of modules added', readonly=True)
    state = fields.Selection([('init', 'init'), ('done', 'done')], 'Status', readonly=True, default='init')

    def update_module(self):
        """ Refresh the module list and store the counters on the wizard. """
        for wizard in self:
            updated_count, added_count = self.env['ir.module.module'].update_list()
            wizard.write({'updated': updated_count, 'added': added_count, 'state': 'done'})
        return False

    def action_module_open(self):
        """ Return the window action opening the module list. """
        return {
            'domain': str([]),
            'name': 'Modules',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window',
        }
| 33 | 957 |
2,259 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import contextlib
import io
from odoo import api, fields, models, tools, _
NEW_LANG_KEY = '__new__'
class BaseLanguageExport(models.TransientModel):
    """Wizard exporting translation terms as a CSV, PO/POT or TGZ file."""
    _name = "base.language.export"
    _description = 'Language Export'

    @api.model
    def _get_languages(self):
        """Selectable languages: a 'new template' pseudo-entry plus the installed ones."""
        installed = self.env['res.lang'].get_installed()
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + installed

    name = fields.Char('File Name', readonly=True)
    lang = fields.Selection(_get_languages, string='Language', required=True, default=NEW_LANG_KEY)
    format = fields.Selection([('csv','CSV File'), ('po','PO File'), ('tgz', 'TGZ Archive')],
                              string='File Format', required=True, default='po')
    modules = fields.Many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id',
                               string='Apps To Export', domain=[('state','=','installed')])
    data = fields.Binary('File', readonly=True, attachment=False)
    # 'choose': pick language/format; 'get': download the generated file
    state = fields.Selection([('choose', 'choose'), ('get', 'get')],
                             default='choose')

    def act_getfile(self):
        """Build the export file and switch the wizard to its download step."""
        wizard = self[0]
        export_lang = wizard.lang if wizard.lang != NEW_LANG_KEY else False
        module_names = sorted(wizard.mapped('modules.name')) or ['all']
        with contextlib.closing(io.BytesIO()) as stream:
            tools.trans_export(export_lang, module_names, stream, wizard.format, self._cr)
            encoded = base64.encodebytes(stream.getvalue())
        # derive a meaningful file name: language code, single module, or 'new'
        if export_lang:
            base_name = tools.get_iso_codes(export_lang)
        elif len(module_names) == 1:
            base_name = module_names[0]
        else:
            base_name = 'new'
        # a PO file without a source language is a template (.pot)
        extension = 'pot' if not export_lang and wizard.format == 'po' else wizard.format
        wizard.write({
            'state': 'get',
            'data': encoded,
            'name': "%s.%s" % (base_name, extension),
        })
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'res_id': wizard.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
| 38.948276 | 2,259 |
2,305 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class BaseLanguageInstall(models.TransientModel):
    """Wizard activating a language and loading its translation terms."""
    _name = "base.language.install"
    _description = "Install Language"

    @api.model
    def _default_language(self):
        """Pre-select the language when launched from the res.lang list view
        (the 'Update Terms' action); otherwise no default.
        """
        ctx = self._context
        if ctx.get('active_model') == 'res.lang':
            return self.env['res.lang'].browse(ctx.get('active_id')).code
        return False

    @api.model
    def _get_languages(self):
        """All languages known to the system, as [code, name] pairs."""
        # get_available() yields (code, url_code, name, active, ...)
        return [[entry[0], entry[2]] for entry in self.env['res.lang'].get_available()]

    lang = fields.Selection(_get_languages, string='Language', required=True,
                            default=_default_language)
    overwrite = fields.Boolean('Overwrite Existing Terms',
                               default=True,
                               help="If you check this box, your customized translations will be overwritten and replaced by the official ones.")
    state = fields.Selection([('init', 'init'), ('done', 'done')],
                             string='Status', readonly=True, default='init')

    def lang_install(self):
        """Activate the chosen language and load translations for all installed modules."""
        self.ensure_one()
        self.env['res.lang']._activate_lang(self.lang)
        installed_mods = self.env['ir.module.module'].search([('state', '=', 'installed')])
        installed_mods._update_translations(self.lang, self.overwrite)
        self.state = 'done'
        # refresh planner statistics after the mass insert of translations
        self.env.cr.execute('ANALYZE ir_translation')
        return {
            'name': _('Language Pack'),
            'view_mode': 'form',
            'view_id': False,
            'res_model': 'base.language.install',
            'domain': [],
            'context': dict(self._context, active_ids=self.ids),
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': self.id,
        }

    def reload(self):
        """Fully reload the web client."""
        return {'type': 'ir.actions.client', 'tag': 'reload'}

    def switch_lang(self):
        """Switch the current user to the installed language, then reload."""
        self.env.user.lang = self.lang
        return {'type': 'ir.actions.client', 'tag': 'reload_context'}
| 36.015625 | 2,305 |
7,842 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
class IrDefault(models.Model):
    """ User-defined default values for fields. """
    _name = 'ir.default'
    _description = 'Default Values'
    _rec_name = 'field_id'

    # the field this default value applies to
    field_id = fields.Many2one('ir.model.fields', string="Field", required=True,
                               ondelete='cascade', index=True)
    # optional scoping: a default may be restricted to one user and/or company
    user_id = fields.Many2one('res.users', string='User', ondelete='cascade', index=True,
                              help="If set, action binding only applies for this user.")
    company_id = fields.Many2one('res.company', string='Company', ondelete='cascade', index=True,
                                 help="If set, action binding only applies for this company")
    condition = fields.Char('Condition', help="If set, applies the default upon condition.")
    # the default itself, JSON-encoded (see set()/get() below)
    json_value = fields.Char('Default Value (JSON format)', required=True)

    @api.constrains('json_value')
    def _check_json_format(self):
        """Ensure ``json_value`` always contains valid JSON."""
        for record in self:
            try:
                json.loads(record.json_value)
            except json.JSONDecodeError:
                raise ValidationError(_('Invalid JSON format in Default Value field.'))

    @api.model_create_multi
    def create(self, vals_list):
        """Create defaults; invalidate the get_model_defaults() ormcache."""
        self.clear_caches()
        return super(IrDefault, self).create(vals_list)

    def write(self, vals):
        """Write defaults; invalidate the get_model_defaults() ormcache."""
        if self:
            self.clear_caches()
        return super(IrDefault, self).write(vals)

    def unlink(self):
        """Delete defaults; invalidate the get_model_defaults() ormcache."""
        if self:
            self.clear_caches()
        return super(IrDefault, self).unlink()

    @api.model
    def set(self, model_name, field_name, value, user_id=False, company_id=False, condition=False):
        """ Defines a default value for the given field. Any entry for the same
            scope (field, user, company) will be replaced. The value is encoded
            in JSON to be stored to the database.

            :param user_id: may be ``False`` for all users, ``True`` for the
                            current user, or any user id
            :param company_id: may be ``False`` for all companies, ``True`` for
                               the current user's company, or any company id
            :param condition: optional condition that restricts the
                              applicability of the default value; this is an
                              opaque string, but the client typically uses
                              single-field conditions in the form ``'key=val'``.
        """
        if user_id is True:
            user_id = self.env.uid
        if company_id is True:
            company_id = self.env.company.id

        # check consistency of model_name, field_name, and value
        try:
            model = self.env[model_name]
            field = model._fields[field_name]
            # convert_to_cache() raises if the value is unsuitable for the field
            field.convert_to_cache(value, model)
            json_value = json.dumps(value, ensure_ascii=False)
        except KeyError:
            raise ValidationError(_("Invalid field %s.%s") % (model_name, field_name))
        except Exception:
            raise ValidationError(_("Invalid value for %s.%s: %s") % (model_name, field_name, value))

        # update existing default for the same scope, or create one
        # (note: 'field' is rebound here from the field object to its
        # ir.model.fields record)
        field = self.env['ir.model.fields']._get(model_name, field_name)
        default = self.search([
            ('field_id', '=', field.id),
            ('user_id', '=', user_id),
            ('company_id', '=', company_id),
            ('condition', '=', condition),
        ])
        if default:
            default.write({'json_value': json_value})
        else:
            self.create({
                'field_id': field.id,
                'user_id': user_id,
                'company_id': company_id,
                'condition': condition,
                'json_value': json_value,
            })
        return True

    @api.model
    def get(self, model_name, field_name, user_id=False, company_id=False, condition=False):
        """ Return the default value for the given field, user and company, or
            ``None`` if no default is available.

            :param user_id: may be ``False`` for all users, ``True`` for the
                            current user, or any user id
            :param company_id: may be ``False`` for all companies, ``True`` for
                               the current user's company, or any company id
            :param condition: optional condition that restricts the
                              applicability of the default value; this is an
                              opaque string, but the client typically uses
                              single-field conditions in the form ``'key=val'``.
        """
        if user_id is True:
            user_id = self.env.uid
        if company_id is True:
            company_id = self.env.company.id

        field = self.env['ir.model.fields']._get(model_name, field_name)
        default = self.search([
            ('field_id', '=', field.id),
            ('user_id', '=', user_id),
            ('company_id', '=', company_id),
            ('condition', '=', condition),
        ], limit=1)
        return json.loads(default.json_value) if default else None

    @api.model
    @tools.ormcache('self.env.uid', 'self.env.company.id', 'model_name', 'condition')
    # Note about ormcache invalidation: it is not needed when deleting a field,
    # a user, or a company, as the corresponding defaults will no longer be
    # requested. It must only be done when a user's company is modified.
    def get_model_defaults(self, model_name, condition=False):
        """ Return the available default values for the given model (for the
            current user), as a dict mapping field names to values.
        """
        cr = self.env.cr
        query = """ SELECT f.name, d.json_value
                    FROM ir_default d
                    JOIN ir_model_fields f ON d.field_id=f.id
                    WHERE f.model=%s
                        AND (d.user_id IS NULL OR d.user_id=%s)
                        AND (d.company_id IS NULL OR d.company_id=%s)
                        AND {}
                    ORDER BY d.user_id, d.company_id, d.id
                """
        # self.env.company is empty when there is no user (controllers with auth=None)
        params = [model_name, self.env.uid, self.env.company.id or None]
        if condition:
            query = query.format("d.condition=%s")
            params.append(condition)
        else:
            query = query.format("d.condition IS NULL")
        cr.execute(query, params)
        result = {}
        for row in cr.fetchall():
            # keep the highest priority default for each field: rows are
            # ordered user/company-specific last, and only the first row
            # per field is kept
            if row[0] not in result:
                result[row[0]] = json.loads(row[1])
        return result

    @api.model
    def discard_records(self, records):
        """ Discard all the defaults of many2one fields using any of the given
            records.
        """
        # many2one defaults store the bare id as JSON, e.g. "42"
        json_vals = [json.dumps(id) for id in records.ids]
        domain = [('field_id.ttype', '=', 'many2one'),
                  ('field_id.relation', '=', records._name),
                  ('json_value', 'in', json_vals)]
        return self.search(domain).unlink()

    @api.model
    def discard_values(self, model_name, field_name, values):
        """ Discard all the defaults for any of the given values. """
        field = self.env['ir.model.fields']._get(model_name, field_name)
        json_vals = [json.dumps(value, ensure_ascii=False) for value in values]
        domain = [('field_id', '=', field.id), ('json_value', 'in', json_vals)]
        return self.search(domain).unlink()
| 44.05618 | 7,842 |
18,313 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.osv.expression import TERM_OPERATORS_NEGATION
from odoo.tools import ormcache
TYPE2FIELD = {
'char': 'value_text',
'float': 'value_float',
'boolean': 'value_integer',
'integer': 'value_integer',
'text': 'value_text',
'binary': 'value_binary',
'many2one': 'value_reference',
'date': 'value_datetime',
'datetime': 'value_datetime',
'selection': 'value_text',
}
TYPE2CLEAN = {
'boolean': bool,
'integer': lambda val: val or False,
'float': lambda val: val or False,
'char': lambda val: val or False,
'text': lambda val: val or False,
'selection': lambda val: val or False,
'binary': lambda val: val or False,
'date': lambda val: val.date() if val else False,
'datetime': lambda val: val or False,
}
class Property(models.Model):
    """Storage for company-dependent field values.

    Each record holds one value of a company-dependent field, either for a
    specific resource (``res_id`` set, in the form ``"<model>,<id>"``) or as
    the fallback default for resources without a specific value (``res_id``
    empty). Default lookups are cached with ``ormcache`` and must be
    invalidated whenever a default is created, written or deleted.
    """
    _name = 'ir.property'
    _description = 'Company Property'

    name = fields.Char(index=True)
    res_id = fields.Char(string='Resource', index=True, help="If not set, acts as a default value for new resources",)
    company_id = fields.Many2one('res.company', string='Company', index=True)
    fields_id = fields.Many2one('ir.model.fields', string='Field', ondelete='cascade', required=True)
    value_float = fields.Float()
    value_integer = fields.Integer()
    value_text = fields.Text() # will contain (char, text)
    value_binary = fields.Binary(attachment=False)
    value_reference = fields.Char()
    value_datetime = fields.Datetime()
    type = fields.Selection([('char', 'Char'),
                             ('float', 'Float'),
                             ('boolean', 'Boolean'),
                             ('integer', 'Integer'),
                             ('text', 'Text'),
                             ('binary', 'Binary'),
                             ('many2one', 'Many2One'),
                             ('date', 'Date'),
                             ('datetime', 'DateTime'),
                             ('selection', 'Selection'),
                             ],
                            required=True,
                            default='many2one',
                            index=True)

    def init(self):
        """Create the uniqueness index over (field, company, resource)."""
        # Ensure there is at most one active variant for each combination.
        query = """
            CREATE UNIQUE INDEX IF NOT EXISTS ir_property_unique_index
            ON %s (fields_id, COALESCE(company_id, 0), COALESCE(res_id, ''))
        """
        self.env.cr.execute(query % self._table)

    def _update_values(self, values):
        """Map the generic 'value' key in ``values`` onto the typed column.

        The target column is chosen from ``values['type']``, the type of the
        first record in ``self``, or the field's default type, in that order.
        Many2one values are normalized to the ``"<model>,<id>"`` text form.
        """
        if 'value' not in values:
            return values
        value = values.pop('value')

        prop = None
        type_ = values.get('type')
        if not type_:
            if self:
                prop = self[0]
                type_ = prop.type
            else:
                type_ = self._fields['type'].default(self)

        field = TYPE2FIELD.get(type_)
        if not field:
            raise UserError(_('Invalid type'))

        if field == 'value_reference':
            if not value:
                value = False
            elif isinstance(value, models.BaseModel):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, int):
                # a bare id: resolve the comodel from fields_id
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.env['ir.model.fields'].browse(field_id)

                value = '%s,%d' % (field_id.sudo().relation, value)

        values[field] = value
        return values

    def write(self, values):
        """Write on properties; invalidate caches when a default is touched."""
        # if any of the records we're writing on has a res_id=False *or*
        # we're writing a res_id=False on any record
        default_set = False
        if self._ids:
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s AND res_id IS NULL)', [self._ids])
            # NOTE: EXISTS always returns exactly one row, so cr.rowcount is
            # always 1; read the actual boolean instead. The old fallback
            # `for v in values` iterated the vals dict's *keys* (strings) and
            # would have crashed on v.get() had it ever been evaluated.
            default_set = self.env.cr.fetchone()[0] or values.get('res_id') is False
        r = super(Property, self).write(self._update_values(values))
        if default_set:
            # DLE P44: test `test_27_company_dependent`
            # Easy solution, need to flush write when changing a property.
            # Maybe it would be better to be able to compute all impacted cache value and update those instead
            # Then clear_caches must be removed as well.
            self.flush()
            self.clear_caches()
        return r

    @api.model_create_multi
    def create(self, vals_list):
        """Create properties; invalidate caches when a default is created."""
        vals_list = [self._update_values(vals) for vals in vals_list]
        created_default = any(not v.get('res_id') for v in vals_list)
        r = super(Property, self).create(vals_list)
        if created_default:
            # DLE P44: test `test_27_company_dependent`
            self.flush()
            self.clear_caches()
        return r

    def unlink(self):
        """Delete properties; invalidate caches when a default is deleted."""
        default_deleted = False
        if self._ids:
            # NOTE: the previous rowcount check was always true (EXISTS yields
            # one row); read the boolean and only consider default records
            # (res_id NULL), consistent with write() and create() above.
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s AND res_id IS NULL)',
                [self._ids]
            )
            default_deleted = self.env.cr.fetchone()[0]
        r = super().unlink()
        if default_deleted:
            self.clear_caches()
        return r

    def get_by_record(self):
        """Return this property's value, decoded according to its type."""
        self.ensure_one()
        if self.type in ('char', 'text', 'selection'):
            return self.value_text
        elif self.type == 'float':
            return self.value_float
        elif self.type == 'boolean':
            return bool(self.value_integer)
        elif self.type == 'integer':
            return self.value_integer
        elif self.type == 'binary':
            return self.value_binary
        elif self.type == 'many2one':
            if not self.value_reference:
                return False
            model, resource_id = self.value_reference.split(',')
            return self.env[model].browse(int(resource_id)).exists()
        elif self.type == 'datetime':
            return self.value_datetime
        elif self.type == 'date':
            if not self.value_datetime:
                return False
            return fields.Date.to_string(fields.Datetime.from_string(self.value_datetime))
        return False

    @api.model
    def _set_default(self, name, model, value, company=False):
        """ Set the given field's generic value for the given company.

        :param name: the field's name
        :param model: the field's model name
        :param value: the field's value
        :param company: the company (record or id)
        """
        field_id = self.env['ir.model.fields']._get(model, name).id
        company_id = int(company) if company else False
        prop = self.sudo().search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', '=', False),
        ])
        if prop:
            prop.write({'value': value})
        else:
            # create() is a model-level method, valid on the empty recordset
            prop.create({
                'fields_id': field_id,
                'company_id': company_id,
                'res_id': False,
                'name': name,
                'value': value,
                'type': self.env[model]._fields[name].type,
            })

    @api.model
    def _get(self, name, model, res_id=False):
        """ Get the given field's generic value for the record.

        :param name: the field's name
        :param model: the field's model name
        :param res_id: optional resource, format: "<id>" (int) or
                       "<model>,<id>" (str)
        """
        if not res_id:
            t, v = self._get_default_property(name, model)
            if not v or t != 'many2one':
                return v
            return self.env[v[0]].browse(v[1])

        p = self._get_property(name, model, res_id=res_id)
        if p:
            return p.get_by_record()
        return False

    # only cache the default lookup, i.e. _get(res_id=False): per-record
    # lookups are unbounded and would bloat the cache
    COMPANY_KEY = "self.env.company.id"
    @ormcache(COMPANY_KEY, 'name', 'model')
    def _get_default_property(self, name, model):
        """Cached default lookup; returns (type, value) with many2one values
        encoded as (model_name, id) so the cache stays picklable."""
        prop = self._get_property(name, model, res_id=False)
        if not prop:
            return None, False
        v = prop.get_by_record()
        if prop.type != 'many2one':
            return prop.type, v
        return 'many2one', v and (v._name, v.id)

    def _get_property(self, name, model, res_id):
        """Return the best matching property record (possibly empty)."""
        domain = self._get_domain(name, model)
        if domain is not None:
            if res_id and isinstance(res_id, int):
                res_id = "%s,%s" % (model, res_id)
            domain = [('res_id', '=', res_id)] + domain
            #make the search with company_id asc to make sure that properties specific to a company are given first
            return self.sudo().search(domain, limit=1, order='company_id')
        return self.sudo().browse(())

    def _get_domain(self, prop_name, model):
        """Domain matching the field's properties for the current company,
        or ``None`` when the field is unknown."""
        field_id = self.env['ir.model.fields']._get(model, prop_name).id
        if not field_id:
            return None
        company_id = self.env.company.id
        return [('fields_id', '=', field_id), ('company_id', 'in', [company_id, False])]

    @api.model
    def _get_multi(self, name, model, ids):
        """ Read the property field `name` for the records of model `model` with
            the given `ids`, and return a dictionary mapping `ids` to their
            corresponding value.
        """
        if not ids:
            return {}

        field = self.env[model]._fields[name]
        field_id = self.env['ir.model.fields']._get(model, name).id
        company_id = self.env.company.id

        if field.type == 'many2one':
            comodel = self.env[field.comodel_name]
            model_pos = len(model) + 2
            value_pos = len(comodel._name) + 2
            # retrieve values: both p.res_id and p.value_reference are formatted
            # as "<rec._name>,<rec.id>"; the purpose of the LEFT JOIN is to
            # return the value id if it exists, NULL otherwise
            query = """
                SELECT substr(p.res_id, %s)::integer, r.id
                FROM ir_property p
                LEFT JOIN {} r ON substr(p.value_reference, %s)::integer=r.id
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(comodel._table)
            params = [model_pos, value_pos, field_id, company_id]
            clean = comodel.browse

        elif field.type in TYPE2FIELD:
            model_pos = len(model) + 2
            # retrieve values: p.res_id is formatted as "<rec._name>,<rec.id>"
            query = """
                SELECT substr(p.res_id, %s)::integer, p.{}
                FROM ir_property p
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(TYPE2FIELD[field.type])
            params = [model_pos, field_id, company_id]
            clean = TYPE2CLEAN[field.type]

        else:
            return dict.fromkeys(ids, False)

        # retrieve values
        cr = self.env.cr
        result = {}
        refs = {"%s,%s" % (model, id) for id in ids}
        for sub_refs in cr.split_for_in_conditions(refs):
            cr.execute(query, params + [sub_refs])
            result.update(cr.fetchall())

        # determine all values and format them; rows with a NULL res_id end up
        # under the None key and provide the fallback default
        default = result.get(None, None)
        return {
            id: clean(result.get(id, default))
            for id in ids
        }

    @api.model
    def _set_multi(self, name, model, values, default_value=None):
        """ Assign the property field `name` for the records of model `model`
            with `values` (dictionary mapping record ids to their value).
            If the value for a given record is the same as the default
            value, the property entry will not be stored, to avoid bloating
            the database.
            If `default_value` is provided, that value will be used instead
            of the computed default value, to determine whether the value
            for a record should be stored or not.
        """
        def clean(value):
            return value.id if isinstance(value, models.BaseModel) else value

        if not values:
            return

        if default_value is None:
            domain = self._get_domain(name, model)
            if domain is None:
                raise Exception()
            # retrieve the default value for the field
            default_value = clean(self._get(name, model))

        # retrieve the properties corresponding to the given record ids
        field_id = self.env['ir.model.fields']._get(model, name).id
        company_id = self.env.company.id
        refs = {('%s,%s' % (model, id)): id for id in values}
        props = self.sudo().search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', 'in', list(refs)),
        ])

        # modify existing properties
        for prop in props:
            id = refs.pop(prop.res_id)
            value = clean(values[id])
            if value == default_value:
                # avoid prop.unlink(), as it clears the record cache that can
                # contain the value of other properties to set on record!
                self._cr.execute("DELETE FROM ir_property WHERE id=%s", [prop.id])
            elif value != clean(prop.get_by_record()):
                prop.write({'value': value})

        # create new properties for records that do not have one yet
        vals_list = []
        for ref, id in refs.items():
            value = clean(values[id])
            if value != default_value:
                vals_list.append({
                    'fields_id': field_id,
                    'company_id': company_id,
                    'res_id': ref,
                    'name': name,
                    'value': value,
                    'type': self.env[model]._fields[name].type,
                })
        self.sudo().create(vals_list)

    @api.model
    def search_multi(self, name, model, operator, value):
        """ Return a domain for the records that match the given condition. """
        default_matches = False
        negate = False

        # For "is set" and "is not set", same logic for all types
        if operator == 'in' and False in value:
            operator = 'not in'
            negate = True
        elif operator == 'not in' and False not in value:
            operator = 'in'
            negate = True
        elif operator in ('!=', 'not like', 'not ilike') and value:
            operator = TERM_OPERATORS_NEGATION[operator]
            negate = True
        elif operator == '=' and not value:
            operator = '!='
            negate = True

        field = self.env[model]._fields[name]

        if field.type == 'many2one':
            def makeref(value):
                return value and f'{field.comodel_name},{value}'

            if operator in ('=', '!=', '<=', '<', '>', '>='):
                value = makeref(value)
            elif operator in ('in', 'not in'):
                value = [makeref(v) for v in value]
            elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike', 'not ilike'):
                # most probably inefficient... but correct
                target = self.env[field.comodel_name]
                target_names = target.name_search(value, operator=operator, limit=None)
                target_ids = [n[0] for n in target_names]
                operator, value = 'in', [makeref(v) for v in target_ids]

        elif field.type in ('integer', 'float'):
            # No record is created in ir.property if the field's type is float or integer with a value
            # equal to 0. Then to match with the records that are linked to a property field equal to 0,
            # the negation of the operator must be taken to compute the goods and the domain returned
            # to match the searched records is just the opposite.
            value = float(value) if field.type == 'float' else int(value)
            if operator == '>=' and value <= 0:
                operator = '<'
                negate = True
            elif operator == '>' and value < 0:
                operator = '<='
                negate = True
            elif operator == '<=' and value >= 0:
                operator = '>'
                negate = True
            elif operator == '<' and value > 0:
                operator = '>='
                negate = True

        elif field.type == 'boolean':
            # the value must be mapped to an integer value
            value = int(value)

        # retrieve the properties that match the condition
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        props = self.search(domain + [(TYPE2FIELD[field.type], operator, value)])

        # retrieve the records corresponding to the properties that match
        good_ids = []
        for prop in props:
            if prop.res_id:
                __, res_id = prop.res_id.split(',')
                good_ids.append(int(res_id))
            else:
                default_matches = True

        if default_matches:
            # exclude all records with a property that does not match
            props = self.search(domain + [('res_id', '!=', False)])
            all_ids = {int(res_id.split(',')[1]) for res_id in props.mapped('res_id')}
            bad_ids = list(all_ids - set(good_ids))
            if negate:
                return [('id', 'in', bad_ids)]
            else:
                return [('id', 'not in', bad_ids)]
        elif negate:
            return [('id', 'not in', good_ids)]
        else:
            return [('id', 'in', good_ids)]
| 39.130342 | 18,313 |
15,754 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import json
import locale
import logging
import re
from operator import itemgetter
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger(__name__)
DEFAULT_DATE_FORMAT = '%m/%d/%Y'
DEFAULT_TIME_FORMAT = '%H:%M:%S'
class Lang(models.Model):
_name = "res.lang"
_description = "Languages"
_order = "active desc,name"
_disallowed_datetime_patterns = list(tools.DATETIME_FORMATS_MAP)
_disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice
name = fields.Char(required=True)
code = fields.Char(string='Locale Code', required=True, help='This field is used to set/get locales for user')
iso_code = fields.Char(string='ISO code', help='This ISO code is the name of po files to use for translations')
url_code = fields.Char('URL Code', required=True, help='The Lang Code displayed in the URL')
active = fields.Boolean()
direction = fields.Selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], required=True, default='ltr')
date_format = fields.Char(string='Date Format', required=True, default=DEFAULT_DATE_FORMAT)
time_format = fields.Char(string='Time Format', required=True, default=DEFAULT_TIME_FORMAT)
week_start = fields.Selection([('1', 'Monday'),
('2', 'Tuesday'),
('3', 'Wednesday'),
('4', 'Thursday'),
('5', 'Friday'),
('6', 'Saturday'),
('7', 'Sunday')], string='First Day of Week', required=True, default='7')
grouping = fields.Char(string='Separator Format', required=True, default='[]',
help="The Separator Format should be like [,n] where 0 < n :starting from Unit digit. "
"-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500; "
"[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. "
"Provided ',' as the thousand separator in each case.")
decimal_point = fields.Char(string='Decimal Separator', required=True, default='.', trim=False)
thousands_sep = fields.Char(string='Thousands Separator', default=',', trim=False)
@api.depends('code', 'flag_image')
def _compute_field_flag_image_url(self):
for lang in self:
if lang.flag_image:
lang.flag_image_url = f"/web/image/res.lang/{lang.id}/flag_image"
else:
lang.flag_image_url = f"/base/static/img/country_flags/{lang.code.lower().rsplit('_')[-1]}.png"
flag_image = fields.Image("Image")
flag_image_url = fields.Char(compute=_compute_field_flag_image_url)
_sql_constraints = [
('name_uniq', 'unique(name)', 'The name of the language must be unique !'),
('code_uniq', 'unique(code)', 'The code of the language must be unique !'),
('url_code_uniq', 'unique(url_code)', 'The URL code of the language must be unique !'),
]
@api.constrains('active')
def _check_active(self):
# do not check during installation
if self.env.registry.ready and not self.search_count([]):
raise ValidationError(_('At least one language must be active.'))
@api.constrains('time_format', 'date_format')
def _check_format(self):
for lang in self:
for pattern in lang._disallowed_datetime_patterns:
if (lang.time_format and pattern in lang.time_format) or \
(lang.date_format and pattern in lang.date_format):
raise ValidationError(_('Invalid date/time format directive specified. '
'Please refer to the list of allowed directives, '
'displayed when you edit a language.'))
@api.constrains('grouping')
def _check_grouping(self):
warning = _('The Separator Format should be like [,n] where 0 < n :starting from Unit digit. '
'-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;'
'[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. '
'Provided as the thousand separator in each case.')
for lang in self:
try:
if any(not isinstance(x, int) for x in json.loads(lang.grouping)):
raise ValidationError(warning)
except Exception:
raise ValidationError(warning)
def _register_hook(self):
# check that there is at least one active language
if not self.search_count([]):
_logger.error("No language is active.")
# TODO remove me after v14
def load_lang(self, lang, lang_name=None):
_logger.warning("Call to deprecated method load_lang, use _create_lang or _activate_lang instead")
language = self._activate_lang(lang) or self._create_lang(lang, lang_name)
return language.id
def _activate_lang(self, code):
""" Activate languages
:param code: code of the language to activate
:return: the language matching 'code' activated
"""
lang = self.with_context(active_test=False).search([('code', '=', code)])
if lang and not lang.active:
lang.active = True
return lang
    def _create_lang(self, lang, lang_name=None):
        """ Create the given language and make it active.

        Formatting data (date/time formats, separators, grouping) is taken
        from the matching system locale when one can be set; otherwise the
        process default locale is used and a warning is logged.

        :param lang: locale code, e.g. ``'fr_BE'``
        :param lang_name: display name; defaults to the code itself
        :return: the newly created res.lang record
        """
        # create the language with locale information
        fail = True
        iso_lang = tools.get_iso_codes(lang)
        for ln in tools.get_locales(lang):
            try:
                locale.setlocale(locale.LC_ALL, str(ln))
                fail = False
                break
            except locale.Error:
                continue
        if fail:
            lc = locale.getdefaultlocale()[0]
            msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
            _logger.warning(msg, lang, lc)

        if not lang_name:
            lang_name = lang

        def fix_xa0(s):
            """Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
               coercing to utf-8, as some platform seem to output localeconv() in their system
               encoding, e.g. Windows-1252"""
            if s == '\xa0':
                return '\xc2\xa0'
            return s

        def fix_datetime_format(format):
            """Python's strftime supports only the format directives
               that are available on the platform's libc, so in order to
               be 100% cross-platform we map to the directives required by
               the C standard (1989 version), always available on platforms
               with a C standard implementation."""
            # For some locales, nl_langinfo returns a D_FMT/T_FMT that contains
            # unsupported '%-' patterns, e.g. for cs_CZ
            format = format.replace('%-', '%')
            for pattern, replacement in tools.DATETIME_FORMATS_MAP.items():
                format = format.replace(pattern, replacement)
            return str(format)

        conv = locale.localeconv()
        lang_info = {
            'code': lang,
            'iso_code': iso_lang,
            'name': lang_name,
            'active': True,
            'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
            'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
            'decimal_point' : fix_xa0(str(conv['decimal_point'])),
            'thousands_sep' : fix_xa0(str(conv['thousands_sep'])),
            'grouping' : str(conv.get('grouping', [])),
        }
        try:
            return self.create(lang_info)
        finally:
            # setlocale() above changed the process-wide locale; restore it
            tools.resetlocale()
@api.model
def install_lang(self):
"""
This method is called from odoo/addons/base/data/res_lang_data.xml to load
some language and set it as the default for every partners. The
language is set via tools.config by the '_initialize_db' method on the
'db' object. This is a fragile solution and something else should be
found.
"""
# config['load_language'] is a comma-separated list or None
lang_code = (tools.config.get('load_language') or 'en_US').split(',')[0]
lang = self._activate_lang(lang_code) or self._create_lang(lang_code)
IrDefault = self.env['ir.default']
default_value = IrDefault.get('res.partner', 'lang')
if default_value is None:
IrDefault.set('res.partner', 'lang', lang_code)
# set language of main company, created directly by db bootstrap SQL
partner = self.env.company.partner_id
if not partner.lang:
partner.write({'lang': lang_code})
return True
@tools.ormcache('code')
def _lang_get_id(self, code):
return self.with_context(active_test=True).search([('code', '=', code)]).id
@tools.ormcache('url_code')
def _lang_get_code(self, url_code):
return self.with_context(active_test=True).search([('url_code', '=', url_code)]).code or url_code
def _lang_get(self, code):
""" Return the language using this code if it is active """
return self.browse(self._lang_get_id(code))
@tools.ormcache('self.code', 'monetary')
def _data_get(self, monetary=False):
thousands_sep = self.thousands_sep or ''
decimal_point = self.decimal_point
grouping = self.grouping
return grouping, thousands_sep, decimal_point
@api.model
@tools.ormcache()
def get_available(self):
""" Return the available languages as a list of (code, url_code, name,
active) sorted by name.
"""
langs = self.with_context(active_test=False).search([])
return langs.get_sorted()
def get_sorted(self):
return sorted([(lang.code, lang.url_code, lang.name, lang.active, lang.flag_image_url) for lang in self], key=itemgetter(2))
    @tools.ormcache('self.id')
    def _get_cached_values(self):
        # Per-record cache of the handful of fields served by _get_cached().
        self.ensure_one()
        return {
            'id': self.id,
            'code': self.code,
            'url_code': self.url_code,
            'name': self.name,
        }
def _get_cached(self, field):
return self._get_cached_values()[field]
    @api.model
    @tools.ormcache('code')
    def _lang_code_to_urlcode(self, code):
        # Map a language code (e.g. 'fr_FR') to its url_code.
        for c, urlc, name, *_ in self.get_available():
            if c == code:
                return urlc
        # not in the available list: read it straight from the record
        return self._lang_get(code).url_code
    @api.model
    @tools.ormcache()
    def get_installed(self):
        """ Return the installed languages as a list of (code, name) sorted by name. """
        # for languages, active means installed
        langs = self.with_context(active_test=True).search([])
        return sorted([(lang.code, lang.name) for lang in langs], key=itemgetter(1))
def toggle_active(self):
super().toggle_active()
# Automatically load translation
active_lang = [lang.code for lang in self.filtered(lambda l: l.active)]
if active_lang:
mods = self.env['ir.module.module'].search([('state', '=', 'installed')])
mods._update_translations(active_lang)
    @api.model_create_multi
    def create(self, vals_list):
        """Create languages, defaulting url_code to iso_code (or code)."""
        # language caches are keyed on the set of languages: reset them
        self.clear_caches()
        for vals in vals_list:
            if not vals.get('url_code'):
                vals['url_code'] = vals.get('iso_code') or vals['code']
        return super(Lang, self).create(vals_list)
def write(self, vals):
lang_codes = self.mapped('code')
if 'code' in vals and any(code != vals['code'] for code in lang_codes):
raise UserError(_("Language code cannot be modified."))
if vals.get('active') == False:
if self.env['res.users'].search_count([('lang', 'in', lang_codes)]):
raise UserError(_("Cannot deactivate a language that is currently used by users."))
if self.env['res.partner'].search_count([('lang', 'in', lang_codes)]):
raise UserError(_("Cannot deactivate a language that is currently used by contacts."))
# delete linked ir.default specifying default partner's language
self.env['ir.default'].discard_values('res.partner', 'lang', lang_codes)
res = super(Lang, self).write(vals)
self.flush()
self.clear_caches()
return res
    @api.ondelete(at_uninstall=True)
    def _unlink_except_default_lang(self):
        # Refuse deletion of en_US, of the current user's language, and of
        # any still-active language.
        for language in self:
            if language.code == 'en_US':
                raise UserError(_("Base Language 'en_US' can not be deleted."))
            ctx_lang = self._context.get('lang')
            if ctx_lang and (language.code == ctx_lang):
                raise UserError(_("You cannot delete the language which is the user's preferred language."))
            if language.active:
                raise UserError(_("You cannot delete the language which is Active!\nPlease de-activate the language first."))
def unlink(self):
for language in self:
self.env['ir.translation'].search([('lang', '=', language.code)]).unlink()
self.clear_caches()
return super(Lang, self).unlink()
    def format(self, percent, value, grouping=False, monetary=False):
        """ Format() will return the language-specific output for float values

        :param percent: a single %-style format specifier, e.g. '%.2f'
        :param value: the value to format
        :param grouping: if True, insert this language's thousands separator
        :param monetary: forwarded to _data_get() (part of its cache key)
        """
        self.ensure_one()
        # only the first character is checked; callers are trusted to pass a
        # single specifier
        if percent[0] != '%':
            raise ValueError(_("format() must be given exactly one %char format specifier"))
        formatted = percent % value
        # floats and decimal ints need special action!
        if grouping:
            lang_grouping, thousands_sep, decimal_point = self._data_get(monetary)
            # grouping is stored as the textual repr of a list (see _create_lang)
            eval_lang_grouping = ast.literal_eval(lang_grouping)
            if percent[-1] in 'eEfFgG':
                # group only the integer part; join with the language's
                # decimal point
                parts = formatted.split('.')
                parts[0] = intersperse(parts[0], eval_lang_grouping, thousands_sep)[0]
                formatted = decimal_point.join(parts)
            elif percent[-1] in 'diu':
                formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
        return formatted
def split(l, counts):
    """Cut ``l`` (a string or a list) into consecutive chunks whose sizes are
    taken from ``counts``.

    A count of -1 stops the splitting; a count of 0 repeats the previous
    chunk size until ``l`` is exhausted.  Whatever is left once the counts
    are consumed is appended as a single final chunk.

    >>> split("hello world", [])
    ['hello world']
    >>> split("hello world", [1])
    ['h', 'ello world']
    >>> split("hello world", [2])
    ['he', 'llo world']
    >>> split("hello world", [2,3])
    ['he', 'llo', ' world']
    >>> split("hello world", [2,3,0])
    ['he', 'llo', ' wo', 'rld']
    >>> split("hello world", [2,-1,3])
    ['he', 'llo world']
    """
    chunks = []
    repeat_width = len(l)  # chunk size to reuse when a zero count is seen
    remainder = l
    for width in counts:
        if not remainder or width == -1:
            break
        if width == 0:
            # repeat the previous width until nothing is left
            while remainder:
                chunks.append(remainder[:repeat_width])
                remainder = remainder[repeat_width:]
            break
        chunks.append(remainder[:width])
        remainder = remainder[width:]
        repeat_width = width
    if remainder:
        chunks.append(remainder)
    return chunks
# leading non-digits / run up to the first space (the digits) / the tail
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')

def intersperse(string, counts, separator=''):
    """Insert ``separator`` between right-aligned groups of the digit part of
    ``string``; group widths follow ``counts`` as interpreted by split().

    Returns a pair ``(grouped_string, number_of_separators_inserted)``.
    """
    prefix, body, suffix = intersperse_pat.match(string).groups()
    # group from the right: reverse, split, then reverse groups and order back
    groups = split(body[::-1], counts)
    grouped = separator.join(g[::-1] for g in reversed(groups))
    return prefix + grouped + suffix, max(len(groups) - 1, 0)
| 41.240838 | 15,754 |
44,053 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hashlib
import itertools
import json
import logging
import operator
from collections import defaultdict
from difflib import get_close_matches
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.modules import get_module_path, get_module_resource
_logger = logging.getLogger(__name__)
# Kinds of translatable terms stored in ir.translation:
# - 'model': the whole value of a translated field
# - 'model_terms': individual terms extracted from a structured field
# - 'code': terms extracted from source code
TRANSLATION_TYPE = [
    ('model', 'Model Field'),
    ('model_terms', 'Structured Model Field'),
    ('code', 'Code'),
]
class IrTranslationImport(object):
    """ Temporary cursor for optimizing mass insert into model 'ir.translation'.
    Open it (attached to a sql cursor), feed it with translation data and
    finish() it in order to insert multiple translations in a batch.
    """
    # name of the temporary staging table
    _table = 'tmp_ir_translation_import'

    def __init__(self, cr, overwrite=False):
        """ Store some values, and also create a temporary SQL table to accept
        the data.

        :param cr: the database cursor used for every statement
        :param overwrite: whether finish() may overwrite existing
                          non-noupdate translations
        """
        self._cr = cr
        self._model_table = "ir_translation"
        self._overwrite = overwrite
        self._debug = False
        # rows staged by push(), flushed to the temp table in finish()
        self._rows = []
        # Note that Postgres will NOT inherit the constraints or indexes
        # of ir_translation, so this copy will be much faster.
        # Extra columns hold the ir.model.data reference to be resolved later.
        query = """ CREATE TEMP TABLE %s (
                        imd_model VARCHAR,
                        imd_name VARCHAR,
                        noupdate BOOLEAN
                    ) INHERITS (%s) """ % (self._table, self._model_table)
        self._cr.execute(query)

    def push(self, trans_dict):
        """ Feed a translation, as a dictionary, into the cursor """
        # every pushed row is stored with state 'translated'
        params = dict(trans_dict, state="translated")
        self._rows.append((params['name'], params['lang'], params['res_id'],
                           params['src'], params['type'], params['imd_model'],
                           params['module'], params['imd_name'], params['value'],
                           params['state'], params['comments']))

    def finish(self):
        """ Transfer the data from the temp table to ir.translation """
        cr = self._cr
        # Step 0: insert rows in batch
        query = """ INSERT INTO %s (name, lang, res_id, src, type, imd_model,
                                    module, imd_name, value, state, comments)
                    VALUES """ % self._table
        for rows in cr.split_for_in_conditions(self._rows):
            cr.execute(query + ", ".join(["%s"] * len(rows)), rows)
        _logger.debug("ir.translation.cursor: We have %d entries to process", len(self._rows))
        # Step 1: resolve ir.model.data references to res_ids
        cr.execute(""" UPDATE %s AS ti
                          SET res_id = imd.res_id,
                              noupdate = imd.noupdate
                       FROM ir_model_data AS imd
                       WHERE ti.res_id IS NULL
                       AND ti.module IS NOT NULL AND ti.imd_name IS NOT NULL
                       AND ti.module = imd.module AND ti.imd_name = imd.name
                       AND ti.imd_model = imd.model; """ % self._table)
        if self._debug:
            cr.execute(""" SELECT module, imd_name, imd_model FROM %s
                           WHERE res_id IS NULL AND module IS NOT NULL """ % self._table)
            for row in cr.fetchall():
                _logger.info("ir.translation.cursor: missing res_id for %s.%s <%s> ", *row)
        # Records w/o res_id must _not_ be inserted into our db, because they are
        # referencing non-existent data.
        cr.execute("DELETE FROM %s WHERE res_id IS NULL AND module IS NOT NULL" % self._table)
        count = 0
        # Step 2: insert new or upsert non-noupdate translations
        if self._overwrite:
            # 'code' translations, keyed on (type, lang, md5(src)); an
            # existing 'openerp-web' comment is preserved on update
            cr.execute(""" INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
                           SELECT name, lang, res_id, src, type, value, module, state, comments
                           FROM %s
                           WHERE type = 'code'
                           AND noupdate IS NOT TRUE
                           ON CONFLICT (type, lang, md5(src)) WHERE type = 'code'
                            DO UPDATE SET (name, lang, res_id, src, type, value, module, state, comments) = (EXCLUDED.name, EXCLUDED.lang, EXCLUDED.res_id, EXCLUDED.src, EXCLUDED.type, EXCLUDED.value, EXCLUDED.module, EXCLUDED.state,
                                CASE WHEN %s.comments = 'openerp-web' THEN 'openerp-web' ELSE EXCLUDED.comments END
                            )
                           WHERE EXCLUDED.value IS NOT NULL AND EXCLUDED.value != '';
                       """ % (self._model_table, self._table, self._model_table))
            count += cr.rowcount
            # 'model' translations, keyed on (type, lang, name, res_id)
            cr.execute(""" INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
                           SELECT name, lang, res_id, src, type, value, module, state, comments
                           FROM %s
                           WHERE type = 'model'
                           AND noupdate IS NOT TRUE
                           ON CONFLICT (type, lang, name, res_id) WHERE type = 'model'
                           DO UPDATE SET (name, lang, res_id, src, type, value, module, state, comments) = (EXCLUDED.name, EXCLUDED.lang, EXCLUDED.res_id, EXCLUDED.src, EXCLUDED.type, EXCLUDED.value, EXCLUDED.module, EXCLUDED.state, EXCLUDED.comments)
                           WHERE EXCLUDED.value IS NOT NULL AND EXCLUDED.value != '';
                       """ % (self._model_table, self._table))
            count += cr.rowcount
            # 'model_terms' translations, keyed on the unique md5(src) index
            cr.execute(""" INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
                           SELECT name, lang, res_id, src, type, value, module, state, comments
                           FROM %s
                           WHERE type = 'model_terms'
                           AND noupdate IS NOT TRUE
                           ON CONFLICT (type, name, lang, res_id, md5(src))
                           DO UPDATE SET (name, lang, res_id, src, type, value, module, state, comments) = (EXCLUDED.name, EXCLUDED.lang, EXCLUDED.res_id, EXCLUDED.src, EXCLUDED.type, EXCLUDED.value, EXCLUDED.module, EXCLUDED.state, EXCLUDED.comments)
                           WHERE EXCLUDED.value IS NOT NULL AND EXCLUDED.value != '';
                       """ % (self._model_table, self._table))
            count += cr.rowcount
        # remaining rows (everything when not overwriting, noupdate rows
        # otherwise) are inserted without touching existing translations
        cr.execute(""" INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
                       SELECT name, lang, res_id, src, type, value, module, state, comments
                       FROM %s
                       WHERE %%s OR noupdate is true
                       ON CONFLICT DO NOTHING;
                   """ % (self._model_table, self._table), [not self._overwrite])
        count += cr.rowcount
        if self._debug:
            cr.execute("SELECT COUNT(*) FROM ONLY %s" % self._model_table)
            total = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", total, count)
        # Step 3: cleanup
        cr.execute("DROP TABLE %s" % self._table)
        self._rows.clear()
        return True
class IrTranslation(models.Model):
_name = "ir.translation"
_description = 'Translation'
_log_access = False
name = fields.Char(string='Translated field', required=True)
res_id = fields.Integer(string='Record ID', index=True)
lang = fields.Selection(selection='_get_languages', string='Language', validate=False)
type = fields.Selection(TRANSLATION_TYPE, string='Type', index=True)
src = fields.Text(string='Internal Source') # stored in database, kept for backward compatibility
value = fields.Text(string='Translation Value')
module = fields.Char(index=True, help="Module this term belongs to")
state = fields.Selection([('to_translate', 'To Translate'),
('inprogress', 'Translation in Progress'),
('translated', 'Translated')],
string="Status", default='to_translate',
help="Automatically set to let administators find new terms that might need to be translated")
# aka gettext extracted-comments - we use them to flag openerp-web translation
# cfr: http://www.gnu.org/savannah-checkouts/gnu/gettext/manual/html_node/PO-Files.html
comments = fields.Text(string='Translation comments', index=True)
_sql_constraints = [
('lang_fkey_res_lang', 'FOREIGN KEY(lang) REFERENCES res_lang(code)',
'Language code of translation item must be among known languages'),
]
    @api.model
    def _get_languages(self):
        # selection provider for the 'lang' field: installed languages only
        return self.env['res.lang'].get_installed()
    def _auto_init(self):
        """Create the table, then the partial unique indexes that back the
        ON CONFLICT clauses used throughout this model."""
        res = super(IrTranslation, self)._auto_init()
        # Add separate md5 index on src (no size limit on values, and good performance).
        tools.create_index(self._cr, 'ir_translation_src_md5', self._table, ['md5(src)'])
        # Cover 'model_terms' type
        tools.create_unique_index(self._cr, 'ir_translation_unique', self._table,
                                  ['type', 'name', 'lang', 'res_id', 'md5(src)'])
        # partial unique indexes for 'code' and 'model' translations
        if not tools.index_exists(self._cr, 'ir_translation_code_unique'):
            self._cr.execute("CREATE UNIQUE INDEX ir_translation_code_unique ON ir_translation (type, lang, md5(src)) WHERE type = 'code'")
        if not tools.index_exists(self._cr, 'ir_translation_model_unique'):
            self._cr.execute("CREATE UNIQUE INDEX ir_translation_model_unique ON ir_translation (type, lang, name, res_id) WHERE type = 'model'")
        return res
    @api.model
    def _get_ids(self, name, tt, lang, ids):
        """ Return the translations of records.

        :param name: a string defined as "<model_name>,<field_name>"
        :param tt: the type of translation (should always be "model")
        :param lang: the language code
        :param ids: the ids of the given records
        :return: mapping {record_id: value_or_False} covering every id in ids
        """
        # default every id to False, then fill in the values found in db
        translations = dict.fromkeys(ids, False)
        if ids:
            self._cr.execute("""SELECT res_id, value FROM ir_translation
                                WHERE lang=%s AND type=%s AND name=%s AND res_id IN %s""",
                             (lang, tt, name, tuple(ids)))
            for res_id, value in self._cr.fetchall():
                translations[res_id] = value
        return translations
    # models whose 'model' translations feed ORM-level caches: any change to
    # their translations requires a full cache flush
    CACHED_MODELS = {'ir.model.fields', 'ir.ui.view'}

    def _modified_model(self, model_name):
        """ Invalidate the ormcache if necessary, depending on ``model_name``.
        This should be called when modifying translations of type 'model'.
        """
        if model_name in self.CACHED_MODELS:
            self.clear_caches()
    def _modified(self):
        """ Invalidate the ormcache if necessary, depending on the translations ``self``. """
        # DLE P63: test_views.py
        # mark the records behind 'model_terms' translations as modified
        for trans in self:
            if trans.type == 'model_terms' and trans.res_id:
                model, field = trans.name.split(',')
                if model in self.env:
                    model = self.env[model]
                    if field in model._fields:
                        field = model._fields[field]
                        record = model.browse(trans.res_id)
                        record.modified([field.name])
        # a single cache-sensitive translation is enough to flush everything
        for trans in self:
            if (trans.type != 'model' or
                    (trans.name.split(',')[0] in self.CACHED_MODELS) or
                    (trans.comments and 'openerp-web' in trans.comments)):  # clear get_web_trans_hash
                self.clear_caches()
                break
    @api.model
    def _set_ids(self, name, tt, lang, ids, value, src=None):
        """ Update the translations of records.

        :param name: a string defined as "<model_name>,<field_name>"
        :param tt: the type of translation (should always be "model")
        :param lang: the language code
        :param ids: the ids of the given records
        :param value: the value of the translation
        :param src: the source of the translation
        :return: the number of given record ids (len(ids))
        """
        self._modified_model(name.split(',')[0])
        # update existing translations
        self._cr.execute("""UPDATE ir_translation
                            SET value=%s, src=%s, state=%s
                            WHERE lang=%s AND type=%s AND name=%s AND res_id IN %s
                            RETURNING res_id""",
                         (value, src, 'translated', lang, tt, name, tuple(ids)))
        existing_ids = [row[0] for row in self._cr.fetchall()]
        # create missing translations
        self.sudo().create([{
            'lang': lang,
            'type': tt,
            'name': name,
            'res_id': res_id,
            'value': value,
            'src': src,
            'state': 'translated',
        }
            for res_id in set(ids) - set(existing_ids)
        ])
        return len(ids)
    def _set_source(self, name, ids, src):
        """ Update the translation source of records.

        :param name: a string defined as "<model_name>,<field_name>"
        :param ids: the ids of the given records
        :param src: the source of the translation
        """
        # only 'model' translations have a single per-record source
        self._cr.execute("""UPDATE ir_translation
                            SET src=%s
                            WHERE type=%s AND name=%s AND res_id IN %s
                            RETURNING id""",
                         (src, 'model', name, tuple(ids)))
        existing_ids = [row[0] for row in self._cr.fetchall()]
        # invalidate src for updated translations
        self.invalidate_cache(fnames=['src'], ids=existing_ids)
    @api.model
    def _get_source_query(self, name, types, lang, source, res_id):
        # Build the (query, params) pair used by __get_source(): match on the
        # source term when one is given, on the name otherwise.
        if source:
            # Note: the extra test on md5(src) is a hint for postgres to use the
            # index ir_translation_src_md5
            query = """SELECT value FROM ir_translation
                       WHERE lang=%s AND type in %s AND src=%s AND md5(src)=md5(%s)"""
            source = tools.ustr(source)
            params = (lang or '', types, source, source)
            if res_id:
                query += " AND res_id in %s"
                params += (res_id,)
            if name:
                query += " AND name=%s"
                params += (tools.ustr(name),)
        else:
            query = """ SELECT value FROM ir_translation
                        WHERE lang=%s AND type in %s AND name=%s """
            params = (lang or '', types, tools.ustr(name))
        return (query, params)
    @tools.ormcache('name', 'types', 'lang', 'source', 'res_id')
    def __get_source(self, name, types, lang, source, res_id):
        # res_id is a tuple or None, otherwise ormcache cannot cache it!
        query, params = self._get_source_query(name, types, lang, source, res_id)
        self._cr.execute(query, params)
        res = self._cr.fetchone()
        trad = res and res[0] or u''
        # a missing/empty translation of a source term falls back to the term
        if source and not trad:
            return tools.ustr(source)
        return trad
@api.model
def _get_source(self, name, types, lang, source=None, res_id=None):
""" Return the translation for the given combination of ``name``,
``type``, ``language`` and ``source``. All values passed to this method
should be unicode (not byte strings), especially ``source``.
:param name: identification of the term to translate, such as field name (optional if source is passed)
:param types: single string defining type of term to translate (see ``type`` field on ir.translation), or sequence of allowed types (strings)
:param lang: language code of the desired translation
:param source: optional source term to translate (should be unicode)
:param res_id: optional resource id or a list of ids to translate (if used, ``source`` should be set)
:rtype: unicode
:return: the request translation, or an empty unicode string if no translation was
found and `source` was not passed
"""
# FIXME: should assert that `source` is unicode and fix all callers to
# always pass unicode so we can remove the string encoding/decoding.
if not lang:
return tools.ustr(source or '')
if isinstance(types, str):
types = (types,)
if res_id:
if isinstance(res_id, int):
res_id = (res_id,)
else:
res_id = tuple(res_id)
return self.__get_source(name, types, lang, source, res_id)
    @api.model
    def _get_terms_query(self, field, records):
        """ Utility function that makes the query for field terms. """
        # fetch the 'model_terms' rows of the field in the records' language
        query = """ SELECT * FROM ir_translation
                    WHERE lang=%s AND type=%s AND name=%s AND res_id IN %s """
        name = "%s,%s" % (field.model_name, field.name)
        params = (records.env.lang, 'model_terms', name, tuple(records.ids))
        return query, params
    @api.model
    def _get_terms_mapping(self, field, records):
        """ Return a function mapping a ir_translation row (dict) to a value.
        This method is called before querying the database for translations.
        """
        # default mapping: simply return the translated value
        return lambda data: data['value']
    @api.model
    def _get_terms_translations(self, field, records):
        """ Return the terms and translations of a given `field` on `records`.

        :return: {record_id: {source: value}}
        """
        # every requested id gets an entry, even without any translation
        result = {rid: {} for rid in records.ids}
        if records:
            map_trans = self._get_terms_mapping(field, records)
            query, params = self._get_terms_query(field, records)
            self._cr.execute(query, params)
            for data in self._cr.dictfetchall():
                result[data['res_id']][data['src']] = map_trans(data)
        return result
    @api.model
    def _sync_terms_translations(self, field, records):
        """ Synchronize the translations to the terms to translate, after the
        English value of a field is modified. The algorithm tries to match
        existing translations to the terms to translate, provided the distance
        between modified strings is not too large. It allows to not retranslate
        data where a typo has been fixed in the English value.
        """
        # only structured fields ('model_terms') have terms to synchronize
        if not callable(field.translate):
            return
        Translation = self.env['ir.translation']
        outdated = Translation
        discarded = Translation
        for record in records:
            # get field value and terms to translate
            value = record[field.name]
            terms = set(field.get_trans_terms(value))
            translations = Translation.search([
                ('type', '=', 'model_terms'),
                ('name', '=', "%s,%s" % (field.model_name, field.name)),
                ('res_id', '=', record.id),
            ])
            if not terms:
                # discard all translations for that field
                discarded += translations
                continue
            # remap existing translations on terms when possible; each term
            # should be translated at most once per language
            done = set()  # {(src, lang), ...}
            translations_to_match = []
            for translation in translations:
                if not translation.value:
                    discarded += translation
                    # consider it done to avoid being matched against another term
                    done.add((translation.src, translation.lang))
                elif translation.src in terms:
                    done.add((translation.src, translation.lang))
            else:
                    translations_to_match.append(translation)
            for translation in translations_to_match:
                # fuzzy-match the old source against the new terms (>= 90%)
                matches = get_close_matches(translation.src, terms, 1, 0.9)
                src = matches[0] if matches else None
                if not src:
                    outdated += translation
                elif (src, translation.lang) in done:
                    discarded += translation
                else:
                    vals = {'src': src, 'state': translation.state}
                    if translation.lang == records.env.lang:
                        # in the edited language the term IS its translation
                        vals['value'] = src
                    translation.write(vals)
                    done.add((src, translation.lang))
        # process outdated and discarded translations
        outdated.write({'state': 'to_translate'})
        if discarded:
            # delete in SQL to avoid invalidating the whole cache
            discarded._modified()
            discarded.modified(self._fields)
            self.flush(self._fields, discarded)
            self.invalidate_cache(ids=discarded._ids)
            self.env.cr.execute("DELETE FROM ir_translation WHERE id IN %s", [discarded._ids])
    @api.model
    @tools.ormcache_context('model_name', keys=('lang',))
    def get_field_string(self, model_name):
        """ Return the translation of fields strings in the context's language.
        Note that the result contains the available translations only.

        :param model_name: the name of a model
        :return: the model's fields' strings as a dictionary `{field_name: field_string}`
        """
        # cached per model name and per context language
        fields = self.env['ir.model.fields'].sudo().search([('model', '=', model_name)])
        return {field.name: field.field_description for field in fields}
    @api.model
    @tools.ormcache_context('model_name', keys=('lang',))
    def get_field_help(self, model_name):
        """ Return the translation of fields help in the context's language.
        Note that the result contains the available translations only.

        :param model_name: the name of a model
        :return: the model's fields' help as a dictionary `{field_name: field_help}`
        """
        # cached per model name and per context language
        fields = self.env['ir.model.fields'].sudo().search([('model', '=', model_name)])
        return {field.name: field.help for field in fields}
    @api.model
    @tools.ormcache_context('model_name', 'field_name', keys=('lang',))
    def get_field_selection(self, model_name, field_name):
        """ Return the translation of a field's selection in the context's language.
        Note that the result contains the available translations only.

        :param model_name: the name of the field's model
        :param field_name: the name of the field
        :return: the fields' selection as a list
        """
        # cached per (model, field) pair and per context language
        field = self.env['ir.model.fields']._get(model_name, field_name)
        return [(sel.value, sel.name) for sel in field.selection_ids]
    def check(self, mode):
        """ Check access rights of operation ``mode`` on ``self`` for the
        current user. Raise an AccessError in case conditions are not met.
        """
        if self.env.is_superuser():
            return
        # collect translated field records (model_ids) and other translations
        trans_ids = []
        model_ids = defaultdict(set)
        model_fields = defaultdict(set)
        for trans in self:
            if trans.type in ('model', 'model_terms'):
                mname, fname = trans.name.split(',')
                model_ids[mname].add(trans.res_id)
                model_fields[mname].add(fname)
            else:
                trans_ids.append(trans.id)
        # check for regular access rights on other translations
        if trans_ids:
            records = self.browse(trans_ids)
            records.check_access_rights(mode)
            records.check_access_rule(mode)
        # check for read/write access on translated field records
        # (any non-read operation on a translation amounts to a field write)
        fmode = 'read' if mode == 'read' else 'write'
        for mname, ids in model_ids.items():
            records = self.env[mname].browse(ids).exists()
            records.check_access_rights(fmode)
            records.check_field_access_rights(fmode, model_fields[mname])
            if mode == 'create' and set(records._ids) != ids:
                raise ValidationError(_("Creating translation on non existing records"))
            if not records:
                continue
            records.check_access_rule(fmode)
    @api.constrains('type', 'name', 'value')
    def _check_value(self):
        # Validate 'model' translations of structured fields by checking that
        # the translation can be applied and reverted without losing data.
        for trans in self.with_context(lang=None):
            if trans.type == 'model' and trans.value:
                mname, fname = trans.name.split(',')
                record = trans.env[mname].browse(trans.res_id)
                field = record._fields[fname]
                if callable(field.translate):
                    src = trans.src
                    val = trans.value.strip()
                    # check whether applying (src -> val) then (val -> src)
                    # gives the original value back
                    value0 = field.translate(lambda term: None, record[fname])
                    value1 = field.translate({src: val}.get, value0)
                    # don't check the reverse if no translation happened
                    if value0 == value1:
                        continue
                    value2 = field.translate({val: src}.get, value1)
                    if value2 != value0:
                        raise ValidationError(_("Translation is not valid:\n%s", val))
    @api.model_create_multi
    def create(self, vals_list):
        # create as sudo, then enforce access with check() on the result
        records = super(IrTranslation, self.sudo()).create(vals_list).with_env(self.env)
        records.check('create')
        records._modified()
        # DLE P62: `test_translate.py`, `test_sync`
        self.flush()
        return records
    def write(self, vals):
        """Write translations, keeping 'state' consistent: setting a value
        marks the row translated, clearing it or changing the source resets
        it to 'to_translate'."""
        if vals.get('value'):
            vals.setdefault('state', 'translated')
        elif vals.get('src') or not vals.get('value', True):
            vals.setdefault('state', 'to_translate')
        self.check('write')
        result = super(IrTranslation, self.sudo()).write(vals)
        self.check('write')
        self._modified()
        # DLE P62: `test_translate.py`, `test_sync`
        # when calling `flush` with a field list, if there is no value for one of these fields,
        # the flush to database is not done.
        # this causes issues when changing the src/value of a translation, as when we read, we ask the flush,
        # but its not really the field which is in the towrite values, but its translation
        self.flush()
        return result
    def unlink(self):
        # enforce access first, then flag cache/record invalidation
        self.check('unlink')
        self._modified()
        return super(IrTranslation, self.sudo()).unlink()
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        # DLE P67, `test_new_fields.py`, `test_80_copy`
        # When assigning a translation to a field
        # e.g. email.with_context(lang='fr_FR').label = "bonjour"
        # and then search on translations for this translation, must flush as the translation has not yet been written in database
        if any(self.env[model]._fields[field].translate for model, ids in self.env.all.towrite.items() for record_id, fields in ids.items() for field in fields):
            self.flush()
        return super(IrTranslation, self)._search(args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)
@api.model
def insert_missing(self, field, records):
""" Insert missing translations for `field` on `records`. """
records = records.with_context(lang=None)
external_ids = records.get_external_id() # if no xml_id, empty string
if callable(field.translate):
# insert missing translations for each term in src
query = """ INSERT INTO ir_translation (lang, type, name, res_id, src, value, module, state)
SELECT l.code, 'model_terms', %(name)s, %(res_id)s, %(src)s, '', %(module)s, 'to_translate'
FROM res_lang l
WHERE l.active AND NOT EXISTS (
SELECT 1 FROM ir_translation
WHERE lang=l.code AND type='model' AND name=%(name)s AND res_id=%(res_id)s AND src=%(src)s
)
ON CONFLICT DO NOTHING;
"""
for record in records:
module = external_ids[record.id].split('.')[0]
src = record[field.name] or None
for term in set(field.get_trans_terms(src)):
self._cr.execute(query, {
'name': "%s,%s" % (field.model_name, field.name),
'res_id': record.id,
'src': term,
'module': module
})
else:
# insert missing translations for src
query = """ INSERT INTO ir_translation (lang, type, name, res_id, src, value, module, state)
SELECT l.code, 'model', %(name)s, %(res_id)s, %(src)s, '', %(module)s, 'to_translate'
FROM res_lang l
WHERE l.active AND NOT EXISTS (
SELECT 1 FROM ir_translation
WHERE lang=l.code AND type='model' AND name=%(name)s AND res_id=%(res_id)s
);
DELETE FROM ir_translation dup
WHERE type='model' AND name=%(name)s AND res_id=%(res_id)s
AND dup.id NOT IN (SELECT MAX(t.id)
FROM ir_translation t
WHERE t.lang=dup.lang AND type='model' AND name=%(name)s AND res_id=%(res_id)s
);
UPDATE ir_translation SET src=%(src)s
WHERE type='model' AND name=%(name)s AND res_id=%(res_id)s;
"""
for record in records:
module = external_ids[record.id].split('.')[0]
self._cr.execute(query, {
'name': "%s,%s" % (field.model_name, field.name),
'res_id': record.id,
'src': record[field.name] or None,
'module': module
})
self._modified_model(field.model_name)
    @api.model
    def _upsert_translations(self, vals_list):
        """ Insert or update translations of type 'model' or 'model_terms'.

        This method is used for creations of translations where the given
        ``vals_list`` is trusted to be the right values and potential
        conflicts should be updated to the new given value.

        Mandatory values: name, lang, res_id, src, type
        The other keys are ignored during update if not present
        """
        rows_by_type = defaultdict(list)
        for vals in vals_list:
            rows_by_type[vals['type']].append((
                vals['name'], vals['lang'], vals['res_id'], vals['src'] or '', vals['type'],
                vals.get('module'), vals['value'] or '', vals.get('state'), vals.get('comments'),
            ))
        if rows_by_type['model']:
            # conflicts resolved on the partial unique (type, lang, name,
            # res_id) index; optional columns keep their previous value
            query = """
                INSERT INTO ir_translation (name, lang, res_id, src, type,
                                            module, value, state, comments)
                VALUES {}
                ON CONFLICT (type, lang, name, res_id) WHERE type='model'
                DO UPDATE SET (name, lang, res_id, src, type, value, module, state, comments) = (EXCLUDED.name, EXCLUDED.lang, EXCLUDED.res_id, EXCLUDED.src, EXCLUDED.type, EXCLUDED.value,
                    COALESCE(EXCLUDED.module, ir_translation.module),
                    COALESCE(EXCLUDED.state, ir_translation.state),
                    COALESCE(EXCLUDED.comments, ir_translation.comments))
                WHERE EXCLUDED.value IS NOT NULL AND EXCLUDED.value != '';
            """.format(", ".join(["%s"] * len(rows_by_type['model'])))
            self.env.cr.execute(query, rows_by_type['model'])
        if rows_by_type['model_terms']:
            # conflicts resolved on the (type, name, lang, res_id, md5(src))
            # unique index; new values always win here
            query = """
                INSERT INTO ir_translation (name, lang, res_id, src, type,
                                            module, value, state, comments)
                VALUES {}
                ON CONFLICT (type, name, lang, res_id, md5(src))
                DO UPDATE SET (name, lang, res_id, src, type, value, module, state, comments) = (EXCLUDED.name, EXCLUDED.lang, EXCLUDED.res_id, EXCLUDED.src, EXCLUDED.type, EXCLUDED.value, EXCLUDED.module, EXCLUDED.state, EXCLUDED.comments)
                WHERE EXCLUDED.value IS NOT NULL AND EXCLUDED.value != '';
            """.format(", ".join(["%s"] * len(rows_by_type['model_terms'])))
            self.env.cr.execute(query, rows_by_type['model_terms'])
    def _update_translations(self, vals_list):
        """ Update translations of type 'model' or 'model_terms'.

        This method is used for update of translations where the given
        ``vals_list`` is trusted to be the right values
        No new translation will be created
        """
        # group by (lang, type, name): one UPDATE per distinct combination
        grouped_rows = {}
        for vals in vals_list:
            key = (vals['lang'], vals['type'], vals['name'])
            grouped_rows.setdefault(key, [vals['value'], vals['src'], vals['state'], []])
            grouped_rows[key][3].append(vals['res_id'])
        for where, values in grouped_rows.items():
            self._cr.execute(
                """ UPDATE ir_translation
                    SET value=%s,
                        src=%s,
                        state=%s
                    WHERE lang=%s AND type=%s AND name=%s AND res_id in %s
                """,
                (values[0], values[1], values[2], where[0], where[1], where[2], tuple(values[3]))
            )
    @api.model
    def translate_fields(self, model, id, field=None):
        """ Open a view for translating the field(s) of the record (model, id).

        :param model: model name of the record to translate
        :param id: id of the record to translate
        :param field: optional field name to pre-filter the translation view
        :return: an ir.actions.act_window description (dict)
        """
        main_lang = 'en_US'
        # at least one other language must be installed to translate anything
        if not self.env['res.lang'].search_count([('code', '!=', main_lang)]):
            raise UserError(_("Translation features are unavailable until you install an extra translation."))
        # determine domain for selecting translations
        record = self.env[model].with_context(lang=main_lang).browse(id)
        domain = ['&', ('res_id', '=', id), ('name', '=like', model + ',%')]
        def make_domain(fld, rec):
            name = "%s,%s" % (fld.model_name, fld.name)
            return ['&', ('res_id', '=', rec.id), ('name', '=', name)]
        # insert missing translations, and extend domain for related fields
        for name, fld in record._fields.items():
            if not fld.translate:
                continue
            rec = record
            if fld.related:
                try:
                    # traverse related fields up to their data source
                    while fld.related:
                        rec, fld = fld.traverse_related(rec)
                    if rec:
                        domain = ['|'] + domain + make_domain(fld, rec)
                except AccessError:
                    continue
            assert fld.translate and rec._name == fld.model_name
            self.insert_missing(fld, rec)
        action = {
            'name': _('Translate'),
            'res_model': 'ir.translation',
            'type': 'ir.actions.act_window',
            'view_mode': 'tree',
            'view_id': self.env.ref('base.view_translation_dialog_tree').id,
            'target': 'current',
            'flags': {'search_view': True, 'action_buttons': True},
            'domain': domain,
            'context': {},
        }
        if field:
            fld = record._fields[field]
            if not fld.related:
                action['context'] = {
                    'search_default_name': "%s,%s" % (fld.model_name, fld.name),
                }
            else:
                rec = record
                try:
                    while fld.related:
                        rec, fld = fld.traverse_related(rec)
                    if rec:
                        action['context'] = {'search_default_name': "%s,%s" % (fld.model_name, fld.name),}
                except AccessError:
                    pass
            action['target'] = 'new'
            action['context']['translation_type'] = 'text' if fld.type in ['text', 'html'] else 'char'
            action['context']['translation_show_src'] = False
            # NOTE(review): the trailing commas below make 'view_id' a
            # one-element tuple instead of an id — looks unintentional,
            # confirm downstream handling before changing.
            if callable(fld.translate):
                action['view_id'] = self.env.ref('base.view_translation_lang_src_value_tree').id,
                action['context']['translation_show_src'] = True
            else:
                action['view_id'] = self.env.ref('base.view_translation_lang_value_tree').id,
        return action
def _get_import_cursor(self, overwrite):
    """Build and return a cursor-like helper used to bulk-insert translations.

    :param overwrite: whether already-existing translations may be overwritten
    """
    import_cursor = IrTranslationImport(self._cr, overwrite)
    return import_cursor
def _load_module_terms(self, modules, langs, overwrite=False):
    """ Load PO files of the given modules for the given languages.

    For each (module, language) pair, translation files are loaded in a
    fixed order so that more specific terms override more generic ones:
    base language (``es``), base extras, main language (``es_CL``), extras.

    :param modules: list of module names whose i18n/i18n_extra files to load
    :param langs: list of language codes to load (e.g. ``['es_CL']``)
    :param overwrite: whether existing translation terms must be overwritten
    :return: True
    """
    # load i18n files
    for module_name in modules:
        modpath = get_module_path(module_name)
        if not modpath:
            # module not present on disk: nothing to load for it
            continue
        for lang in langs:
            lang_code = tools.get_iso_codes(lang)
            lang_overwrite = overwrite
            base_lang_code = None
            if '_' in lang_code:
                # e.g. 'es_CL' -> base language 'es'
                base_lang_code = lang_code.split('_')[0]
            # Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po)
            if base_lang_code:
                base_trans_file = get_module_resource(module_name, 'i18n', base_lang_code + '.po')
                if base_trans_file:
                    _logger.info('module %s: loading base translation file %s for language %s', module_name, base_lang_code, lang)
                    tools.trans_load(self._cr, base_trans_file, lang, verbose=False, overwrite=lang_overwrite)
                    lang_overwrite = True  # make sure the requested translation will override the base terms later
                # i18n_extra folder is for additional translations handle manually (eg: for l10n_be)
                base_trans_extra_file = get_module_resource(module_name, 'i18n_extra', base_lang_code + '.po')
                if base_trans_extra_file:
                    _logger.info('module %s: loading extra base translation file %s for language %s', module_name, base_lang_code, lang)
                    tools.trans_load(self._cr, base_trans_extra_file, lang, verbose=False, overwrite=lang_overwrite)
                    lang_overwrite = True  # make sure the requested translation will override the base terms later
            # Step 2: then load the main translation file, possibly overriding the terms coming from the base language
            trans_file = get_module_resource(module_name, 'i18n', lang_code + '.po')
            if trans_file:
                _logger.info('module %s: loading translation file (%s) for language %s', module_name, lang_code, lang)
                tools.trans_load(self._cr, trans_file, lang, verbose=False, overwrite=lang_overwrite)
            elif lang_code != 'en_US':
                _logger.info('module %s: no translation for language %s', module_name, lang_code)
            # extra translations (i18n_extra) are loaded last for the full code too
            trans_extra_file = get_module_resource(module_name, 'i18n_extra', lang_code + '.po')
            if trans_extra_file:
                _logger.info('module %s: loading extra translation file (%s) for language %s', module_name, lang_code, lang)
                tools.trans_load(self._cr, trans_extra_file, lang, verbose=False, overwrite=lang_overwrite)
    return True
@api.model
def get_technical_translations(self, model_name):
    """Return an action listing the technical translations of ``model_name``.

    Technical translations cover the fields of the model: field label,
    tooltip and the available selection values.

    :return: action definition to open the list of available translations
    """
    model_fields = self.env['ir.model.fields'].search([('model', '=', model_name)])
    selection_fields = [f for f in model_fields if f.ttype == 'selection']
    selection_ids = tools.flatten([f.selection_ids.ids for f in selection_fields])
    list_view = self.env.ref("base.view_translation_tree", False) or self.env['ir.ui.view']
    action = {
        'name': _("Technical Translations"),
        'view_mode': 'tree',
        'views': [(list_view.id, "list")],
        'res_model': 'ir.translation',
        'type': 'ir.actions.act_window',
        'domain': [
            '&',
            ('type', '=', 'model'),
            '|',
            '&', ('res_id', 'in', model_fields.ids),
            ('name', 'like', 'ir.model.fields,'),
            '&', ('res_id', 'in', selection_ids),
            ('name', 'like', 'ir.model.fields.selection,')
        ],
    }
    return action
@api.model
def get_translations_for_webclient(self, mods, lang):
    """Return the webclient translations and language parameters.

    :param mods: list of module names; falls back to all installed modules
    :param lang: language code; falls back to the context language
    :return: tuple ``(translations_per_module, lang_params)`` where
        ``translations_per_module`` maps each module name to a dict with a
        ``'messages'`` list of ``{'id': source, 'string': translation}``
        entries, and ``lang_params`` is a dict of formatting parameters
        (or None when the language is not loaded).
    """
    if not mods:
        mods = [x['name'] for x in self.env['ir.module.module'].sudo().search_read(
            [('state', '=', 'installed')], ['name'])]
    if not lang:
        lang = self._context.get("lang")
    langs = self.env['res.lang']._lang_get(lang)
    lang_params = None
    if langs:
        lang_params = {
            "name": langs.name,
            "direction": langs.direction,
            "date_format": langs.date_format,
            "time_format": langs.time_format,
            "grouping": langs.grouping,
            "decimal_point": langs.decimal_point,
            "thousands_sep": langs.thousands_sep,
            "week_start": langs.week_start,
        }
        # week_start is stored as a string selection; the webclient expects an int
        lang_params['week_start'] = int(lang_params['week_start'])
        lang_params['code'] = lang
    # Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
    # done server-side when the language is loaded, so we only need to load the user's lang.
    translations_per_module = {}
    # NOTE: results are ordered by module, which itertools.groupby below relies on
    messages = self.env['ir.translation'].sudo().search_read([
        ('module', 'in', mods), ('lang', '=', lang),
        ('comments', 'like', 'openerp-web'), ('value', '!=', False),
        ('value', '!=', '')],
        ['module', 'src', 'value', 'lang'], order='module')
    for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
        translations_per_module.setdefault(mod, {'messages': []})
        translations_per_module[mod]['messages'].extend({
            'id': m['src'],
            'string': m['value']}
            for m in msg_group)
    return translations_per_module, lang_params
@api.model
@tools.ormcache('frozenset(mods)', 'lang')
def get_web_translations_hash(self, mods, lang):
    """Return a stable SHA1 digest of the webclient translations for ``mods``/``lang``."""
    translations, lang_params = self.get_translations_for_webclient(mods, lang)
    installed_langs = self.env['res.lang'].sudo().get_installed()
    cache_payload = {
        'lang_parameters': lang_params,
        'modules': translations,
        'lang': lang,
        'multi_lang': len(installed_langs) > 1,
    }
    payload_json = json.dumps(cache_payload, sort_keys=True)
    return hashlib.sha1(payload_json.encode()).hexdigest()
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from email.message import EmailMessage
from email.utils import make_msgid
import base64
import datetime
import email
import email.policy
import idna
import logging
import re
import smtplib
import ssl
import sys
import threading
from socket import gaierror, timeout
from OpenSSL import crypto as SSLCrypto
from OpenSSL.crypto import Error as SSLCryptoError, FILETYPE_PEM
from OpenSSL.SSL import Error as SSLError
from urllib3.contrib.pyopenssl import PyOpenSSLContext
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
from odoo.tools import ustr, pycompat, formataddr, email_normalize, encapsulate_email, email_domain_extract, email_domain_normalize
# Module-level loggers: general logging plus the dedicated test-mode logger.
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('odoo.tests')
# Timeout (in seconds) applied to every outgoing SMTP connection.
SMTP_TIMEOUT = 60
class MailDeliveryException(Exception):
    """Specific exception subclass for mail delivery errors.

    Raised by :meth:`IrMailServer.send_email` when an outgoing email could
    not be delivered; callers should handle it so no mail is silently lost.
    """
# Python 3: patch SMTP's internal printer/debugger
def _print_debug(self, *args):
    """Redirect smtplib's debug output to the module logger instead of stderr."""
    message = ' '.join(map(str, args))
    _logger.debug(message)
smtplib.SMTP._print_debug = _print_debug
# Python 3: workaround for bpo-35805, only partially fixed in Python 3.8.
RFC5322_IDENTIFICATION_HEADERS = {'message-id', 'in-reply-to', 'references', 'resent-msg-id'}
# Folding-free variant of the SMTP policy (no maximum line length).
_noFoldPolicy = email.policy.SMTP.clone(max_line_length=None)
class IdentificationFieldsNoFoldPolicy(email.policy.EmailPolicy):
    # Override _fold() to avoid folding identification fields, excluded by RFC2047 section 5
    # These are particularly important to preserve, as MTAs will often rewrite non-conformant
    # Message-ID headers, causing a loss of thread information (replies are lost)
    def _fold(self, name, value, *args, **kwargs):
        """Fold headers normally, but keep identification headers on a single line."""
        if name.lower() in RFC5322_IDENTIFICATION_HEADERS:
            return _noFoldPolicy._fold(name, value, *args, **kwargs)
        return super()._fold(name, value, *args, **kwargs)
# Global monkey-patch for our preferred SMTP policy, preserving the non-default linesep
email.policy.SMTP = IdentificationFieldsNoFoldPolicy(linesep=email.policy.SMTP.linesep)
# Python 2: replace smtplib's stderr
class WriteToLogger(object):
    """File-like object whose write() forwards to the module logger at DEBUG level."""
    def write(self, s):
        _logger.debug(s)
smtplib.stderr = WriteToLogger()
def is_ascii(s):
    """Return True if every character of ``s`` is plain 7-bit ASCII.

    :param s: string to check (empty strings are considered ASCII)
    """
    # str.isascii() (Python 3.7+) replaces the manual ord() scan and runs in C;
    # it returns True for "" just like the all() form did.
    return s.isascii()
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')
def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses
    that can be found in ``source``, ignoring
    malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    valid_addresses = []
    for candidate in address_pattern.findall(ustr(text)):
        try:
            # formataddr() with an empty name validates/normalizes the address
            valid_addresses.append(formataddr(('', candidate), charset='ascii'))
        except idna.IDNAError:
            # domain cannot be IDNA-encoded: drop the candidate
            pass
    return valid_addresses
class IrMailServer(models.Model):
    """Represents an SMTP server, able to send outgoing emails, with SSL and TLS capabilities."""
    _name = "ir.mail_server"
    _description = 'Mail Server'
    _order = 'sequence'
    # Error message used when no recipient could be extracted from To/Cc/Bcc
    NO_VALID_RECIPIENT = ("At least one valid recipient address should be "
                          "specified for outgoing emails (To/Cc/Bcc)")
    name = fields.Char(string='Description', required=True, index=True)
    # Restricts which sender addresses this server may be used for (full
    # address or bare domain); see _find_mail_server / _match_from_filter.
    from_filter = fields.Char(
        "From Filter",
        help='Define for which email address or domain this server can be used.\n'
             'e.g.: "[email protected]" or "odoo.com"')
    smtp_host = fields.Char(string='SMTP Server', required=True, help="Hostname or IP of SMTP server")
    smtp_port = fields.Integer(string='SMTP Port', required=True, default=25, help="SMTP Port. Usually 465 for SSL, and 25 or 587 for other cases.")
    smtp_authentication = fields.Selection([('login', 'Username'), ('certificate', 'SSL Certificate')], string='Authenticate with', required=True, default='login')
    smtp_user = fields.Char(string='Username', help="Optional username for SMTP authentication", groups='base.group_system')
    smtp_pass = fields.Char(string='Password', help="Optional password for SMTP authentication", groups='base.group_system')
    smtp_encryption = fields.Selection([('none', 'None'),
                                        ('starttls', 'TLS (STARTTLS)'),
                                        ('ssl', 'SSL/TLS')],
                                       string='Connection Security', required=True, default='none',
                                       help="Choose the connection encryption scheme:\n"
                                            "- None: SMTP sessions are done in cleartext.\n"
                                            "- TLS (STARTTLS): TLS encryption is requested at start of SMTP session (Recommended)\n"
                                            "- SSL/TLS: SMTP sessions are encrypted with SSL/TLS through a dedicated port (default: 465)")
    # Certificate/key pair used only with smtp_authentication == 'certificate';
    # both must be set together (see _check_smtp_ssl_files).
    smtp_ssl_certificate = fields.Binary(
        'SSL Certificate', groups='base.group_system', attachment=False,
        help='SSL certificate used for authentication')
    smtp_ssl_private_key = fields.Binary(
        'SSL Private Key', groups='base.group_system', attachment=False,
        help='SSL private key used for authentication')
    smtp_debug = fields.Boolean(string='Debugging', help="If enabled, the full output of SMTP sessions will "
                                                         "be written to the server log at DEBUG level "
                                                         "(this is very verbose and may include confidential info!)")
    sequence = fields.Integer(string='Priority', default=10, help="When no specific mail server is requested for a mail, the highest priority one "
                                                                  "is used. Default priority is 10 (smaller number = higher priority)")
    active = fields.Boolean(default=True)
@api.constrains('smtp_ssl_certificate', 'smtp_ssl_private_key')
def _check_smtp_ssl_files(self):
    """Ensure the SSL certificate and private key are either both set or both empty."""
    for server in self:
        has_certificate = bool(server.smtp_ssl_certificate)
        has_private_key = bool(server.smtp_ssl_private_key)
        if has_certificate and not has_private_key:
            raise UserError(_('SSL private key is missing for %s.', server.name))
        if has_private_key and not has_certificate:
            raise UserError(_('SSL certificate is missing for %s.', server.name))
def _get_test_email_addresses(self):
    """Return a ``(email_from, email_to)`` pair used to simulate a test email.

    :raise UserError: when no from_filter is set and the current user has no email
    """
    self.ensure_one()
    from_filter = self.from_filter
    if not from_filter:
        # Fallback to current user email if there's no from filter
        email_from = self.env.user.email
        if not email_from:
            raise UserError(_('Please configure an email on the current user to simulate '
                              'sending an email message via this outgoing server'))
        return email_from, '[email protected]'
    if "@" in from_filter:
        # All emails will be sent from the same address
        return from_filter, "[email protected]"
    # All emails will be sent from any address in the same domain
    default_from = self.env["ir.config_parameter"].sudo().get_param("mail.default.from", "odoo")
    return f"{default_from}@{from_filter}", "[email protected]"
def test_smtp_connection(self):
    """Check connectivity of each server by simulating an email — without sending it.

    Walks the SMTP dialog (MAIL FROM, RCPT TO, DATA) and converts any
    protocol or transport failure into a translated :class:`UserError`.
    On success, returns a client notification action.
    """
    for server in self:
        smtp = False
        try:
            smtp = self.connect(mail_server_id=server.id)
            # simulate sending an email from current user's address - without sending it!
            email_from, email_to = server._get_test_email_addresses()
            # Testing the MAIL FROM step should detect sender filter problems
            (code, repl) = smtp.mail(email_from)
            if code != 250:
                raise UserError(_('The server refused the sender address (%(email_from)s) '
                                  'with error %(repl)s') % locals())
            # Testing the RCPT TO step should detect most relaying problems
            (code, repl) = smtp.rcpt(email_to)
            if code not in (250, 251):
                raise UserError(_('The server refused the test recipient (%(email_to)s) '
                                  'with error %(repl)s') % locals())
            # Beginning the DATA step should detect some deferred rejections
            # Can't use self.data() as it would actually send the mail!
            smtp.putcmd("data")
            (code, repl) = smtp.getreply()
            if code != 354:
                raise UserError(_('The server refused the test connection '
                                  'with error %(repl)s') % locals())
        except UserError as e:
            # let UserErrors (messages) bubble up
            raise e
        except (UnicodeError, idna.core.InvalidCodepoint) as e:
            raise UserError(_("Invalid server name !\n %s", ustr(e)))
        except (gaierror, timeout) as e:
            raise UserError(_("No response received. Check server address and port number.\n %s", ustr(e)))
        except smtplib.SMTPServerDisconnected as e:
            raise UserError(_("The server has closed the connection unexpectedly. Check configuration served on this port number.\n %s", ustr(e.strerror)))
        except smtplib.SMTPResponseException as e:
            raise UserError(_("Server replied with following exception:\n %s", ustr(e.smtp_error)))
        except smtplib.SMTPNotSupportedError as e:
            raise UserError(_("An option is not supported by the server:\n %s", e.strerror))
        except smtplib.SMTPException as e:
            raise UserError(_("An SMTP exception occurred. Check port number and connection security type.\n %s", ustr(e)))
        except SSLError as e:
            raise UserError(_("An SSL exception occurred. Check connection security type.\n %s", ustr(e)))
        except Exception as e:
            raise UserError(_("Connection Test Failed! Here is what we got instead:\n %s", ustr(e)))
        finally:
            try:
                if smtp:
                    smtp.close()
            except Exception:
                # ignored, just a consequence of the previous exception
                pass
    message = _("Connection Test Successful!")
    return {
        'type': 'ir.actions.client',
        'tag': 'display_notification',
        'params': {
            'message': message,
            'type': 'success',
            'sticky': False,
        }
    }
def connect(self, host=None, port=None, user=None, password=None, encryption=None,
            smtp_from=None, ssl_certificate=None, ssl_private_key=None, smtp_debug=False, mail_server_id=None):
    """Returns a new SMTP connection to the given SMTP server.
    When running in test mode, this method does nothing and returns `None`.

    :param host: host or IP of SMTP server to connect to, if mail_server_id not passed
    :param int port: SMTP port to connect to
    :param user: optional username to authenticate with
    :param password: optional password to authenticate with
    :param string encryption: optional, ``'ssl'`` | ``'starttls'``
    :param smtp_from: FROM SMTP envelop, used to find the best mail server
    :param ssl_certificate: filename of the SSL certificate used for authentication
        Used when no mail server is given and overwrite the odoo-bin argument "smtp_ssl_certificate"
    :param ssl_private_key: filename of the SSL private key used for authentication
        Used when no mail server is given and overwrite the odoo-bin argument "smtp_ssl_private_key"
    :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
                            will be output in logs)
    :param mail_server_id: ID of specific mail server to use (overrides other parameters)
    """
    # Do not actually connect while running in test mode
    if self._is_test_mode():
        return
    mail_server = smtp_encryption = None
    if mail_server_id:
        mail_server = self.sudo().browse(mail_server_id)
    elif not host:
        # no explicit server nor host: pick the best configured server for smtp_from
        mail_server, smtp_from = self.sudo()._find_mail_server(smtp_from)
    if not mail_server:
        mail_server = self.env['ir.mail_server']
    ssl_context = None
    if mail_server:
        # case 1: use the configuration stored on the ir.mail_server record
        smtp_server = mail_server.smtp_host
        smtp_port = mail_server.smtp_port
        if mail_server.smtp_authentication == "login":
            smtp_user = mail_server.smtp_user
            smtp_password = mail_server.smtp_pass
        else:
            # certificate authentication: no user/password login
            smtp_user = None
            smtp_password = None
        smtp_encryption = mail_server.smtp_encryption
        smtp_debug = smtp_debug or mail_server.smtp_debug
        from_filter = mail_server.from_filter
        if (mail_server.smtp_authentication == "certificate"
           and mail_server.smtp_ssl_certificate
           and mail_server.smtp_ssl_private_key):
            try:
                ssl_context = PyOpenSSLContext(ssl.PROTOCOL_TLS)
                smtp_ssl_certificate = base64.b64decode(mail_server.smtp_ssl_certificate)
                certificate = SSLCrypto.load_certificate(FILETYPE_PEM, smtp_ssl_certificate)
                smtp_ssl_private_key = base64.b64decode(mail_server.smtp_ssl_private_key)
                private_key = SSLCrypto.load_privatekey(FILETYPE_PEM, smtp_ssl_private_key)
                ssl_context._ctx.use_certificate(certificate)
                ssl_context._ctx.use_privatekey(private_key)
                # Check that the private key match the certificate
                ssl_context._ctx.check_privatekey()
            except SSLCryptoError as e:
                raise UserError(_('The private key or the certificate is not a valid file. \n%s', str(e)))
            except SSLError as e:
                raise UserError(_('Could not load your certificate / private key. \n%s', str(e)))
    else:
        # we were passed individual smtp parameters or nothing and there is no default server
        smtp_server = host or tools.config.get('smtp_server')
        smtp_port = tools.config.get('smtp_port', 25) if port is None else port
        smtp_user = user or tools.config.get('smtp_user')
        smtp_password = password or tools.config.get('smtp_password')
        from_filter = self.env['ir.config_parameter'].sudo().get_param(
            'mail.default.from_filter', tools.config.get('from_filter'))
        smtp_encryption = encryption
        if smtp_encryption is None and tools.config.get('smtp_ssl'):
            smtp_encryption = 'starttls'  # smtp_ssl => STARTTLS as of v7
        smtp_ssl_certificate_filename = ssl_certificate or tools.config.get('smtp_ssl_certificate_filename')
        smtp_ssl_private_key_filename = ssl_private_key or tools.config.get('smtp_ssl_private_key_filename')
        if smtp_ssl_certificate_filename and smtp_ssl_private_key_filename:
            try:
                ssl_context = PyOpenSSLContext(ssl.PROTOCOL_TLS)
                ssl_context.load_cert_chain(smtp_ssl_certificate_filename, keyfile=smtp_ssl_private_key_filename)
                # Check that the private key match the certificate
                ssl_context._ctx.check_privatekey()
            except SSLCryptoError as e:
                raise UserError(_('The private key or the certificate is not a valid file. \n%s', str(e)))
            except SSLError as e:
                raise UserError(_('Could not load your certificate / private key. \n%s', str(e)))
    if not smtp_server:
        raise UserError(
            (_("Missing SMTP Server") + "\n" +
             _("Please define at least one SMTP server, "
               "or provide the SMTP parameters explicitly.")))
    if smtp_encryption == 'ssl':
        if 'SMTP_SSL' not in smtplib.__all__:
            raise UserError(
                _("Your Odoo Server does not support SMTP-over-SSL. "
                  "You could use STARTTLS instead. "
                  "If SSL is needed, an upgrade to Python 2.6 on the server-side "
                  "should do the trick."))
        connection = smtplib.SMTP_SSL(smtp_server, smtp_port, timeout=SMTP_TIMEOUT)
    else:
        connection = smtplib.SMTP(smtp_server, smtp_port, timeout=SMTP_TIMEOUT)
    connection.set_debuglevel(smtp_debug)
    if smtp_encryption == 'starttls':
        # starttls() will perform ehlo() if needed first
        # and will discard the previous list of services
        # after successfully performing STARTTLS command,
        # (as per RFC 3207) so for example any AUTH
        # capability that appears only on encrypted channels
        # will be correctly detected for next step
        connection.starttls(context=ssl_context)
    if smtp_user:
        # Attempt authentication - will raise if AUTH service not supported
        local, at, domain = smtp_user.rpartition('@')
        if at:
            # IDNA-encode the domain part of the login
            smtp_user = local + at + idna.encode(domain).decode('ascii')
        mail_server._smtp_login(connection, smtp_user, smtp_password or '')
    # Some methods of SMTP don't check whether EHLO/HELO was sent.
    # Anyway, as it may have been sent by login(), all subsequent usages should consider this command as sent.
    connection.ehlo_or_helo_if_needed()
    # Store the "from_filter" of the mail server / odoo-bin argument to know if we
    # need to change the FROM headers or not when we will prepare the mail message
    connection.from_filter = from_filter
    connection.smtp_from = smtp_from
    return connection
def _smtp_login(self, connection, smtp_user, smtp_password):
    """Authenticate the SMTP connection.

    Extension hook: can be overridden in other modules for different
    authentication methods. Can be called on the model itself or on a
    singleton.

    :param connection: the SMTP connection to authenticate
    :param smtp_user: the user to use for the authentication
    :param smtp_password: the password to use for the authentication
    """
    connection.login(smtp_user, smtp_password)
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                body_alternative=None, subtype_alternative='plain'):
    """Constructs an RFC2822 email.message.Message object based on the keyword arguments passed, and returns it.

    :param string email_from: sender email address
    :param list email_to: list of recipient addresses (to be joined with commas)
    :param string subject: email subject (no pre-encoding/quoting necessary)
    :param string body: email body, of the type ``subtype`` (by default, plaintext).
                        If html subtype is used, the message will be automatically converted
                        to plaintext and wrapped in multipart/alternative, unless an explicit
                        ``body_alternative`` version is passed.
    :param string body_alternative: optional alternative body, of the type specified in ``subtype_alternative``
    :param string reply_to: optional value of Reply-To header
    :param string object_id: optional tracking identifier, to be included in the message-id for
                             recognizing replies. Suggested format for object-id is "res_id-model",
                             e.g. "12345-crm.lead".
    :param string subtype: optional mime subtype for the text body (usually 'plain' or 'html'),
                           must match the format of the ``body`` parameter. Default is 'plain',
                           making the content part of the mail "text/plain".
    :param string subtype_alternative: optional mime subtype of ``body_alternative`` (usually 'plain'
                                       or 'html'). Default is 'plain'.
    :param list attachments: list of (filename, filecontents) pairs, where filecontents is a string
                             containing the bytes of the attachment
    :param list email_cc: optional list of string values for CC header (to be joined with commas)
    :param list email_bcc: optional list of string values for BCC header (to be joined with commas)
    :param dict headers: optional map of headers to set on the outgoing mail (may override the
                         other headers, including Subject, Reply-To, Message-Id, etc.)
    :rtype: email.message.EmailMessage
    :return: the new RFC2822 email message
    """
    email_from = email_from or self._get_default_from_address()
    assert email_from, "You must either provide a sender address explicitly or configure "\
                       "using the combination of `mail.catchall.domain` and `mail.default.from` "\
                       "ICPs, in the server configuration file or with the "\
                       "--email-from startup parameter."
    headers = headers or {}         # need valid dict later
    email_cc = email_cc or []
    email_bcc = email_bcc or []
    body = body or u''
    # use the globally patched SMTP policy (see IdentificationFieldsNoFoldPolicy)
    msg = EmailMessage(policy=email.policy.SMTP)
    if not message_id:
        if object_id:
            # tracking id embeds the record reference so replies can be matched
            message_id = tools.generate_tracking_message_id(object_id)
        else:
            message_id = make_msgid()
    msg['Message-Id'] = message_id
    if references:
        msg['references'] = references
    msg['Subject'] = subject
    msg['From'] = email_from
    del msg['Reply-To']
    msg['Reply-To'] = reply_to or email_from
    msg['To'] = email_to
    if email_cc:
        msg['Cc'] = email_cc
    if email_bcc:
        msg['Bcc'] = email_bcc
    msg['Date'] = datetime.datetime.utcnow()
    # custom headers are set last so they may override the standard ones above
    for key, value in headers.items():
        msg[pycompat.to_text(ustr(key))] = value
    email_body = ustr(body)
    if subtype == 'html' and not body_alternative:
        # auto-generate a plaintext alternative from the html body
        msg['MIME-Version'] = '1.0'
        msg.add_alternative(tools.html2plaintext(email_body), subtype='plain', charset='utf-8')
        msg.add_alternative(email_body, subtype=subtype, charset='utf-8')
    elif body_alternative:
        msg['MIME-Version'] = '1.0'
        msg.add_alternative(ustr(body_alternative), subtype=subtype_alternative, charset='utf-8')
        msg.add_alternative(email_body, subtype=subtype, charset='utf-8')
    else:
        msg.set_content(email_body, subtype=subtype, charset='utf-8')
    if attachments:
        for (fname, fcontent, mime) in attachments:
            maintype, subtype = mime.split('/') if mime and '/' in mime else ('application', 'octet-stream')
            msg.add_attachment(fcontent, maintype, subtype, filename=fname)
    return msg
@api.model
def _get_default_bounce_address(self):
    """Compute the default bounce address used as envelope sender.

    Joins the ``mail.bounce.alias`` config parameter (falling back to
    "postmaster-odoo") with ``mail.catchall.domain``. Returns ``None``
    when the catchall domain is not configured.
    """
    config = self.env['ir.config_parameter'].sudo()
    postmaster = config.get_param('mail.bounce.alias', default='postmaster-odoo')
    domain = config.get_param('mail.catchall.domain')
    if not (postmaster and domain):
        return None
    return '%s@%s' % (postmaster, domain)
@api.model
def _get_default_from_address(self):
    """Compute the default "header from" address.

    :return str/None:
        - ``mail.default.from`` when it already is a full email address,
        - otherwise ``mail.default.from`` combined with
          ``mail.catchall.domain``,
        - otherwise the ``--email-from`` CLI/config parameter.
    """
    config = self.env['ir.config_parameter'].sudo()
    email_from = config.get_param("mail.default.from")
    if email_from and "@" in email_from:
        # already a complete address
        return email_from
    domain = config.get_param("mail.catchall.domain")
    if email_from and domain:
        return "%s@%s" % (email_from, domain)
    return tools.config.get("email_from")
def _prepare_email_message(self, message, smtp_session):
    """Prepare the SMTP information (from, to, message) before sending.

    :param message: the email.message.Message to send, information like the
        Return-Path, the From, etc... will be used to find the smtp_from and to smtp_to
    :param smtp_session: the opened SMTP session to use to authenticate the sender
    :return: smtp_from, smtp_to_list, message
        smtp_from: email to used during the authentication to the mail server
        smtp_to_list: list of email address which will receive the email
        message: the email.message.Message to send
    """
    # Use the default bounce address **only if** no Return-Path was
    # provided by caller.  Caller may be using Variable Envelope Return
    # Path (VERP) to detect no-longer valid email addresses.
    bounce_address = message['Return-Path'] or self._get_default_bounce_address() or message['From']
    smtp_from = message['From'] or bounce_address
    assert smtp_from, "The Return-Path or From header is required for any outbound email"
    email_to = message['To']
    email_cc = message['Cc']
    email_bcc = message['Bcc']
    # Bcc recipients must never appear in the transmitted headers
    del message['Bcc']
    # All recipient addresses must only contain ASCII characters
    smtp_to_list = [
        address
        for base in [email_to, email_cc, email_bcc]
        for address in extract_rfc2822_addresses(base)
        if address
    ]
    assert smtp_to_list, self.NO_VALID_RECIPIENT
    x_forge_to = message['X-Forge-To']
    if x_forge_to:
        # `To:` header forged, e.g. for posting on mail.channels, to avoid confusion
        del message['X-Forge-To']
        del message['To']           # avoid multiple To: headers!
        message['To'] = x_forge_to
    # Try to not spoof the mail from headers
    from_filter = getattr(smtp_session, 'from_filter', False)
    smtp_from = getattr(smtp_session, 'smtp_from', False) or smtp_from
    notifications_email = email_normalize(self._get_default_from_address())
    if notifications_email and smtp_from == notifications_email and message['From'] != notifications_email:
        # sending through the notification address on behalf of another author:
        # keep the author visible by encapsulating it in the From header
        smtp_from = encapsulate_email(message['From'], notifications_email)
    if message['From'] != smtp_from:
        del message['From']
        message['From'] = smtp_from
    # Check if it's still possible to put the bounce address as smtp_from
    if self._match_from_filter(bounce_address, from_filter):
        # Mail headers FROM will be spoofed to be able to receive bounce notifications
        # Because the mail server support the domain of the bounce address
        smtp_from = bounce_address
    # The email's "Envelope From" (Return-Path) must only contain ASCII characters.
    smtp_from_rfc2822 = extract_rfc2822_addresses(smtp_from)
    assert smtp_from_rfc2822, (
        f"Malformed 'Return-Path' or 'From' address: {smtp_from} - "
        "It should contain one valid plain ASCII email")
    smtp_from = smtp_from_rfc2822[-1]
    return smtp_from, smtp_to_list, message
@api.model
def send_email(self, message, mail_server_id=None, smtp_server=None, smtp_port=None,
               smtp_user=None, smtp_password=None, smtp_encryption=None,
               smtp_ssl_certificate=None, smtp_ssl_private_key=None,
               smtp_debug=False, smtp_session=None):
    """Sends an email directly (no queuing).

    No retries are done, the caller should handle MailDeliveryException in order to ensure that
    the mail is never lost.
    If the mail_server_id is provided, sends using this mail server, ignoring other smtp_* arguments.
    If mail_server_id is None and smtp_server is None, use the default mail server (highest priority).
    If mail_server_id is None and smtp_server is not None, use the provided smtp_* arguments.
    If both mail_server_id and smtp_server are None, look for an 'smtp_server' value in server config,
    and fails if not found.

    :param message: the email.message.Message to send. The envelope sender will be extracted from the
                    ``Return-Path`` (if present), or will be set to the default bounce address.
                    The envelope recipients will be extracted from the combined list of ``To``,
                    ``CC`` and ``BCC`` headers.
    :param smtp_session: optional pre-established SMTP session. When provided,
                         overrides `mail_server_id` and all the `smtp_*` parameters.
                         Passing the matching `mail_server_id` may yield better debugging/log
                         messages. The caller is in charge of disconnecting the session.
    :param mail_server_id: optional id of ir.mail_server to use for sending. overrides other smtp_* arguments.
    :param smtp_server: optional hostname of SMTP server to use
    :param smtp_encryption: optional TLS mode, one of 'none', 'starttls' or 'ssl' (see ir.mail_server fields for explanation)
    :param smtp_port: optional SMTP port, if mail_server_id is not passed
    :param smtp_user: optional SMTP user, if mail_server_id is not passed
    :param smtp_password: optional SMTP password to use, if mail_server_id is not passed
    :param smtp_ssl_certificate: filename of the SSL certificate used for authentication
    :param smtp_ssl_private_key: filename of the SSL private key used for authentication
    :param smtp_debug: optional SMTP debug flag, if mail_server_id is not passed
    :return: the Message-ID of the message that was just sent, if successfully sent, otherwise raises
             MailDeliveryException and logs root cause.
    """
    smtp = smtp_session
    if not smtp:
        smtp = self.connect(
            smtp_server, smtp_port, smtp_user, smtp_password, smtp_encryption,
            smtp_from=message['From'], ssl_certificate=smtp_ssl_certificate, ssl_private_key=smtp_ssl_private_key,
            smtp_debug=smtp_debug, mail_server_id=mail_server_id,)
    smtp_from, smtp_to_list, message = self._prepare_email_message(message, smtp)
    # Do not actually send emails in testing mode!
    if self._is_test_mode():
        _test_logger.info("skip sending email in test mode")
        return message['Message-Id']
    try:
        message_id = message['Message-Id']
        if sys.version_info < (3, 7, 4):
            # header folding code is buggy and adds redundant carriage
            # returns, it got fixed in 3.7.4 thanks to bpo-34424
            message_str = message.as_string()
            message_str = re.sub('\r+(?!\n)', '', message_str)
            mail_options = []
            if any((not is_ascii(addr) for addr in smtp_to_list + [smtp_from])):
                # non ascii email found, require SMTPUTF8 extension,
                # the relay may reject it
                mail_options.append("SMTPUTF8")
            smtp.sendmail(smtp_from, smtp_to_list, message_str, mail_options=mail_options)
        else:
            # modern path: let smtplib serialize and pick mail options itself
            smtp.send_message(message, smtp_from, smtp_to_list)
        # do not quit() a pre-established smtp_session
        if not smtp_session:
            smtp.quit()
    except smtplib.SMTPServerDisconnected:
        # let disconnections bubble up so callers may retry with a fresh session
        raise
    except Exception as e:
        params = (ustr(smtp_server), e.__class__.__name__, ustr(e))
        msg = _("Mail delivery failed via SMTP server '%s'.\n%s: %s", *params)
        _logger.info(msg)
        raise MailDeliveryException(_("Mail Delivery Failed"), msg)
    return message_id
def _find_mail_server(self, email_from, mail_servers=None):
    """Find the appropriate mail server for the given email address.

    Returns: Record<ir.mail_server>, email_from
    - Mail server to use to send the email (None if we use the odoo-bin arguments)
    - Email FROM to use to send the email (in some case, it might be impossible
      to use the given email address directly if no mail server is configured for)
    """
    email_from_normalized = email_normalize(email_from)
    email_from_domain = email_domain_extract(email_from_normalized)
    notifications_email = email_normalize(self._get_default_from_address())
    notifications_domain = email_domain_extract(notifications_email)

    if mail_servers is None:
        mail_servers = self.sudo().search([], order='sequence')

    # Priority-ordered candidates: exact address match, then domain match,
    # first for the requested FROM address, then for the notification address.
    candidates = [
        (lambda server: email_normalize(server.from_filter) == email_from_normalized, email_from),
        (lambda server: email_domain_normalize(server.from_filter) == email_from_domain, email_from),
    ]
    if notifications_email:
        candidates.append(
            (lambda server: email_normalize(server.from_filter) == notifications_email, notifications_email))
        candidates.append(
            (lambda server: email_domain_normalize(server.from_filter) == notifications_domain, notifications_email))

    for matcher, from_address in candidates:
        matching_servers = mail_servers.filtered(matcher)
        if matching_servers:
            return matching_servers[0], from_address

    # Nothing matched: take the first server without "from_filter" (the FROM
    # will be spoofed because we have no other choice), or failing that any
    # configured server even if it was meant for another domain.
    unrestricted_servers = mail_servers.filtered(lambda server: not server.from_filter)
    if unrestricted_servers:
        return unrestricted_servers[0], email_from
    if mail_servers:
        return mail_servers[0], email_from

    # No mail server at all: rely on the SMTP configuration given in the
    # odoo-bin arguments / configuration file
    from_filter = self.env['ir.config_parameter'].sudo().get_param(
        'mail.default.from_filter', tools.config.get('from_filter'))
    if self._match_from_filter(email_from, from_filter):
        return None, email_from
    if notifications_email and self._match_from_filter(notifications_email, from_filter):
        return None, notifications_email
    return None, email_from
@api.model
def _match_from_filter(self, email_from, from_filter):
    """Return whether the given email address matches the "from_filter" field.

    The from filter can be Falsy (always match), a domain name or a full
    email address.
    """
    if not from_filter:
        # no restriction configured: everything matches
        return True
    normalized_from = email_normalize(email_from)
    if '@' in from_filter:
        # the filter is a complete address: compare normalized addresses
        return email_normalize(from_filter) == normalized_from
    # the filter is a bare domain: compare the domain parts only
    return email_domain_extract(normalized_from) == email_domain_normalize(from_filter)
@api.onchange('smtp_encryption')
def _onchange_encryption(self):
    """Adjust the default SMTP port to the selected encryption scheme and
    warn the user when the Python build lacks SSL support.

    :return: dict, possibly containing a 'warning' entry for the client
    """
    result = {}
    if self.smtp_encryption == 'ssl':
        self.smtp_port = 465
        # smtplib only exposes SMTP_SSL when Python was compiled with SSL;
        # idiomatic membership test ('x not in y' instead of 'not x in y')
        if 'SMTP_SSL' not in smtplib.__all__:
            result['warning'] = {
                'title': _('Warning'),
                'message': _('Your server does not seem to support SSL, you may want to try STARTTLS instead'),
            }
    else:
        self.smtp_port = 25
    return result
def _is_test_mode(self):
    """Return True if we are running the tests, so we do not send real emails.

    Can be overridden in tests after mocking the SMTP lib to test in depth the
    outgoing mail server.
    """
    thread_is_testing = getattr(threading.current_thread(), 'testing', False)
    return thread_is_testing or self.env.registry.in_test_mode()
| 51.714483 | 37,493 |
13,057 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import warnings
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import AccessError, ValidationError
from odoo.osv import expression
from odoo.tools import config
from odoo.tools.safe_eval import safe_eval, time
_logger = logging.getLogger(__name__)
class IrRule(models.Model):
    _name = 'ir.rule'
    _description = 'Record Rule'
    _order = 'model_id DESC,id'
    # operations a rule may restrict
    _MODES = ['read', 'write', 'create', 'unlink']

    name = fields.Char(index=True)
    active = fields.Boolean(default=True, help="If you uncheck the active field, it will disable the record rule without deleting it (if you delete a native record rule, it may be re-created when you reload the module).")
    model_id = fields.Many2one('ir.model', string='Model', index=True, required=True, ondelete="cascade")
    # rules without groups are "global" (AND-ed together); rules with groups
    # are OR-ed together for the users belonging to those groups
    groups = fields.Many2many('res.groups', 'rule_group_rel', 'rule_group_id', 'group_id', ondelete='restrict')
    domain_force = fields.Text(string='Domain')
    perm_read = fields.Boolean(string='Apply for Read', default=True)
    perm_write = fields.Boolean(string='Apply for Write', default=True)
    perm_create = fields.Boolean(string='Apply for Create', default=True)
    perm_unlink = fields.Boolean(string='Apply for Delete', default=True)

    _sql_constraints = [
        ('no_access_rights',
         'CHECK (perm_read!=False or perm_write!=False or perm_create!=False or perm_unlink!=False)',
         'Rule must have at least one checked access right !'),
    ]
def _eval_context_for_combinations(self):
    """Returns a dictionary to use as evaluation context for
    ir.rule domains, when the goal is to obtain python lists
    that are easier to parse and combine, but not to
    actually execute them."""
    # unquoted placeholders keep 'user' and 'time' symbolic in the domains
    return {name: tools.unquote(name) for name in ('user', 'time')}
@api.model
def _eval_context(self):
    """Returns a dictionary to use as evaluation context for
    ir.rule domains.

    Note: company_ids contains the ids of the activated companies
    by the user with the switch company menu. These companies are
    filtered and trusted.
    """
    env = self.env
    # strip the context from 'user' so that the domain evaluation does not
    # depend on the current context
    context = {'user': env.user.with_context({})}
    context['time'] = time
    context['company_ids'] = env.companies.ids
    context['company_id'] = env.company.id
    return context
@api.depends('groups')
def _compute_global(self):
    # a rule without any group applies to every user
    for record in self:
        record['global'] = not record.groups
@api.constrains('model_id')
def _check_model_name(self):
    """Forbid defining record rules on the 'ir.rule' model itself."""
    for rule in self:
        if rule.model_id.model == self._name:
            raise ValidationError(_('Rules can not be applied on the Record Rules model.'))
def _compute_domain_keys(self):
    """Return the list of context keys to use for caching ``_compute_domain``."""
    # overridable hook; the base cache key only depends on the active companies
    return ['allowed_company_ids']
def _get_failing(self, for_records, mode='read'):
    """ Returns the rules for the mode for the current user which fail on
    the specified records.

    Can return any global rule and/or all local rules (since local rules
    are OR-ed together, the entire group succeeds or fails, while global
    rules get AND-ed and can each fail)
    """
    # empty recordset of the right model, sudoed so the probe searches below
    # are not themselves filtered by record rules
    Model = for_records.browse(()).sudo()
    eval_context = self._eval_context()

    all_rules = self._get_rules(Model._name, mode=mode).sudo()

    # first check if the group rules fail for any record (aka if
    # searching on (records, group_rules) filters out some of the records)
    group_rules = all_rules.filtered(lambda r: r.groups and r.groups & self.env.user.groups_id)
    group_domains = expression.OR([
        safe_eval(r.domain_force, eval_context) if r.domain_force else []
        for r in group_rules
    ])
    # if all records get returned, the group rules are not failing
    if Model.search_count(expression.AND([[('id', 'in', for_records.ids)], group_domains])) == len(for_records):
        group_rules = self.browse(())

    # failing rules are previously selected group rules or any failing global rule
    def is_failing(r, ids=for_records.ids):
        # a rule fails if restricting the search to its domain loses records
        dom = safe_eval(r.domain_force, eval_context) if r.domain_force else []
        return Model.search_count(expression.AND([
            [('id', 'in', ids)],
            expression.normalize_domain(dom)
        ])) < len(ids)

    return all_rules.filtered(lambda r: r in group_rules or (not r.groups and is_failing(r))).with_user(self.env.user)
def _get_rules(self, model_name, mode='read'):
    """ Returns all the rules matching the model for the mode for the
    current user.
    """
    if mode not in self._MODES:
        raise ValueError('Invalid mode: %r' % (mode,))

    # the superuser bypasses record rules entirely
    if self.env.su:
        return self.browse(())

    # {mode} is interpolated from the _MODES whitelist checked above,
    # never from user input, so this format() is injection-safe
    query = """ SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id=m.id)
                WHERE m.model=%s AND r.active AND r.perm_{mode}
                AND (r.id IN (SELECT rule_group_id FROM rule_group_rel rg
                              JOIN res_groups_users_rel gu ON (rg.group_id=gu.gid)
                              WHERE gu.uid=%s)
                     OR r.global)
                ORDER BY r.id
            """.format(mode=mode)
    self._cr.execute(query, (model_name, self._uid))
    return self.browse(row[0] for row in self._cr.fetchall())
@api.model
@tools.conditional(
    # do not cache in 'xml' dev mode, rules may be reloaded from data files
    'xml' not in config['dev_mode'],
    tools.ormcache('self.env.uid', 'self.env.su', 'model_name', 'mode',
                   'tuple(self._compute_domain_context_values())'),
)
def _compute_domain(self, model_name, mode="read"):
    """Return the combined domain enforced by active rules on ``model_name``
    for ``mode`` and the current user, or None when no rule applies."""
    rules = self._get_rules(model_name, mode=mode)
    if not rules:
        return

    # browse user and rules as SUPERUSER_ID to avoid access errors!
    eval_context = self._eval_context()
    user_groups = self.env.user.groups_id
    global_domains = []  # list of domains
    group_domains = []  # list of domains
    for rule in rules.sudo():
        # evaluate the domain for the current user
        dom = safe_eval(rule.domain_force, eval_context) if rule.domain_force else []
        dom = expression.normalize_domain(dom)
        if not rule.groups:
            global_domains.append(dom)
        elif rule.groups & user_groups:
            group_domains.append(dom)

    # combine global domains and group domains
    if not group_domains:
        return expression.AND(global_domains)
    return expression.AND(global_domains + [expression.OR(group_domains)])
def _compute_domain_context_values(self):
    """Yield the context values used as part of the ``_compute_domain``
    ormcache key."""
    for key in self._compute_domain_keys():
        value = self._context.get(key)
        if isinstance(value, list):
            # lists are unhashable; a tuple keeps the value usable as a
            # cache key (and stays sensitive to the order of company ids,
            # which is slightly more miss-y but safer than a frozenset)
            value = tuple(value)
        yield value
@api.model
def clear_cache(self):
    """ Deprecated, use `clear_caches` instead. """
    # emit an actual DeprecationWarning, consistent with domain_get() below,
    # instead of deprecating silently in a docstring only
    warnings.warn(
        "Deprecated IrRule.clear_cache(), use IrRule.clear_caches() instead",
        DeprecationWarning,
    )
    self.clear_caches()
@api.model
def domain_get(self, model_name, mode='read'):
    """Deprecated: the returned table list misses the joins that are present
    in the generated Query object, making the result unsafe to use."""
    warnings.warn(
        "Unsafe and deprecated IrRule.domain_get(), "
        "use IrRule._compute_domain() and expression().query instead",
        DeprecationWarning,
    )
    dom = self._compute_domain(model_name, mode)
    if not dom:
        return [], [], ['"%s"' % self.env[model_name]._table]
    # _where_calc is called as superuser. This means that rules can
    # involve objects on which the real uid has no acces rights.
    # This means also there is no implicit restriction (e.g. an object
    # references another object the user can't see).
    query = self.env[model_name].sudo()._where_calc(dom, active_test=False)
    return query.where_clause, query.where_clause_params, query.tables
def unlink(self):
    """Delete the rules, then invalidate the cached rule domains."""
    result = super(IrRule, self).unlink()
    self.clear_caches()
    return result
@api.model_create_multi
def create(self, vals_list):
    """Create the rules and invalidate the cached domains so the new
    restrictions apply immediately."""
    records = super(IrRule, self).create(vals_list)
    # flush before clearing caches so recomputed domains see the new rows
    self.flush()
    self.clear_caches()
    return records
def write(self, vals):
    """Update the rules and invalidate the cached domains so the changed
    restrictions apply immediately."""
    result = super(IrRule, self).write(vals)
    # flush before clearing caches so recomputed domains see the new values
    self.flush()
    self.clear_caches()
    return result
def _make_access_error(self, operation, records):
    """Build the AccessError raised when record rules reject ``operation``
    on ``records``; includes debugging details for internal users in
    developer mode only."""
    _logger.info('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, records.ids[:6], self._uid, records._name)

    model = records._name
    description = self.env['ir.model']._get(model).name or model
    msg_heads = {
        # Messages are declared in extenso so they are properly exported in translation terms
        'read': _("Due to security restrictions, you are not allowed to access '%(document_kind)s' (%(document_model)s) records.", document_kind=description, document_model=model),
        'write': _("Due to security restrictions, you are not allowed to modify '%(document_kind)s' (%(document_model)s) records.", document_kind=description, document_model=model),
        'create': _("Due to security restrictions, you are not allowed to create '%(document_kind)s' (%(document_model)s) records.", document_kind=description, document_model=model),
        'unlink': _("Due to security restrictions, you are not allowed to delete '%(document_kind)s' (%(document_model)s) records.", document_kind=description, document_model=model)
    }
    operation_error = msg_heads[operation]
    resolution_info = _("Contact your administrator to request access if necessary.")

    # short message for non-internal users or when not in developer mode
    if not self.env.user.has_group('base.group_no_one') or not self.env.user.has_group('base.group_user'):
        msg = """{operation_error}
{resolution_info}""".format(
            operation_error=operation_error,
            resolution_info=resolution_info)
        return AccessError(msg)

    # This extended AccessError is only displayed in debug mode.
    # Note that by default, public and portal users do not have
    # the group "base.group_no_one", even if debug mode is enabled,
    # so it is relatively safe here to include the list of rules and record names.
    rules = self._get_failing(records, mode=operation).sudo()

    records_description = ', '.join(['%s (id=%s)' % (rec.display_name, rec.id) for rec in records[:6].sudo()])
    failing_records = _("Records: %s", records_description)

    user_description = '%s (id=%s)' % (self.env.user.name, self.env.user.id)
    failing_user = _("User: %s", user_description)

    rules_description = '\n'.join('- %s' % rule.name for rule in rules)
    failing_rules = _("This restriction is due to the following rules:\n%s", rules_description)

    if any('company_id' in (r.domain_force or []) for r in rules):
        failing_rules += "\n\n" + _('Note: this might be a multi-company issue.')

    msg = """{operation_error}
{failing_records}
{failing_user}
{failing_rules}
{resolution_info}""".format(
        operation_error=operation_error,
        failing_records=failing_records,
        failing_user=failing_user,
        failing_rules=failing_rules,
        resolution_info=resolution_info)

    # clean up the cache of records prefetched with display_name above
    for record in records[:6]:
        record._cache.clear()
    return AccessError(msg)
#
# Hack for field 'global': this field cannot be defined like others, because
# 'global' is a Python keyword. Therefore, we add it to the class by assignment.
# Note that the attribute '_module' is normally added by the class' metaclass.
#
global_ = fields.Boolean(compute='_compute_global', store=True,
                         help="If no group is specified the rule is global and applied to everyone")
setattr(IrRule, 'global', global_)
# let the field know the attribute name it has been assigned to
global_.__set_name__(IrRule, 'global')
| 45.653846 | 13,057 |
105,023 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import itertools
import logging
import re
import psycopg2
from ast import literal_eval
from collections import defaultdict
from collections.abc import Mapping
from operator import itemgetter
from psycopg2 import sql
from odoo import api, fields, models, tools, _, Command
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.osv import expression
from odoo.tools import pycompat, unique, OrderedSet
from odoo.tools.safe_eval import safe_eval, datetime, dateutil, time
_logger = logging.getLogger(__name__)

# context flag set while uninstalling a module; allows unlinking base records
MODULE_UNINSTALL_FLAG = '_force_unlink'

# matches a single term of an _order specification, e.g. '"x_sequence" asc'
RE_ORDER_FIELDS = re.compile(r'"?(\w+)"?\s*(?:asc|desc)?', flags=re.I)

# base environment for doing a safe_eval
SAFE_EVAL_BASE = {
    'datetime': datetime,
    'dateutil': dateutil,
    'time': time,
}
def make_compute(text, deps):
    """ Return a compute function from its code body and dependencies. """
    def func(self):
        return safe_eval(text, SAFE_EVAL_BASE, {'self': self}, mode="exec")
    dependencies = [name.strip() for name in deps.split(",")] if deps else []
    return api.depends(*dependencies)(func)
def mark_modified(records, fnames):
    """ Mark the given fields as modified on records. """
    # protect all modified fields, to avoid them being recomputed
    modified_fields = [records._fields[fname] for fname in fnames]
    with records.env.protecting(modified_fields, records):
        records.modified(fnames)
def model_xmlid(module, model_name):
    """ Return the XML id of the given model. """
    return '{}.model_{}'.format(module, model_name.replace('.', '_'))
def field_xmlid(module, model_name, field_name):
    """ Return the XML id of the given field. """
    sanitized_model = model_name.replace('.', '_')
    return '{}.field_{}__{}'.format(module, sanitized_model, field_name)
def selection_xmlid(module, model_name, field_name, value):
    """ Return the XML id of the given selection. """
    sanitized_model = model_name.replace('.', '_')
    sanitized_value = value.replace('.', '_').replace(' ', '_').lower()
    return '{}.selection__{}__{}__{}'.format(module, sanitized_model, field_name, sanitized_value)
# generic INSERT and UPDATE queries
INSERT_QUERY = "INSERT INTO {table} ({cols}) VALUES {rows} RETURNING id"
UPDATE_QUERY = "UPDATE {table} SET {assignment} WHERE {condition} RETURNING id"

# wrap an SQL identifier in double quotes
quote = '"{}"'.format
def query_insert(cr, table, rows):
    """ Insert rows in a table. ``rows`` is a list of dicts, all with the same
    set of keys. Return the ids of the new rows.
    """
    if isinstance(rows, Mapping):
        rows = [rows]
    cols = list(rows[0])
    query = INSERT_QUERY.format(
        table=quote(table),
        cols=",".join(quote(col) for col in cols),
        rows=",".join(["%s"] * len(rows)),
    )
    params = [tuple(row[col] for col in cols) for row in rows]
    cr.execute(query, params)
    return [row[0] for row in cr.fetchall()]
def query_update(cr, table, values, selectors):
    """ Update the table with the given values (dict), and use the columns in
    ``selectors`` to select the rows to update.
    """
    # every key of ``values`` that is not a selector becomes an assignment
    setters = set(values) - set(selectors)
    assignment = ",".join('"{0}"=%({0})s'.format(col) for col in setters)
    condition = " AND ".join('"{0}"=%({0})s'.format(col) for col in selectors)
    query = UPDATE_QUERY.format(
        table=quote(table),
        assignment=assignment,
        condition=condition,
    )
    cr.execute(query, values)
    return [row[0] for row in cr.fetchall()]
def upsert(cr, table, cols, rows, conflict):
    """ Insert or update the table with the given rows.

    :param cr: database cursor
    :param table: table name
    :param cols: list of column names
    :param rows: list of tuples, where each tuple value corresponds to a column name
    :param conflict: list of column names to put into the ON CONFLICT clause
    :return: the ids of the inserted or updated rows
    """
    query = """
        INSERT INTO {table} ({cols}) VALUES {rows}
        ON CONFLICT ({conflict}) DO UPDATE SET ({cols}) = ({excluded})
        RETURNING id
    """.format(
        table=quote(table),
        cols=", ".join(quote(col) for col in cols),
        rows=", ".join("%s" for row in rows),
        conflict=", ".join(conflict),
        excluded=", ".join("EXCLUDED." + quote(col) for col in cols),
    )
    cr.execute(query, rows)
    return [row[0] for row in cr.fetchall()]
#
# IMPORTANT: this must be the first model declared in the module
#
class Base(models.AbstractModel):
    """ The base model, which is implicitly inherited by all models. """
    _name = 'base'
    _description = 'Base'
class Unknown(models.AbstractModel):
    """
    Abstract model used as a substitute for relational fields with an unknown
    comodel.
    """
    _name = '_unknown'
    _description = 'Unknown'
class IrModel(models.Model):
    _name = 'ir.model'
    _description = "Models"
    _order = 'model'

    def _default_field_id(self):
        # do not create the default 'x_name' field when importing data
        if self.env.context.get('install_mode'):
            return []  # no default field when importing
        return [Command.create({'name': 'x_name', 'field_description': 'Name', 'ttype': 'char', 'copied': True})]

    name = fields.Char(string='Model Description', translate=True, required=True)
    model = fields.Char(default='x_', required=True, index=True)
    order = fields.Char(string='Order', default='id', required=True,
                        help='SQL expression for ordering records in the model; e.g. "x_sequence asc, id desc"')
    info = fields.Text(string='Information')
    field_id = fields.One2many('ir.model.fields', 'model_id', string='Fields', required=True, copy=True,
                               default=_default_field_id)
    inherited_model_ids = fields.Many2many('ir.model', compute='_inherited_models', string="Inherited models",
                                           help="The list of models that extends the current model.")
    state = fields.Selection([('manual', 'Custom Object'), ('base', 'Base Object')], string='Type', default='manual', readonly=True)
    access_ids = fields.One2many('ir.model.access', 'model_id', string='Access')
    rule_ids = fields.One2many('ir.rule', 'model_id', string='Record Rules')
    transient = fields.Boolean(string="Transient Model")
    modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the object is defined or inherited')
    view_ids = fields.One2many('ir.ui.view', compute='_view_ids', string='Views')
    count = fields.Integer(compute='_compute_count', string="Count (Incl. Archived)",
                           help="Total number of records in this model")
@api.depends()
def _inherited_models(self):
    """Compute the models extending each model through ``_inherits``."""
    self.inherited_model_ids = False
    for model in self:
        parent_names = list(self.env[model.model]._inherits)
        model.inherited_model_ids = self.search([('model', 'in', parent_names)]) if parent_names else False
@api.depends()
def _in_modules(self):
    """Compute the installed modules in which each model is defined."""
    installed = self.env['ir.module.module'].search([('state', '=', 'installed')])
    installed_names = set(installed.mapped('name'))
    external_ids = models.Model._get_external_ids(self)
    for model in self:
        # the module part of each XML id tells where the model is declared
        defining_modules = {xml_id.split('.')[0] for xml_id in external_ids[model.id]}
        model.modules = ", ".join(sorted(installed_names & defining_modules))
@api.depends()
def _view_ids(self):
    """Compute the views defined on each model."""
    View = self.env['ir.ui.view']
    for model in self:
        model.view_ids = View.search([('model', '=', model.model)])
@api.depends()
def _compute_count(self):
    """Count the rows of each model's table, archived records included."""
    cr = self.env.cr
    self.count = 0
    for model in self:
        records = self.env[model.model]
        # abstract and non-auto models have no backing table to count
        if records._abstract or not records._auto:
            continue
        cr.execute(sql.SQL('SELECT COUNT(*) FROM {}').format(sql.Identifier(records._table)))
        model.count = cr.fetchone()[0]
@api.constrains('model')
def _check_model_name(self):
    """Validate the technical name of custom (manual) models."""
    for model in self.filtered(lambda m: m.state == 'manual'):
        if not model.model.startswith('x_'):
            raise ValidationError(_("The model name must start with 'x_'."))
        if not models.check_object_name(model.model):
            raise ValidationError(_("The model name can only contain lowercase characters, digits, underscores and dots."))
@api.constrains('order', 'field_id')
def _check_order(self):
    """Ensure the _order spec is valid SQL and only uses stored fields."""
    for model in self:
        try:
            # regex check for the whole clause ('is it valid sql?')
            model._check_qorder(model.order)
        except UserError as e:
            raise ValidationError(str(e))
        # add MAGIC_COLUMNS to 'stored_fields' in case 'model' has not been
        # initialized yet, or 'field_id' is not up-to-date in cache
        stored_fields = set(
            model.field_id.filtered('store').mapped('name') + models.MAGIC_COLUMNS
        )
        for field in RE_ORDER_FIELDS.findall(model.order):
            if field not in stored_fields:
                raise ValidationError(_("Unable to order by %s: fields used for ordering must be present on the model and stored.", field))
# the technical model name doubles as the record's identity
_sql_constraints = [
    ('obj_name_uniq', 'unique (model)', 'Each model must have a unique name.'),
]
def _get(self, name):
    """ Return the (sudoed) `ir.model` record with the given name.
    The result may be an empty recordset if the model is not found.
    """
    return self.sudo().browse(self._get_id(name) if name else False)
@tools.ormcache('name')
def _get_id(self, name):
    """Return the database id of the model named ``name`` (cached)."""
    cr = self.env.cr
    cr.execute("SELECT id FROM ir_model WHERE model=%s", (name,))
    row = cr.fetchone()
    return row and row[0]
# overridden to allow searching both on model name (field 'model') and model
# description (field 'name')
@api.model
def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
    domain = list(args or [])
    domain += ['|', ('model', operator, name), ('name', operator, name)]
    return self._search(domain, limit=limit, access_rights_uid=name_get_uid)
def _drop_table(self):
    """Drop the database table or view backing each model and discard the
    model's translations."""
    for model in self:
        # the model may already be gone from the registry
        current_model = self.env.get(model.model)
        if current_model is not None:
            table = current_model._table
            kind = tools.table_kind(self._cr, table)
            if kind == 'v':
                self._cr.execute(sql.SQL('DROP VIEW {}').format(sql.Identifier(table)))
            elif kind == 'r':
                self._cr.execute(sql.SQL('DROP TABLE {} CASCADE').format(sql.Identifier(table)))
            # discard all translations for this model
            self._cr.execute("""
                DELETE FROM ir_translation
                WHERE type IN ('model', 'model_terms') AND name LIKE %s
            """, [model.model + ',%'])
        else:
            _logger.runbot('The model %s could not be dropped because it did not exist in the registry.', model.model)
    return True
@api.ondelete(at_uninstall=False)
def _unlink_if_manual(self):
    # Prevent manual deletion of module tables
    offending = self.filtered(lambda model: model.state != 'manual')
    if offending:
        raise UserError(_("Model '%s' contains module data and cannot be removed.", offending[0].name))
def unlink(self):
    """Delete the models after cleaning up what depends on them (fields,
    relational fields pointing to them, user crons, database tables), then
    reload the registry."""
    # prevent screwing up fields that depend on these models' fields
    self.field_id._prepare_update()

    # delete fields whose comodel is being removed
    self.env['ir.model.fields'].search([('relation', 'in', self.mapped('model'))]).unlink()

    # delete ir_crons created by user
    crons = self.env['ir.cron'].with_context(active_test=False).search([('model_id', 'in', self.ids)])
    if crons:
        crons.unlink()

    self._drop_table()
    res = super(IrModel, self).unlink()

    # Reload registry for normal unlink only. For module uninstall, the
    # reload is done independently in odoo.modules.loading.
    if not self._context.get(MODULE_UNINSTALL_FLAG):
        # setup models; this automatically removes model from registry
        self.flush()
        self.pool.setup_models(self._cr)

    return res
def write(self, vals):
    """Write on models, forbidding changes to the immutable technical
    attributes, and reload the registry when the ordering changes."""
    if '__last_update' in self._context:
        cleaned_context = {k: v for k, v in self._context.items() if k != '__last_update'}
        self = self.with_context(cleaned_context)
    # 'model', 'state' and 'transient' define the model's identity/behavior
    # in the registry and cannot be altered on existing records
    if 'model' in vals and any(rec.model != vals['model'] for rec in self):
        raise UserError(_('Field "Model" cannot be modified on models.'))
    if 'state' in vals and any(rec.state != vals['state'] for rec in self):
        raise UserError(_('Field "Type" cannot be modified on models.'))
    if 'transient' in vals and any(rec.transient != vals['transient'] for rec in self):
        raise UserError(_('Field "Transient Model" cannot be modified on models.'))
    # Filter out operations 4 from field id, because the web client always
    # writes (4,id,False) even for non dirty items.
    if 'field_id' in vals:
        vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
    result = super(IrModel, self).write(vals)
    # ordering has been changed, reload registry to reflect update + signaling
    if 'order' in vals:
        self.flush()  # setup_models need to fetch the updated values from the db
        self.pool.setup_models(self._cr)
    return result
@api.model
def create(self, vals):
    """Create the model and, for custom models, register it and create its
    database table immediately."""
    record = super(IrModel, self).create(vals)
    if vals.get('state', 'manual') == 'manual':
        # setup models; this automatically adds model in registry
        self.flush()
        self.pool.setup_models(self._cr)
        # update database schema
        context = dict(self._context, update_custom_fields=True)
        self.pool.init_models(self._cr, [vals['model']], context)
    return record
@api.model
def name_create(self, name):
    """ Infer the model from the name. E.g.: 'My New Model' should become 'x_my_new_model'. """
    technical_name = 'x_' + '_'.join(name.lower().split(' '))
    record = self.create({'name': name, 'model': technical_name})
    return record.name_get()[0]
def _reflect_model_params(self, model):
    """ Return the values to write to the database for the given model. """
    # take the docstring of the most specific class in the MRO that has one
    info = next(cls.__doc__ for cls in type(model).mro() if cls.__doc__)
    return {
        'model': model._name,
        'name': model._description,
        'order': model._order,
        'info': info,
        'state': 'manual' if model._custom else 'base',
        'transient': model._transient,
    }
def _reflect_models(self, model_names):
    """ Reflect the given models. """
    # determine expected and existing rows
    rows = [
        self._reflect_model_params(self.env[model_name])
        for model_name in model_names
    ]
    # 'model' first: it is the upsert conflict key and the row identifier
    cols = list(unique(['model'] + list(rows[0])))
    expected = [tuple(row[col] for col in cols) for row in rows]

    cr = self.env.cr
    query = "SELECT {}, id FROM ir_model WHERE model IN %s".format(
        ", ".join(quote(col) for col in cols)
    )
    cr.execute(query, [tuple(model_names)])
    model_ids = {}
    existing = {}
    for row in cr.fetchall():
        model_ids[row[0]] = row[-1]
        existing[row[0]] = row[:-1]

    # create or update rows (only those that differ from the database)
    rows = [row for row in expected if existing.get(row[0]) != row]
    if rows:
        ids = upsert(self.env.cr, self._table, cols, rows, ['model'])
        for row, id_ in zip(rows, ids):
            model_ids[row[0]] = id_
        # defer cache invalidation until the registry is fully loaded
        self.pool.post_init(mark_modified, self.browse(ids), cols[1:])

    # update their XML id
    module = self._context.get('module')
    if not module:
        return

    data_list = []
    for model_name, model_id in model_ids.items():
        model = self.env[model_name]
        if model._module == module:
            # model._module is the name of the module that last extended model
            xml_id = model_xmlid(module, model_name)
            record = self.browse(model_id)
            data_list.append({'xml_id': xml_id, 'record': record})
    self.env['ir.model.data']._update_xmlids(data_list)
@api.model
def _instanciate(self, model_data):
    """ Return a class for the custom model given by parameters ``model_data``. """
    # the metaclass of models.Model registers the class from these attributes
    class CustomModel(models.Model):
        _name = pycompat.to_text(model_data['model'])
        _description = model_data['name']
        _module = False
        _custom = True
        _transient = bool(model_data['transient'])
        _order = model_data['order']
        __doc__ = model_data['info']

    return CustomModel
def _add_manual_models(self):
    """ Add extra models to the registry. """
    # clean up registry first
    for name, Model in list(self.pool.items()):
        if Model._custom:
            del self.pool.models[name]
            # remove the model's name from its parents' _inherit_children
            for Parent in Model.__bases__:
                if hasattr(Parent, 'pool'):
                    Parent._inherit_children.discard(name)
    # add manual models
    cr = self.env.cr
    cr.execute('SELECT * FROM ir_model WHERE state=%s', ['manual'])
    for model_data in cr.dictfetchall():
        model_class = self._instanciate(model_data)
        Model = model_class._build_model(self.pool, cr)
        if tools.table_kind(cr, Model._table) not in ('r', None):
            # not a regular table, so disable schema upgrades
            Model._auto = False
            # only enable log access (create_uid/date, ...) when the
            # underlying relation actually has those columns
            cr.execute(
                '''
                SELECT a.attname
                  FROM pg_attribute a
                  JOIN pg_class t
                    ON a.attrelid = t.oid
                   AND t.relname = %s
                 WHERE a.attnum > 0 -- skip system columns
                ''',
                [Model._table]
            )
            columns = {colinfo[0] for colinfo in cr.fetchall()}
            Model._log_access = set(models.LOG_ACCESS_COLUMNS) <= columns
# retrieve field types defined by the framework only (not extensions)
FIELD_TYPES = [(key, key) for key in sorted(fields.Field.by_type)]
class IrModelFields(models.Model):
    _name = 'ir.model.fields'
    _description = "Fields"
    _order = "name"
    _rec_name = 'field_description'

    name = fields.Char(string='Field Name', default='x_', required=True, index=True)
    complete_name = fields.Char(index=True)
    model = fields.Char(string='Model Name', required=True, index=True,
                        help="The technical name of the model this field belongs to")
    relation = fields.Char(string='Related Model',
                           help="For relationship fields, the technical name of the target model")
    relation_field = fields.Char(help="For one2many fields, the field on the target model that implement the opposite many2one relationship")
    relation_field_id = fields.Many2one('ir.model.fields', compute='_compute_relation_field_id',
                                        store=True, ondelete='cascade', string='Relation field')
    model_id = fields.Many2one('ir.model', string='Model', required=True, index=True, ondelete='cascade',
                               help="The model this field belongs to")
    field_description = fields.Char(string='Field Label', default='', required=True, translate=True)
    help = fields.Text(string='Field Help', translate=True)
    ttype = fields.Selection(selection=FIELD_TYPES, string='Field Type', required=True)
    # legacy textual representation, kept in sync with selection_ids
    selection = fields.Char(string="Selection Options (Deprecated)",
                            compute='_compute_selection', inverse='_inverse_selection')
    selection_ids = fields.One2many("ir.model.fields.selection", "field_id",
                                    string="Selection Options", copy=True)
    copied = fields.Boolean(string='Copied',
                            compute='_compute_copied', store=True, readonly=False,
                            help="Whether the value is copied when duplicating a record.")
    related = fields.Char(string='Related Field', help="The corresponding related field, if any. This must be a dot-separated list of field names.")
    related_field_id = fields.Many2one('ir.model.fields', compute='_compute_related_field_id',
                                       store=True, string="Related field", ondelete='cascade')
    required = fields.Boolean()
    readonly = fields.Boolean()
    index = fields.Boolean(string='Indexed')
    translate = fields.Boolean(string='Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)")
    size = fields.Integer()
    state = fields.Selection([('manual', 'Custom Field'), ('base', 'Base Field')], string='Type', default='manual', required=True, readonly=True, index=True)
    on_delete = fields.Selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
                                 string='On Delete', default='set null', help='On delete property for many2one fields')
    domain = fields.Char(default="[]", help="The optional domain to restrict possible values for relationship fields, "
                                            "specified as a Python expression defining a list of triplets. "
                                            "For example: [('color','=','red')]")
    groups = fields.Many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id')  # CLEANME unimplemented field (empty table)
    group_expand = fields.Boolean(string="Expand Groups",
                                  help="If checked, all the records of the target model will be included\n"
                                       "in a grouped result (e.g. 'Group By' filters, Kanban columns, etc.).\n"
                                       "Note that it can significantly reduce performance if the target model\n"
                                       "of the field contains a lot of records; usually used on models with\n"
                                       "few records (e.g. Stages, Job Positions, Event Types, etc.).")
    selectable = fields.Boolean(default=True)
    modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the field is defined')
    relation_table = fields.Char(help="Used for custom many2many fields to define a custom relation table name")
    column1 = fields.Char(string='Column 1', help="Column referring to the record in the model table")
    column2 = fields.Char(string="Column 2", help="Column referring to the record in the comodel table")
    compute = fields.Text(help="Code to compute the value of the field.\n"
                               "Iterate on the recordset 'self' and assign the field's value:\n\n"
                               "    for record in self:\n"
                               "        record['size'] = len(record.name)\n\n"
                               "Modules time, datetime, dateutil are available.")
    depends = fields.Char(string='Dependencies', help="Dependencies of compute method; "
                                                      "a list of comma-separated field names, like\n\n"
                                                      "    name, partner_id.name")
    store = fields.Boolean(string='Stored', default=True, help="Whether the value is stored in the database.")
@api.depends('relation', 'relation_field')
def _compute_relation_field_id(self):
    """ Resolve the inverse many2one record of a custom one2many field. """
    for record in self:
        has_inverse = record.state == 'manual' and record.relation_field
        record.relation_field_id = (
            self._get(record.relation, record.relation_field) if has_inverse else False
        )
@api.depends('related')
def _compute_related_field_id(self):
    """ Resolve the target field record of a custom related field. """
    for record in self:
        if record.state != 'manual' or not record.related:
            record.related_field_id = False
        else:
            target = record._related_field()
            record.related_field_id = self._get(target.model_name, target.name)
@api.depends('selection_ids')
def _compute_selection(self):
    """ Render the selection list as a Python-literal string for the form view. """
    Selection = self.env['ir.model.fields.selection']
    for record in self:
        if record.ttype not in ('selection', 'reference'):
            record.selection = False
        else:
            record.selection = str(Selection._get_selection(record.id))
def _inverse_selection(self):
    """ Parse the literal selection string back into selection records. """
    Selection = self.env['ir.model.fields.selection']
    for record in self:
        pairs = literal_eval(record.selection or "[]")
        Selection._update_selection(record.model, record.name, pairs)
@api.depends('ttype', 'related', 'compute')
def _compute_copied(self):
    """ A field is copied by default unless it is a one2many or derived. """
    for record in self:
        derived = bool(record.related or record.compute)
        record.copied = record.ttype != 'one2many' and not derived
@api.depends()
def _in_modules(self):
    """ Compute the installed modules that define each field (via external ids). """
    installed = self.env['ir.module.module'].search([('state', '=', 'installed')])
    installed_names = set(installed.mapped('name'))
    external_ids = models.Model._get_external_ids(self)
    for record in self:
        defining = {xml_id.split('.')[0] for xml_id in external_ids[record.id]}
        record.modules = ", ".join(sorted(installed_names & defining))
@api.constrains('domain')
def _check_domain(self):
    """ Ensure each domain is at least a valid (safe) Python expression. """
    for record in self:
        safe_eval(record.domain or '[]')
@api.constrains('name', 'state')
def _check_name(self):
    """ Enforce the 'x_' prefix for manual fields and PostgreSQL name rules. """
    for record in self:
        if record.state == 'manual' and not record.name.startswith('x_'):
            raise ValidationError(_("Custom fields must have a name that starts with 'x_' !"))
        try:
            models.check_pg_name(record.name)
        except ValidationError:
            raise ValidationError(
                _("Field names can only contain characters, digits and underscores (up to 63).")
            )
# database-level invariants: one field name per model, non-negative size
_sql_constraints = [
    ('name_unique', 'UNIQUE(model, name)', "Field names must be unique per model."),
    ('size_gt_zero', 'CHECK (size>=0)', 'Size of the field cannot be negative.'),
]
def _related_field(self):
    """ Return the ``Field`` instance corresponding to ``self.related``.

    Walks the dotted path on the field's model; every step but the last
    must be relational.

    :raises UserError: if a step is unknown or an intermediate step is
        not relational.
    """
    path = self.related.split(".")
    model = self.env[self.model or self.model_id.model]
    field = None
    for position, step in enumerate(path, start=1):
        field = model._fields.get(step)
        if field is None:
            raise UserError(_("Unknown field name '%s' in related field '%s'") % (step, self.related))
        if position < len(path) and not field.relational:
            raise UserError(_("Non-relational field name '%s' in related field '%s'") % (step, self.related))
        model = model[step]
    return field
@api.constrains('related')
def _check_related(self):
    """ Validate that a custom related field matches its target's type and comodel. """
    for record in self:
        if record.state != 'manual' or not record.related:
            continue
        target = record._related_field()
        if target.type != record.ttype:
            raise ValidationError(_("Related field '%s' does not have type '%s'") % (record.related, record.ttype))
        if target.relational and target.comodel_name != record.relation:
            raise ValidationError(_("Related field '%s' does not have comodel '%s'") % (record.related, record.relation))
@api.onchange('related')
def _onchange_related(self):
    """ Pre-fill type/comodel from the related path; related fields are read-only. """
    if not self.related:
        return
    try:
        target = self._related_field()
    except UserError as exc:
        return {'warning': {'title': _("Warning"), 'message': exc}}
    self.ttype = target.type
    self.relation = target.comodel_name
    self.readonly = True
@api.constrains('depends')
def _check_depends(self):
    """ Check whether all fields in dependencies are valid. """
    for record in self:
        if not record.depends:
            continue
        for seq in record.depends.split(","):
            dependency = seq.strip()
            if not dependency:
                raise UserError(_("Empty dependency in %r") % (record.depends))
            model = self.env[record.model]
            path = dependency.split(".")
            for position, step in enumerate(path, start=1):
                if step == 'id':
                    raise UserError(_("Compute method cannot depend on field 'id'"))
                field = model._fields.get(step)
                if field is None:
                    raise UserError(_("Unknown field %r in dependency %r") % (step, dependency))
                if position < len(path) and not field.relational:
                    raise UserError(_("Non-relational field %r in dependency %r") % (step, dependency))
                model = model[step]
@api.onchange('compute')
def _onchange_compute(self):
    """ A field with compute code becomes read-only in the UI. """
    if self.compute:
        self.readonly = True
@api.constrains('relation_table')
def _check_relation_table(self):
    """ The custom many2many relation table name must be a valid PG identifier. """
    for record in self:
        if record.relation_table:
            models.check_pg_name(record.relation_table)
@api.model
def _custom_many2many_names(self, model_name, comodel_name):
    """ Return default names for the table and columns of a custom many2many field. """
    table1 = self.env[model_name]._table
    table2 = self.env[comodel_name]._table
    relation = 'x_%s_%s_rel' % tuple(sorted([table1, table2]))
    if table1 == table2:
        # self-referencing relation: columns cannot both be '<table>_id'
        return (relation, 'id1', 'id2')
    return (relation, '%s_id' % table1, '%s_id' % table2)
@api.onchange('ttype', 'model_id', 'relation')
def _onchange_ttype(self):
    """ Default the relation table/columns for custom many2many fields. """
    if self.ttype != 'many2many' or not self.model_id or not self.relation:
        self.relation_table = False
        self.column1 = False
        self.column2 = False
        return
    if self.relation not in self.env:
        return {
            'warning': {
                'title': _('Model %s does not exist', self.relation),
                'message': _('Please specify a valid model for the object relation'),
            }
        }
    defaults = self._custom_many2many_names(self.model_id.model, self.relation)
    self.relation_table, self.column1, self.column2 = defaults
@api.onchange('relation_table')
def _onchange_relation_table(self):
    """ When a custom relation table is chosen, reuse the columns of a
    candidate inverse field sharing that table, or warn that the table is
    already used by other (possibly incompatible) fields.
    """
    if self.relation_table:
        # check whether other fields use the same table
        others = self.search([('ttype', '=', 'many2many'),
                              ('relation_table', '=', self.relation_table),
                              ('id', 'not in', self.ids)])
        if others:
            for other in others:
                if (other.model, other.relation) == (self.relation, self.model):
                    # other is a candidate inverse field: swap its columns
                    self.column1 = other.column2
                    self.column2 = other.column1
                    return
            # fix of garbled warning text ("if used for other" -> "is used by other")
            return {'warning': {
                'title': _("Warning"),
                'message': _("The table %r is used by other, possibly incompatible fields.", self.relation_table),
            }}
@api.onchange('required', 'ttype', 'on_delete')
def _onchange_required(self):
    """ Warn when a required many2one keeps the 'set null' ondelete policy. """
    for record in self:
        misconfigured = (
            record.ttype == 'many2one'
            and record.required
            and record.on_delete == 'set null'
        )
        if misconfigured:
            return {'warning': {'title': _("Warning"), 'message': _(
                "The m2o field %s is required but declares its ondelete policy "
                "as being 'set null'. Only 'restrict' and 'cascade' make sense.", record.name,
            )}}
def _get(self, model_name, name):
    """ Return the (sudoed) `ir.model.fields` record with the given model and name.
    The result may be an empty recordset if the model is not found.
    """
    field_id = None
    if model_name and name:
        field_id = self._get_ids(model_name).get(name)
    return self.sudo().browse(field_id)
@tools.ormcache('model_name')
def _get_ids(self, model_name):
    """ Cached {field_name: field_id} mapping for the given model. """
    self.env.cr.execute(
        "SELECT name, id FROM ir_model_fields WHERE model=%s", [model_name]
    )
    return dict(self.env.cr.fetchall())
def _drop_column(self):
    """ Drop the database storage of the fields in ``self``: their table
    column (for stored fields), the relation table of custom many2many
    fields (when no other field still uses it), and their translations.
    Also pops manual fields from the registry model.
    """
    tables_to_drop = set()
    for field in self:
        # magic columns (id, create_uid, ...) are managed by the ORM itself
        if field.name in models.MAGIC_COLUMNS:
            continue
        model = self.env.get(field.model)
        is_model = model is not None
        if field.store:
            # TODO: Refactor this brol in master
            # only drop a real column on a regular table ('r' = ordinary table)
            if is_model and tools.column_exists(self._cr, model._table, field.name) and \
                    tools.table_kind(self._cr, model._table) == 'r':
                self._cr.execute(sql.SQL('ALTER TABLE {} DROP COLUMN {} CASCADE').format(
                    sql.Identifier(model._table), sql.Identifier(field.name),
                ))
        if field.state == 'manual' and field.ttype == 'many2many':
            # remember the m2m relation table; dropped below if unshared
            rel_name = field.relation_table or (is_model and model._fields[field.name].relation)
            tables_to_drop.add(rel_name)
        if field.state == 'manual' and is_model:
            model._pop_field(field.name)
        if field.translate:
            # discard all translations for this field
            self._cr.execute("""
                DELETE FROM ir_translation
                WHERE type IN ('model', 'model_terms') AND name=%s
            """, ['%s,%s' % (field.model, field.name)])
    if tables_to_drop:
        # drop the relation tables that are not used by other fields
        self._cr.execute("""SELECT relation_table FROM ir_model_fields
                            WHERE relation_table IN %s AND id NOT IN %s""",
                         (tuple(tables_to_drop), tuple(self.ids)))
        tables_to_keep = set(row[0] for row in self._cr.fetchall())
        for rel_name in tables_to_drop - tables_to_keep:
            self._cr.execute(sql.SQL('DROP TABLE {}').format(sql.Identifier(rel_name)))
    return True
def _prepare_update(self):
    """ Check whether the fields in ``self`` may be modified or removed.
        This method prevents the modification/deletion of many2one fields
        that have an inverse one2many, for instance.

        Outside of module uninstall, a dependent manual field blocks the
        operation with a UserError; during uninstall, dependants are
        force-unlinked instead. Manual fields are then popped from the
        registry and all views are re-validated.
    """
    failed_dependencies = []
    for rec in self:
        model = self.env.get(rec.model)
        if model is not None:
            if rec.name in model._fields:
                field = model._fields[rec.name]
            else:
                # field hasn't been loaded (yet?)
                continue
            # collect manual fields computed from this one
            for dep in model._dependent_fields(field):
                if dep.manual:
                    failed_dependencies.append((field, dep))
            # collect manual one2many fields whose inverse is this field
            for inverse in model.pool.field_inverses[field]:
                if inverse.manual and inverse.type == 'one2many':
                    failed_dependencies.append((field, inverse))
    uninstalling = self._context.get(MODULE_UNINSTALL_FLAG)
    if not uninstalling and failed_dependencies:
        msg = _("The field '%s' cannot be removed because the field '%s' depends on it.")
        raise UserError(msg % failed_dependencies[0])
    elif failed_dependencies:
        # uninstall mode: cascade-remove the dependent manual fields
        dependants = {rel[1] for rel in failed_dependencies}
        to_unlink = [self._get(field.model_name, field.name) for field in dependants]
        self.browse().union(*to_unlink).unlink()
    # only manual fields need registry/view cleanup below
    self = self.filtered(lambda record: record.state == 'manual')
    if not self:
        return
    # remove pending write of this field
    # DLE P16: if there are pending towrite of the field we currently try to unlink, pop them out from the towrite queue
    # test `test_unlink_with_dependant`
    for record in self:
        for record_values in self.env.all.towrite[record.model].values():
            record_values.pop(record.name, None)
    # remove fields from registry, and check that views are not broken
    fields = [self.env[record.model]._pop_field(record.name) for record in self]
    domain = expression.OR([('arch_db', 'like', record.name)] for record in self)
    views = self.env['ir.ui.view'].search(domain)
    try:
        for view in views:
            view._check_xml()
    except Exception:
        if not uninstalling:
            # `view` is the view whose validation failed
            raise UserError("\n".join([
                _("Cannot rename/delete fields that are still present in views:"),
                _("Fields: %s") % ", ".join(str(f) for f in fields),
                _("View: %s", view.name),
            ]))
        else:
            # uninstall mode
            _logger.warning("The following fields were force-deleted to prevent a registry crash "
                            + ", ".join(str(f) for f in fields)
                            + " the following view might be broken %s" % view.name)
    finally:
        if not uninstalling:
            # the registry has been modified, restore it
            self.pool.setup_models(self._cr)
@api.ondelete(at_uninstall=False)
def _unlink_if_manual(self):
    """ Prevent manual deletion of module-defined (non-manual) columns. """
    non_manual = self.filtered(lambda field: field.state != 'manual')
    if non_manual:
        raise UserError(_("This column contains module data and cannot be removed!"))
def unlink(self):
    """ Delete the field records, their database storage, and scrub every
    registry structure (triggers, inverses, tocompute) that still refers
    to the corresponding registry fields, then reload the registry.
    """
    if not self:
        return True
    # prevent screwing up fields that depend on these fields
    self._prepare_update()
    # determine registry fields corresponding to self
    fields = OrderedSet()
    for record in self:
        try:
            fields.add(self.pool[record.model]._fields[record.name])
        except KeyError:
            # model or field not present in the registry: nothing to scrub
            pass
    # clean the registry from the fields to remove
    self.pool.registry_invalidated = True
    # discard the removed fields from field triggers
    def discard_fields(tree):
        # tree is a nested dict: None key -> set of triggered fields,
        # field key -> subtree
        # discard fields from the tree's root node
        tree.get(None, set()).difference_update(fields)
        # discard subtrees labelled with any of the fields
        for field in fields:
            tree.pop(field, None)
        # discard fields from remaining subtrees
        for field, subtree in tree.items():
            if field is not None:
                discard_fields(subtree)
    discard_fields(self.pool.field_triggers)
    # discard the removed fields from field inverses
    self.pool.field_inverses.discard_keys_and_values(fields)
    # discard the removed fields from fields to compute
    for field in fields:
        self.env.all.tocompute.pop(field, None)
    # capture model names before the records disappear
    model_names = self.mapped('model')
    self._drop_column()
    res = super(IrModelFields, self).unlink()
    # The field we just deleted might be inherited, and the registry is
    # inconsistent in this case; therefore we reload the registry.
    if not self._context.get(MODULE_UNINSTALL_FLAG):
        # setup models; this re-initializes models in registry
        self.flush()
        self.pool.setup_models(self._cr)
        # update database schema of model and its descendant models
        models = self.pool.descendants(model_names, '_inherits')
        self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
    return res
@api.model
def create(self, vals):
    """ Create a field record; for manual fields, validate the relation
    and (re)initialize the registry and database schema of the model.
    """
    if 'model_id' in vals:
        # keep the denormalized 'model' char field in sync with model_id
        model_data = self.env['ir.model'].browse(vals['model_id'])
        vals['model'] = model_data.model
    # for self._get_ids() in _update_selection()
    self.clear_caches()
    res = super(IrModelFields, self).create(vals)
    if vals.get('state', 'manual') == 'manual':
        if vals.get('relation') and not self.env['ir.model'].search([('model', '=', vals['relation'])]):
            raise UserError(_("Model %s does not exist!", vals['relation']))
        if vals.get('ttype') == 'one2many':
            # NOTE(review): 'model_id' (a many2one) is compared to the model
            # name string vals['relation'] here — presumably relying on
            # name-based matching; confirm it selects the intended records
            if not self.search([('model_id', '=', vals['relation']), ('name', '=', vals['relation_field']), ('ttype', '=', 'many2one')]):
                raise UserError(_("Many2one %s on model %s does not exist!") % (vals['relation_field'], vals['relation']))
        self.clear_caches()  # for _existing_field_data()
        if vals['model'] in self.pool:
            # setup models; this re-initializes model in registry
            self.flush()
            self.pool.setup_models(self._cr)
            # update database schema of model and its descendant models
            models = self.pool.descendants([vals['model']], '_inherits')
            self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
    return res
def write(self, vals):
    """ Write on manual fields only; a single field rename is supported per
    call (the column and its index are renamed in the database), and the
    registry/schema of patched models is re-initialized afterwards.
    """
    # if set, *one* column can be renamed here
    column_rename = None
    # names of the models to patch
    patched_models = set()
    if vals and self:
        for item in self:
            if item.state != 'manual':
                raise UserError(_('Properties of base fields cannot be altered in this manner! '
                                  'Please modify them through Python code, '
                                  'preferably through a custom addon!'))
            if vals.get('model_id', item.model_id.id) != item.model_id.id:
                raise UserError(_("Changing the model of a field is forbidden!"))
            if vals.get('ttype', item.ttype) != item.ttype:
                raise UserError(_("Changing the type of a field is not yet supported. "
                                  "Please drop it and create it again!"))
            obj = self.pool.get(item.model)
            field = getattr(obj, '_fields', {}).get(item.name)
            if vals.get('name', item.name) != item.name:
                # We need to rename the field
                item._prepare_update()
                if item.ttype in ('one2many', 'many2many', 'binary'):
                    # those field names are not explicit in the database!
                    pass
                else:
                    if column_rename:
                        raise UserError(_('Can only rename one field at a time!'))
                    column_rename = (obj._table, item.name, vals['name'], item.index, item.store)
            # We don't check the 'state', because it might come from the context
            # (thus be set for multiple fields) and will be ignored anyway.
            if obj is not None and field is not None:
                patched_models.add(obj._name)
    # These shall never be written (modified)
    for column_name in ('model_id', 'model', 'state'):
        if column_name in vals:
            del vals[column_name]
    res = super(IrModelFields, self).write(vals)
    self.flush()
    self.clear_caches()  # for _existing_field_data()
    if column_rename:
        # rename column in database, and its corresponding index if present
        table, oldname, newname, index, stored = column_rename
        if stored:
            self._cr.execute(
                sql.SQL('ALTER TABLE {} RENAME COLUMN {} TO {}').format(
                    sql.Identifier(table),
                    sql.Identifier(oldname),
                    sql.Identifier(newname)
                ))
            if index:
                self._cr.execute(
                    sql.SQL('ALTER INDEX {} RENAME TO {}').format(
                        sql.Identifier(f'{table}_{oldname}_index'),
                        sql.Identifier(f'{table}_{newname}_index'),
                    ))
    if column_rename or patched_models:
        # setup models, this will reload all manual fields in registry
        self.flush()
        self.pool.setup_models(self._cr)
    if patched_models:
        # update the database schema of the models to patch
        models = self.pool.descendants(patched_models, '_inherits')
        self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
    return res
def name_get(self):
    """ Display fields as "Label (technical.model)". """
    return [
        (field.id, '%s (%s)' % (field.field_description, field.model))
        for field in self
    ]
@tools.ormcache('model_name')
def _existing_field_data(self, model_name):
    """ Return the given model's existing field data. """
    self._cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", [model_name])
    result = {}
    for row in self._cr.dictfetchall():
        result[row['name']] = row
    return result
def _reflect_field_params(self, field, model_id):
    """ Return the values to write to the database for the given field.

    :param field: a registry ``Field`` instance
    :param model_id: database id of the field's ``ir.model`` record
    :return: dict of ir.model.fields column values; keys also determine
        the column list used by ``_reflect_fields``
    """
    return {
        'model_id': model_id,
        'model': field.model_name,
        'name': field.name,
        'field_description': field.string,
        'help': field.help or None,
        'ttype': field.type,
        'state': 'manual' if field.manual else 'base',
        'relation': field.comodel_name or None,
        'index': bool(field.index),
        'store': bool(field.store),
        'copied': bool(field.copy),
        # ondelete is only meaningful for many2one columns
        'on_delete': field.ondelete if field.type == 'many2one' else None,
        'related': field.related or None,
        'readonly': bool(field.readonly),
        'required': bool(field.required),
        'selectable': bool(field.search or field.store),
        # only some field types define 'size'
        'size': getattr(field, 'size', None),
        'translate': bool(field.translate),
        # one2many/many2many storage details
        'relation_field': field.inverse_name if field.type == 'one2many' else None,
        'relation_table': field.relation if field.type == 'many2many' else None,
        'column1': field.column1 if field.type == 'many2many' else None,
        'column2': field.column2 if field.type == 'many2many' else None,
    }
def _reflect_fields(self, model_names):
    """ Reflect the fields of the given models.

    Upserts one ir.model.fields row per registry field, warns about
    duplicate labels, and updates the XML ids of the fields belonging to
    the module in the context (if any).
    """
    cr = self.env.cr
    # warn about fields sharing the same label within a model
    for model_name in model_names:
        model = self.env[model_name]
        by_label = {}
        for field in model._fields.values():
            if field.string in by_label:
                _logger.warning('Two fields (%s, %s) of %s have the same label: %s.',
                                field.name, by_label[field.string], model, field.string)
            else:
                by_label[field.string] = field.name
    # determine expected and existing rows
    rows = []
    for model_name in model_names:
        model_id = self.env['ir.model']._get_id(model_name)
        for field in self.env[model_name]._fields.values():
            rows.append(self._reflect_field_params(field, model_id))
    if not rows:
        return
    # (model, name) first: rows are keyed on their first two columns below
    cols = list(unique(['model', 'name'] + list(rows[0])))
    expected = [tuple(row[col] for col in cols) for row in rows]
    query = "SELECT {}, id FROM ir_model_fields WHERE model IN %s".format(
        ", ".join(quote(col) for col in cols),
    )
    cr.execute(query, [tuple(model_names)])
    field_ids = {}
    existing = {}
    for row in cr.fetchall():
        field_ids[row[:2]] = row[-1]
        existing[row[:2]] = row[:-1]
    # create or update rows
    rows = [row for row in expected if existing.get(row[:2]) != row]
    if rows:
        ids = upsert(cr, self._table, cols, rows, ['model', 'name'])
        for row, id_ in zip(rows, ids):
            field_ids[row[:2]] = id_
        # mark the written columns as modified once the registry is ready
        self.pool.post_init(mark_modified, self.browse(ids), cols[2:])
    # update their XML id
    module = self._context.get('module')
    if not module:
        return
    data_list = []
    for (field_model, field_name), field_id in field_ids.items():
        model = self.env[field_model]
        field = model._fields.get(field_name)
        if field and (
            module == model._original_module
            or module in field._modules
            or any(
                # module introduced field on model by inheritance
                field_name in self.env[parent]._fields
                for parent, parent_module in model._inherit_module.items()
                if module == parent_module
            )
        ):
            xml_id = field_xmlid(module, field_model, field_name)
            record = self.browse(field_id)
            data_list.append({'xml_id': xml_id, 'record': record})
    self.env['ir.model.data']._update_xmlids(data_list)
@tools.ormcache()
def _all_manual_field_data(self):
    """ Cached {model_name: {field_name: row}} mapping for all manual fields. """
    self._cr.execute("SELECT * FROM ir_model_fields WHERE state='manual'")
    result = defaultdict(dict)
    for row in self._cr.dictfetchall():
        result[row['model']][row['name']] = row
    return result
def _get_manual_field_data(self, model_name):
    """ Return the given model's manual field data. """
    all_data = self._all_manual_field_data()
    return all_data.get(model_name, {})
def _instanciate_attrs(self, field_data):
    """ Return the parameters for a field instance for ``field_data``.

    Returns ``None`` when the field cannot be instantiated yet (e.g. its
    comodel or inverse field is not in the registry while loading).
    """
    attrs = {
        'manual': True,
        'string': field_data['field_description'],
        'help': field_data['help'],
        'index': bool(field_data['index']),
        'copy': bool(field_data['copied']),
        'related': field_data['related'],
        'required': bool(field_data['required']),
        'readonly': bool(field_data['readonly']),
        'store': bool(field_data['store']),
    }
    if field_data['ttype'] in ('char', 'text', 'html'):
        attrs['translate'] = bool(field_data['translate'])
        if field_data['ttype'] == 'char':
            attrs['size'] = field_data['size'] or None
    elif field_data['ttype'] in ('selection', 'reference'):
        attrs['selection'] = self.env['ir.model.fields.selection']._get_selection_data(field_data['id'])
        if field_data['ttype'] == 'selection':
            attrs['group_expand'] = field_data['group_expand']
    elif field_data['ttype'] == 'many2one':
        # defer instantiation until the comodel is loaded
        if not self.pool.loaded and field_data['relation'] not in self.env:
            return
        attrs['comodel_name'] = field_data['relation']
        attrs['ondelete'] = field_data['on_delete']
        attrs['domain'] = safe_eval(field_data['domain'] or '[]')
        attrs['group_expand'] = '_read_group_expand_full' if field_data['group_expand'] else None
    elif field_data['ttype'] == 'one2many':
        # defer until both the comodel and its inverse field exist
        if not self.pool.loaded and not (
            field_data['relation'] in self.env and (
                field_data['relation_field'] in self.env[field_data['relation']]._fields or
                field_data['relation_field'] in self._get_manual_field_data(field_data['relation'])
            )):
            return
        attrs['comodel_name'] = field_data['relation']
        attrs['inverse_name'] = field_data['relation_field']
        attrs['domain'] = safe_eval(field_data['domain'] or '[]')
    elif field_data['ttype'] == 'many2many':
        # defer instantiation until the comodel is loaded
        if not self.pool.loaded and field_data['relation'] not in self.env:
            return
        attrs['comodel_name'] = field_data['relation']
        rel, col1, col2 = self._custom_many2many_names(field_data['model'], field_data['relation'])
        attrs['relation'] = field_data['relation_table'] or rel
        attrs['column1'] = field_data['column1'] or col1
        attrs['column2'] = field_data['column2'] or col2
        attrs['domain'] = safe_eval(field_data['domain'] or '[]')
    elif field_data['ttype'] == 'monetary' and not self.pool.loaded:
        # monetary fields need their currency field; defer while loading
        return
    # add compute function if given
    if field_data['compute']:
        attrs['compute'] = make_compute(field_data['compute'], field_data['depends'])
    return attrs
def _instanciate(self, field_data):
    """ Return a field instance corresponding to parameters ``field_data``. """
    attrs = self._instanciate_attrs(field_data)
    if not attrs:
        return None
    field_class = fields.Field.by_type[field_data['ttype']]
    return field_class(**attrs)
def _add_manual_fields(self, model):
    """ Add extra fields on model. """
    for name, field_data in self._get_manual_field_data(model._name).items():
        if name in model._fields or field_data['state'] != 'manual':
            continue
        try:
            field = self._instanciate(field_data)
            if field:
                model._add_field(name, field)
        except Exception:
            # a broken custom field must not break registry loading
            _logger.exception("Failed to load field %s.%s: skipped", model._name, field_data['name'])
class IrModelSelection(models.Model):
    """ Reflection of the selection values of selection/reference fields.

    Each record is one (value, label) pair of a field's selection; records
    are ordered by sequence. Selections of manual fields can be edited,
    and removing a value triggers the field's 'ondelete' policy on the
    records still using it.
    """
    _name = 'ir.model.fields.selection'
    _order = 'sequence, id'
    _description = "Fields Selection"

    # the selection/reference field this value belongs to
    field_id = fields.Many2one("ir.model.fields",
        required=True, ondelete="cascade", index=True,
        domain=[('ttype', 'in', ['selection', 'reference'])])
    # technical value stored in the database column
    value = fields.Char(required=True)
    # user-visible (translatable) label
    name = fields.Char(translate=True, required=True)
    sequence = fields.Integer(default=1000)

    _sql_constraints = [
        ('selection_field_uniq', 'unique(field_id, value)',
         'Selections values must be unique per field'),
    ]

    def _get_selection(self, field_id):
        """ Return the given field's selection as a list of pairs (value, string). """
        # flush pending changes so the SQL query below sees them
        self.flush(['value', 'name', 'field_id', 'sequence'])
        return self._get_selection_data(field_id)

    def _get_selection_data(self, field_id):
        # raw (value, name) pairs straight from the database, in order
        self._cr.execute("""
            SELECT value, name
            FROM ir_model_fields_selection
            WHERE field_id=%s
            ORDER BY sequence, id
        """, (field_id,))
        return self._cr.fetchall()

    def _reflect_selections(self, model_names):
        """ Reflect the selections of the fields of the given models. """
        # only list-type selections can be reflected (not callables/strings)
        fields = [
            field
            for model_name in model_names
            for field_name, field in self.env[model_name]._fields.items()
            if field.type in ('selection', 'reference')
            if isinstance(field.selection, list)
        ]
        if not fields:
            return

        # determine expected and existing rows
        IMF = self.env['ir.model.fields']
        expected = {
            (field_id, value): (label, index)
            for field in fields
            for field_id in [IMF._get_ids(field.model_name)[field.name]]
            for index, (value, label) in enumerate(field.selection)
        }

        cr = self.env.cr
        query = """
            SELECT s.field_id, s.value, s.name, s.sequence
            FROM ir_model_fields_selection s, ir_model_fields f
            WHERE s.field_id = f.id AND f.model IN %s
        """
        cr.execute(query, [tuple(model_names)])
        existing = {row[:2]: row[2:] for row in cr.fetchall()}

        # create or update rows
        cols = ['field_id', 'value', 'name', 'sequence']
        rows = [key + val for key, val in expected.items() if existing.get(key) != val]
        if rows:
            ids = upsert(cr, self._table, cols, rows, ['field_id', 'value'])
            self.pool.post_init(mark_modified, self.browse(ids), cols[2:])

        # update their XML ids
        module = self._context.get('module')
        if not module:
            return

        query = """
            SELECT f.model, f.name, s.value, s.id
            FROM ir_model_fields_selection s, ir_model_fields f
            WHERE s.field_id = f.id AND f.model IN %s
        """
        cr.execute(query, [tuple(model_names)])
        selection_ids = {row[:3]: row[3] for row in cr.fetchall()}

        data_list = []
        for field in fields:
            model = self.env[field.model_name]
            # only values introduced by the current module get an xml id
            for value, modules in field._selection_modules(model).items():
                if module in modules:
                    xml_id = selection_xmlid(module, field.model_name, field.name, value)
                    record = self.browse(selection_ids[field.model_name, field.name, value])
                    data_list.append({'xml_id': xml_id, 'record': record})
        self.env['ir.model.data']._update_xmlids(data_list)

    def _update_selection(self, model_name, field_name, selection):
        """ Set the selection of a field to the given list, and return the row
            values of the given selection records.
        """
        field_id = self.env['ir.model.fields']._get_ids(model_name)[field_name]

        # selection rows {value: row}
        cur_rows = self._existing_selection_data(model_name, field_name)
        new_rows = {
            value: dict(value=value, name=label, sequence=index)
            for index, (value, label) in enumerate(selection)
        }

        rows_to_insert = []
        rows_to_update = []
        rows_to_remove = []
        for value in new_rows.keys() | cur_rows.keys():
            new_row, cur_row = new_rows.get(value), cur_rows.get(value)
            if new_row is None:
                if self.pool.ready:
                    # removing a selection in the new list, at your own risks
                    _logger.warning("Removing selection value %s on %s.%s",
                                    cur_row['value'], model_name, field_name)
                    rows_to_remove.append(cur_row['id'])
            elif cur_row is None:
                rows_to_insert.append(dict(new_row, field_id=field_id))
            elif any(new_row[key] != cur_row[key] for key in new_row):
                rows_to_update.append(dict(new_row, id=cur_row['id']))

        if rows_to_insert:
            row_ids = query_insert(self.env.cr, self._table, rows_to_insert)
            # update cur_rows for output
            for row, row_id in zip(rows_to_insert, row_ids):
                cur_rows[row['value']] = dict(row, id=row_id)

        for row in rows_to_update:
            query_update(self.env.cr, self._table, row, ['id'])

        if rows_to_remove:
            # unlink (not SQL delete) so _process_ondelete() runs
            self.browse(rows_to_remove).unlink()

        return cur_rows

    def _existing_selection_data(self, model_name, field_name):
        """ Return the selection data of the given model, by field and value, as
            a dict {field_name: {value: row_values}}.
        """
        query = """
            SELECT s.*
            FROM ir_model_fields_selection s
            JOIN ir_model_fields f ON s.field_id=f.id
            WHERE f.model=%s and f.name=%s
        """
        self._cr.execute(query, [model_name, field_name])
        return {row['value']: row for row in self._cr.dictfetchall()}

    @api.model_create_multi
    def create(self, vals_list):
        # selections may only be created on manual fields
        field_ids = {vals['field_id'] for vals in vals_list}
        for field in self.env['ir.model.fields'].browse(field_ids):
            if field.state != 'manual':
                raise UserError(_('Properties of base fields cannot be altered in this manner! '
                                  'Please modify them through Python code, '
                                  'preferably through a custom addon!'))
        recs = super().create(vals_list)

        # setup models; this re-initializes model in registry
        self.flush()
        self.pool.setup_models(self._cr)

        return recs

    def write(self, vals):
        # only admins may touch selections of non-manual fields
        if (
            not self.env.user._is_admin() and
            any(record.field_id.state != 'manual' for record in self)
        ):
            raise UserError(_('Properties of base fields cannot be altered in this manner! '
                              'Please modify them through Python code, '
                              'preferably through a custom addon!'))

        if 'value' in vals:
            for selection in self:
                if selection.value == vals['value']:
                    continue
                if selection.field_id.store:
                    # replace the value by the new one in the field's corresponding column
                    query = 'UPDATE "{table}" SET "{field}"=%s WHERE "{field}"=%s'.format(
                        table=self.env[selection.field_id.model]._table,
                        field=selection.field_id.name,
                    )
                    self.env.cr.execute(query, [vals['value'], selection.value])

        result = super().write(vals)

        # setup models; this re-initializes model in registry
        self.flush()
        self.pool.setup_models(self._cr)

        return result

    @api.ondelete(at_uninstall=False)
    def _unlink_if_manual(self):
        # Prevent manual deletion of module columns
        if (
            self.pool.ready
            and any(selection.field_id.state != 'manual' for selection in self)
        ):
            raise UserError(_('Properties of base fields cannot be altered in this manner! '
                              'Please modify them through Python code, '
                              'preferably through a custom addon!'))

    def unlink(self):
        # apply the field's ondelete policy before the values disappear
        self._process_ondelete()
        result = super().unlink()

        # Reload registry for normal unlink only. For module uninstall, the
        # reload is done independently in odoo.modules.loading.
        if not self._context.get(MODULE_UNINSTALL_FLAG):
            # setup models; this re-initializes model in registry
            self.flush()
            self.pool.setup_models(self._cr)

        return result

    def _process_ondelete(self):
        """ Process the 'ondelete' of the given selection values. """
        def safe_write(records, fname, value):
            # NOTE: reads `field` from the enclosing loop below
            if not records:
                return
            try:
                with self.env.cr.savepoint():
                    records.write({fname: value})
            except Exception:
                # going through the ORM failed, probably because of an exception
                # in an override or possibly a constraint.
                _logger.runbot(
                    "Could not fulfill ondelete action for field %s.%s, "
                    "attempting ORM bypass...", records._name, fname,
                )
                query = sql.SQL("UPDATE {} SET {}=%s WHERE id IN %s").format(
                    sql.Identifier(records._table),
                    sql.Identifier(fname),
                )
                # if this fails then we're shit out of luck and there's nothing
                # we can do except fix on a case-by-case basis
                value = field.convert_to_column(value, records)
                self.env.cr.execute(query, [value, records._ids])
                records.invalidate_cache([fname])

        for selection in self:
            Model = self.env[selection.field_id.model]
            # The field may exist in database but not in registry. In this case
            # we allow the field to be skipped, but for production this should
            # be handled through a migration script. The ORM will take care of
            # the orphaned 'ir.model.fields' down the stack, and will log a
            # warning prompting the developer to write a migration script.
            field = Model._fields.get(selection.field_id.name)
            if not field or not field.store or Model._abstract:
                continue

            ondelete = (field.ondelete or {}).get(selection.value)
            # special case for custom fields
            if ondelete is None and field.manual and not field.required:
                ondelete = 'set null'

            if ondelete is None:
                # nothing to do, the selection does not come from a field extension
                continue
            elif callable(ondelete):
                ondelete(selection._get_records())
            elif ondelete == 'set null':
                safe_write(selection._get_records(), field.name, False)
            elif ondelete == 'set default':
                value = field.convert_to_write(field.default(Model), Model)
                safe_write(selection._get_records(), field.name, value)
            elif ondelete.startswith('set '):
                # 'set <value>': write the literal replacement value
                safe_write(selection._get_records(), field.name, ondelete[4:])
            elif ondelete == 'cascade':
                selection._get_records().unlink()
            else:
                # this shouldn't happen... simply a sanity check
                raise ValueError(_(
                    "The ondelete policy %r is not valid for field %r"
                ) % (ondelete, selection))

    def _get_records(self):
        """ Return the records having 'self' as a value. """
        self.ensure_one()
        Model = self.env[self.field_id.model]
        query = 'SELECT id FROM "{table}" WHERE "{field}"=%s'.format(
            table=Model._table, field=self.field_id.name,
        )
        self.env.cr.execute(query, [self.value])
        return Model.browse(r[0] for r in self.env.cr.fetchall())
class IrModelConstraint(models.Model):
    """
    This model tracks PostgreSQL foreign keys and constraints used by Odoo
    models.

    One record is kept per (constraint name, module) pair, so that when a
    module is uninstalled the constraints it introduced can be dropped from
    the database, but only if no other installed module still owns them.
    """
    _name = 'ir.model.constraint'
    _description = 'Model Constraint'

    # PostgreSQL-level identifier of the constraint or foreign key
    name = fields.Char(string='Constraint', required=True, index=True,
                       help="PostgreSQL constraint or foreign key name.")
    definition = fields.Char(help="PostgreSQL constraint definition")
    message = fields.Char(help="Error message returned when the constraint is violated.", translate=True)
    model = fields.Many2one('ir.model', required=True, ondelete="cascade", index=True)
    module = fields.Many2one('ir.module.module', required=True, index=True, ondelete='cascade')
    type = fields.Char(string='Constraint Type', required=True, size=1, index=True,
                       help="Type of the constraint: `f` for a foreign key, "
                            "`u` for other constraints.")
    write_date = fields.Datetime()
    create_date = fields.Datetime()

    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)',
         'Constraints with the same name are unique per module.'),
    ]

    def _module_data_uninstall(self):
        """
        Delete PostgreSQL foreign keys and constraints tracked by this model.

        A constraint is only dropped when every ir.model.constraint record
        referring to it is part of ``self`` (i.e. no remaining installed
        module still owns it).
        """
        if not self.env.is_system():
            raise AccessError(_('Administrator access is required to uninstall a module'))

        ids_set = set(self.ids)
        for data in self.sorted(key='id', reverse=True):
            name = tools.ustr(data.name)
            # the model may exist in database but no longer in the registry
            if data.model.model in self.env:
                table = self.env[data.model.model]._table
            else:
                table = data.model.model.replace('.', '_')
            typ = data.type

            # double-check we are really going to delete all the owners of this schema element
            self._cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
            external_ids = set(x[0] for x in self._cr.fetchall())
            if external_ids - ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            if typ == 'f':
                # test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
                self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                                    WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
                                 ('f', name, table))
                if self._cr.fetchone():
                    self._cr.execute(
                        sql.SQL('ALTER TABLE {} DROP CONSTRAINT {}').format(
                            sql.Identifier(table),
                            sql.Identifier(name)
                        ))
                    _logger.info('Dropped FK CONSTRAINT %s@%s', name, data.model.model)

            if typ == 'u':
                # test if constraint exists
                self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                                    WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
                                 ('u', name, table))
                if self._cr.fetchone():
                    self._cr.execute(sql.SQL('ALTER TABLE {} DROP CONSTRAINT {}').format(
                        sql.Identifier(table), sql.Identifier(name)))
                    _logger.info('Dropped CONSTRAINT %s@%s', name, data.model.model)

        self.unlink()

    def copy(self, default=None):
        """ Duplicate the constraint record, suffixing its name to keep the
        (name, module) pair unique.
        """
        # NOTE(review): no ensure_one() here; `self.name` on a multi-record
        # set would raise — presumably only ever called on a single record.
        default = dict(default or {})
        default['name'] = self.name + '_copy'
        return super(IrModelConstraint, self).copy(default)

    def _reflect_constraint(self, model, conname, type, definition, module, message=None):
        """ Reflect the given constraint, and return its corresponding record.
        The reflection makes it possible to remove a constraint when its
        corresponding module is uninstalled. ``type`` is either 'f' or 'u'
        depending on the constraint being a foreign key or not.
        """
        if not module:
            # no need to save constraints for custom models as they're not part
            # of any module
            return
        assert type in ('f', 'u')
        cr = self._cr
        query = """ SELECT c.id, type, definition, message
                    FROM ir_model_constraint c, ir_module_module m
                    WHERE c.module=m.id AND c.name=%s AND m.name=%s """
        cr.execute(query, (conname, module))
        cons = cr.dictfetchone()
        if not cons:
            # constraint not reflected yet: insert it and return the new record
            query = """ INSERT INTO ir_model_constraint
                            (name, create_date, write_date, create_uid, write_uid, module, model, type, definition, message)
                        VALUES (%s,
                                now() AT TIME ZONE 'UTC',
                                now() AT TIME ZONE 'UTC',
                                %s, %s,
                                (SELECT id FROM ir_module_module WHERE name=%s),
                                (SELECT id FROM ir_model WHERE model=%s),
                                %s, %s, %s)
                        RETURNING id"""
            cr.execute(query,
                       (conname, self.env.uid, self.env.uid, module, model._name, type, definition, message))
            return self.browse(cr.fetchone()[0])

        cons_id = cons.pop('id')
        # update the reflection only when something actually changed
        if cons != dict(type=type, definition=definition, message=message):
            query = """ UPDATE ir_model_constraint
                        SET write_date=now() AT TIME ZONE 'UTC',
                            write_uid=%s, type=%s, definition=%s, message=%s
                        WHERE id=%s"""
            cr.execute(query, (self.env.uid, type, definition, message, cons_id))
        return self.browse(cons_id)

    def _reflect_constraints(self, model_names):
        """ Reflect the SQL constraints of the given models. """
        for model_name in model_names:
            self._reflect_model(self.env[model_name])

    def _reflect_model(self, model):
        """ Reflect the _sql_constraints of the given model. """
        def cons_text(txt):
            # normalize whitespace so textual comparison with the stored
            # definition is not fooled by formatting differences
            return txt.lower().replace(', ',',').replace(' (','(')

        # map each constraint on the name of the module where it is defined
        constraint_module = {
            constraint[0]: cls._module
            for cls in reversed(type(model).mro())
            if models.is_definition_class(cls)
            for constraint in getattr(cls, '_local_sql_constraints', ())
        }

        data_list = []
        for (key, definition, message) in model._sql_constraints:
            conname = '%s_%s' % (model._table, key)
            module = constraint_module.get(key)
            record = self._reflect_constraint(model, conname, 'u', cons_text(definition), module, message)
            if record:
                xml_id = '%s.constraint_%s' % (module, conname)
                data_list.append(dict(xml_id=xml_id, record=record))
        self.env['ir.model.data']._update_xmlids(data_list)
class IrModelRelation(models.Model):
    """
    This model tracks PostgreSQL tables used to implement Odoo many2many
    relations.

    One record is kept per (relation table, module) pair so that the table
    can be dropped when the last module using it is uninstalled.
    """
    _name = 'ir.model.relation'
    _description = 'Relation Model'

    name = fields.Char(string='Relation Name', required=True, index=True,
                       help="PostgreSQL table name implementing a many2many relation.")
    model = fields.Many2one('ir.model', required=True, index=True, ondelete='cascade')
    module = fields.Many2one('ir.module.module', required=True, index=True, ondelete='cascade')
    write_date = fields.Datetime()
    create_date = fields.Datetime()

    def _module_data_uninstall(self):
        """
        Delete PostgreSQL many2many relations tracked by this model.

        A relation table is only dropped when no record outside ``self``
        still references it (i.e. no other installed module owns it).
        """
        if not self.env.is_system():
            raise AccessError(_('Administrator access is required to uninstall a module'))

        ids_set = set(self.ids)
        to_drop = tools.OrderedSet()
        for data in self.sorted(key='id', reverse=True):
            name = tools.ustr(data.name)

            # double-check we are really going to delete all the owners of this schema element
            self._cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
            external_ids = set(x[0] for x in self._cr.fetchall())
            if external_ids - ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            if tools.table_exists(self._cr, name):
                to_drop.add(name)

        self.unlink()

        # drop m2m relation tables
        for table in to_drop:
            self._cr.execute(sql.SQL('DROP TABLE {} CASCADE').format(sql.Identifier(table)))
            _logger.info('Dropped table %s', table)

    def _reflect_relation(self, model, table, module):
        """ Reflect the table of a many2many field for the given model, to make
        it possible to delete it later when the module is uninstalled.
        """
        cr = self._cr
        query = """ SELECT 1 FROM ir_model_relation r, ir_module_module m
                    WHERE r.module=m.id AND r.name=%s AND m.name=%s """
        cr.execute(query, (table, module))
        if not cr.rowcount:
            # not reflected yet for this module: record it
            query = """ INSERT INTO ir_model_relation
                            (name, create_date, write_date, create_uid, write_uid, module, model)
                        VALUES (%s,
                                now() AT TIME ZONE 'UTC',
                                now() AT TIME ZONE 'UTC',
                                %s, %s,
                                (SELECT id FROM ir_module_module WHERE name=%s),
                                (SELECT id FROM ir_model WHERE model=%s)) """
            cr.execute(query, (table, self.env.uid, self.env.uid, module, model._name))
            self.invalidate_cache()
class IrModelAccess(models.Model):
    """Access Control List entries: per-model, optionally per-group, CRUD
    permission flags. ``check()`` is the entry point used by the ORM.
    """
    _name = 'ir.model.access'
    _description = 'Model Access'
    _order = 'model_id,group_id,name,id'

    name = fields.Char(required=True, index=True)
    active = fields.Boolean(default=True, help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).')
    model_id = fields.Many2one('ir.model', string='Model', required=True, index=True, ondelete='cascade')
    # no group means the ACL applies to every user
    group_id = fields.Many2one('res.groups', string='Group', ondelete='restrict', index=True)
    perm_read = fields.Boolean(string='Read Access')
    perm_write = fields.Boolean(string='Write Access')
    perm_create = fields.Boolean(string='Create Access')
    perm_unlink = fields.Boolean(string='Delete Access')

    @api.model
    def check_groups(self, group):
        """ Check whether the current user has the given group.

        :param str group: fully-qualified external id of the group, i.e.
            ``"module.xml_id"``
        :return bool: whether the current user belongs to that group
        """
        grouparr = group.split('.')
        # BUGFIX: str.split() always returns at least one element, so the
        # previous guard (`if not grouparr`) was dead code, and an external
        # id without a module prefix crashed with IndexError on grouparr[1].
        # Treat malformed ids as "user not in group" instead.
        if len(grouparr) < 2:
            return False
        self._cr.execute("""SELECT 1 FROM res_groups_users_rel
                            WHERE uid=%s AND gid IN (
                                SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
                         (self._uid, grouparr[0], grouparr[1],))
        return bool(self._cr.fetchone())

    @api.model
    def check_group(self, model, mode, group_ids):
        """ Check if a specific group has the access mode to the specified model"""
        assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'

        if isinstance(model, models.BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.name
        else:
            model_name = model

        if isinstance(group_ids, int):
            group_ids = [group_ids]

        # `mode` is interpolated in the query but restricted by the assert
        # above, so no injection is possible
        query = """ SELECT 1 FROM ir_model_access a
                    JOIN ir_model m ON (m.id = a.model_id)
                    WHERE a.active AND a.perm_{mode} AND
                        m.model=%s AND (a.group_id IN %s OR a.group_id IS NULL)
                """.format(mode=mode)
        self._cr.execute(query, (model_name, tuple(group_ids)))
        return bool(self._cr.rowcount)

    @api.model
    def group_names_with_access(self, model_name, access_mode):
        """ Return the names of visible groups which have been granted
            ``access_mode`` on the model ``model_name``.
        :rtype: list
        """
        assert access_mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
        # %%s escapes the placeholder since access_mode is %-interpolated first
        self._cr.execute("""
            SELECT c.name, g.name
              FROM ir_model_access a
              JOIN ir_model m ON (a.model_id = m.id)
              JOIN res_groups g ON (a.group_id = g.id)
         LEFT JOIN ir_module_category c ON (c.id = g.category_id)
             WHERE m.model = %%s
               AND a.active = TRUE
               AND a.perm_%s = TRUE
          ORDER BY c.name, g.name NULLS LAST
        """ % access_mode, [model_name])
        return [('%s/%s' % x) if x[0] else x[1] for x in self._cr.fetchall()]

    # The context parameter is useful when the method translates error messages.
    # But as the method raises an exception in that case,  the key 'lang' might
    # not be really necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment.)
    @api.model
    @tools.ormcache_context('self.env.uid', 'self.env.su', 'model', 'mode', 'raise_exception', keys=('lang',))
    def check(self, model, mode='read', raise_exception=True):
        """ Verify that the current user may perform ``mode`` on ``model``.

        :param str model: technical model name
        :param str mode: one of 'read', 'write', 'create', 'unlink'
        :param bool raise_exception: raise AccessError instead of
            returning False when access is denied
        :return bool: whether access is granted
        :raises AccessError: if denied and ``raise_exception`` is True
        """
        if self.env.su:
            # User root have all accesses
            return True

        assert isinstance(model, str), 'Not a model name: %s' % (model,)
        assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'

        # TransientModel records have no access rights, only an implicit access rule
        if model not in self.env:
            _logger.error('Missing model %s', model)

        self.flush(self._fields)

        # We check if a specific rule exists
        self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
                              FROM ir_model_access a
                              JOIN ir_model m ON (m.id = a.model_id)
                              JOIN res_groups_users_rel gu ON (gu.gid = a.group_id)
                             WHERE m.model = %s
                               AND gu.uid = %s
                               AND a.active IS TRUE""".format(mode=mode),
                         (model, self._uid,))
        r = self._cr.fetchone()[0]

        if not r:
            # there is no specific rule. We check the generic rule
            self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
                                  FROM ir_model_access a
                                  JOIN ir_model m ON (m.id = a.model_id)
                                 WHERE a.group_id IS NULL
                                   AND m.model = %s
                                   AND a.active IS TRUE""".format(mode=mode),
                             (model,))
            r = self._cr.fetchone()[0]

        if not r and raise_exception:
            groups = '\n'.join('\t- %s' % g for g in self.group_names_with_access(model, mode))
            document_kind = self.env['ir.model']._get(model).name or model
            msg_heads = {
                # Messages are declared in extenso so they are properly exported in translation terms
                'read': _("You are not allowed to access '%(document_kind)s' (%(document_model)s) records.", document_kind=document_kind, document_model=model),
                'write': _("You are not allowed to modify '%(document_kind)s' (%(document_model)s) records.", document_kind=document_kind, document_model=model),
                'create': _("You are not allowed to create '%(document_kind)s' (%(document_model)s) records.", document_kind=document_kind, document_model=model),
                'unlink': _("You are not allowed to delete '%(document_kind)s' (%(document_model)s) records.", document_kind=document_kind, document_model=model),
            }
            operation_error = msg_heads[mode]
            if groups:
                group_info = _("This operation is allowed for the following groups:\n%(groups_list)s", groups_list=groups)
            else:
                group_info = _("No group currently allows this operation.")
            resolution_info = _("Contact your administrator to request access if necessary.")

            _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, self._uid, model)
            msg = """{operation_error}

{group_info}

{resolution_info}""".format(
                operation_error=operation_error,
                group_info=group_info,
                resolution_info=resolution_info)
            raise AccessError(msg)

        return bool(r)

    # registry of (model, method) pairs whose caches must be cleared when
    # ACLs change
    __cache_clearing_methods = set()

    @classmethod
    def register_cache_clearing_method(cls, model, method):
        cls.__cache_clearing_methods.add((model, method))

    @classmethod
    def unregister_cache_clearing_method(cls, model, method):
        cls.__cache_clearing_methods.discard((model, method))

    @api.model
    def call_cache_clearing_methods(self):
        self.invalidate_cache()
        self.check.clear_cache(self)    # clear the cache of check function
        for model, method in self.__cache_clearing_methods:
            if model in self.env:
                getattr(self.env[model], method)()

    #
    # Check rights on actions
    #
    @api.model_create_multi
    def create(self, vals_list):
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).create(vals_list)

    def write(self, values):
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).write(values)

    def unlink(self):
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).unlink()
class IrModelData(models.Model):
    """Holds external identifier keys for records in the database.
    This has two main uses:
        * allows easy data integration with third-party systems,
          making import/export/sync of data possible, as records
          can be uniquely identified across multiple systems
        * allows tracking the origin of data installed by Odoo
          modules themselves, thus making it possible to later
          update them seamlessly.
    """
    _name = 'ir.model.data'
    _description = 'Model Data'
    _order = 'module, model, name'

    name = fields.Char(string='External Identifier', required=True,
                       help="External Key/Identifier that can be used for "
                            "data integration with third-party systems")
    complete_name = fields.Char(compute='_compute_complete_name', string='Complete ID')
    model = fields.Char(string='Model Name', required=True)
    module = fields.Char(default='', required=True)
    res_id = fields.Many2oneReference(string='Record ID', help="ID of the target record in the database", model_field='model')
    noupdate = fields.Boolean(string='Non Updatable', default=False)
    reference = fields.Char(string='Reference', compute='_compute_reference', readonly=True, store=False)

    _sql_constraints = [
        ('name_nospaces', "CHECK(name NOT LIKE '% %')",
         "External IDs cannot contain spaces"),
    ]

    @api.depends('module', 'name')
    def _compute_complete_name(self):
        # "module.name", or just "name" when module is empty
        for res in self:
            res.complete_name = ".".join(n for n in [res.module, res.name] if n)

    @api.depends('model', 'res_id')
    def _compute_reference(self):
        # "model,res_id" textual reference
        for res in self:
            res.reference = "%s,%s" % (res.model, res.res_id)

    def _auto_init(self):
        res = super(IrModelData, self)._auto_init()
        # enforce uniqueness of (module, name) and speed up lookups by record
        tools.create_unique_index(self._cr, 'ir_model_data_module_name_uniq_index',
                                  self._table, ['module', 'name'])
        tools.create_index(self._cr, 'ir_model_data_model_res_id_index',
                           self._table, ['model', 'res_id'])
        return res

    def name_get(self):
        model_id_name = defaultdict(dict)       # {res_model: {res_id: name}}
        for xid in self:
            model_id_name[xid.model][xid.res_id] = None

        # fill in model_id_name with name_get() of corresponding records
        for model, id_name in model_id_name.items():
            try:
                ng = self.env[model].browse(id_name).name_get()
                id_name.update(ng)
            except Exception:
                # deliberate best-effort: a broken/missing target record must
                # not prevent displaying the external id itself
                pass

        # return results, falling back on complete_name
        return [(xid.id, model_id_name[xid.model][xid.res_id] or xid.complete_name)
                for xid in self]

    # NEW V8 API
    @api.model
    @tools.ormcache('xmlid')
    def _xmlid_lookup(self, xmlid):
        """Low level xmlid lookup
        Return (id, res_model, res_id) or raise ValueError if not found
        """
        module, name = xmlid.split('.', 1)
        query = "SELECT id, model, res_id FROM ir_model_data WHERE module=%s AND name=%s"
        self.env.cr.execute(query, [module, name])
        result = self.env.cr.fetchone()
        # a row without res_id is treated as "not found" too
        if not (result and result[2]):
            raise ValueError('External ID not found in the system: %s' % xmlid)
        return result

    @api.model
    def _xmlid_to_res_model_res_id(self, xmlid, raise_if_not_found=False):
        """ Return (res_model, res_id)"""
        try:
            return self._xmlid_lookup(xmlid)[1:3]
        except ValueError:
            if raise_if_not_found:
                raise
            return (False, False)

    @api.model
    def _xmlid_to_res_id(self, xmlid, raise_if_not_found=False):
        """ Returns res_id """
        return self._xmlid_to_res_model_res_id(xmlid, raise_if_not_found)[1]

    @api.model
    def check_object_reference(self, module, xml_id, raise_on_access_error=False):
        """Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
        to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
        model, res_id = self._xmlid_lookup("%s.%s" % (module, xml_id))[1:3]
        #search on id found in result to check if current user has read access right
        if self.env[model].search([('id', '=', res_id)]):
            return model, res_id
        if raise_on_access_error:
            raise AccessError(_('Not enough access rights on the external ID:') + ' %s.%s' % (module, xml_id))
        return model, False

    def unlink(self):
        """ Regular unlink method, but make sure to clear the caches. """
        # _xmlid_lookup is ormcache'd, so deleting xids must flush caches
        self.clear_caches()
        return super(IrModelData, self).unlink()

    def _lookup_xmlids(self, xml_ids, model):
        """ Look up the given XML ids of the given model. """
        if not xml_ids:
            return []

        # group xml_ids by prefix
        bymodule = defaultdict(set)
        for xml_id in xml_ids:
            prefix, suffix = xml_id.split('.', 1)
            bymodule[prefix].add(suffix)

        # query xml_ids by prefix
        result = []
        cr = self.env.cr
        for prefix, suffixes in bymodule.items():
            # LEFT JOIN on the target table so r.id is NULL for dangling xids
            query = """
                SELECT d.id, d.module, d.name, d.model, d.res_id, d.noupdate, r.id
                FROM ir_model_data d LEFT JOIN "{}" r on d.res_id=r.id
                WHERE d.module=%s AND d.name IN %s
            """.format(model._table)
            for subsuffixes in cr.split_for_in_conditions(suffixes):
                cr.execute(query, (prefix, subsuffixes))
                result.extend(cr.fetchall())

        return result

    @api.model
    def _update_xmlids(self, data_list, update=False):
        """ Create or update the given XML ids.

            :param data_list: list of dicts with keys `xml_id` (XMLID to
                assign), `noupdate` (flag on XMLID), `record` (target record).
            :param update: should be ``True`` when upgrading a module
        """
        if not data_list:
            return

        rows = tools.OrderedSet()
        for data in data_list:
            prefix, suffix = data['xml_id'].split('.', 1)
            record = data['record']
            noupdate = bool(data.get('noupdate'))
            rows.add((prefix, suffix, record._name, record.id, noupdate))

        for sub_rows in self.env.cr.split_for_in_conditions(rows):
            # insert rows or update them
            query = self._build_update_xmlids_query(sub_rows, update)
            try:
                self.env.cr.execute(query, [arg for row in sub_rows for arg in row])
            except Exception:
                _logger.error("Failed to insert ir_model_data\n%s", "\n".join(str(row) for row in sub_rows))
                raise

        # update loaded_xmlids
        self.pool.loaded_xmlids.update("%s.%s" % row[:2] for row in rows)

    # NOTE: this method is overriden in web_studio; if you need to make another
    #  override, make sure it is compatible with the one that is there.
    def _build_update_xmlids_query(self, sub_rows, update):
        """ Build the upsert query used by _update_xmlids; during an update,
        rows flagged noupdate are left untouched. """
        rowf = "(%s, %s, %s, %s, %s)"
        return """
            INSERT INTO ir_model_data (module, name, model, res_id, noupdate)
            VALUES {rows}
            ON CONFLICT (module, name)
            DO UPDATE SET (model, res_id, write_date) =
                (EXCLUDED.model, EXCLUDED.res_id, now() at time zone 'UTC')
                {where}
        """.format(
            rows=", ".join([rowf] * len(sub_rows)),
            where="WHERE NOT ir_model_data.noupdate" if update else "",
        )

    @api.model
    def _load_xmlid(self, xml_id):
        """ Simply mark the given XML id as being loaded, and return the
            corresponding record.
        """
        record = self.env.ref(xml_id, raise_if_not_found=False)
        if record:
            self.pool.loaded_xmlids.add(xml_id)
        return record

    @api.model
    def _module_data_uninstall(self, modules_to_remove):
        """Deletes all the records referenced by the ir.model.data entries
        ``ids`` along with their corresponding database backed (including
        dropping tables, columns, FKs, etc, as long as there is no other
        ir.model.data entry holding a reference to them (which indicates that
        they are still owned by another module).
        Attempts to perform the deletion in an appropriate order to maximize
        the chance of gracefully deleting all records.
        This step is performed as part of the full uninstallation of a module.
        """
        if not self.env.is_system():
            raise AccessError(_('Administrator access is required to uninstall a module'))

        # enable model/field deletion
        # we deactivate prefetching to not try to read a column that has been deleted
        self = self.with_context(**{MODULE_UNINSTALL_FLAG: True, 'prefetch_fields': False})

        # determine records to unlink
        records_items = []              # [(model, id)]
        model_ids = []
        field_ids = []
        selection_ids = []
        constraint_ids = []

        module_data = self.search([('module', 'in', modules_to_remove)], order='id DESC')
        for data in module_data:
            if data.model == 'ir.model':
                model_ids.append(data.res_id)
            elif data.model == 'ir.model.fields':
                field_ids.append(data.res_id)
            elif data.model == 'ir.model.fields.selection':
                selection_ids.append(data.res_id)
            elif data.model == 'ir.model.constraint':
                constraint_ids.append(data.res_id)
            else:
                records_items.append((data.model, data.res_id))

        # avoid prefetching fields that are going to be deleted: during uninstall, it is
        # possible to perform a recompute (via flush) after the database columns have been
        # deleted but before the new registry has been created, meaning the recompute will
        # be executed on a stale registry, and if some of the data for executing the compute
        # methods is not in cache it will be fetched, and fields that exist in the registry but not
        # in the database will be prefetched, this will of course fail and prevent the uninstall.
        for ir_field in self.env['ir.model.fields'].browse(field_ids):
            model = self.pool.get(ir_field.model)
            if model is not None:
                field = model._fields.get(ir_field.name)
                if field is not None:
                    field.prefetch = False

        # to collect external ids of records that cannot be deleted
        undeletable_ids = []

        def delete(records):
            # do not delete records that have other external ids (and thus do
            # not belong to the modules being installed)
            ref_data = self.search([
                ('model', '=', records._name),
                ('res_id', 'in', records.ids),
            ])
            records -= records.browse((ref_data - module_data).mapped('res_id'))
            if not records:
                return

            # special case for ir.model.fields
            if records._name == 'ir.model.fields':
                missing = records - records.exists()
                if missing:
                    # delete orphan external ids right now;
                    # an orphan ir.model.data can happen if the ir.model.field is deleted via
                    # an ONDELETE CASCADE, in which case we must verify that the records we're
                    # processing exist in the database otherwise a MissingError will be raised
                    orphans = ref_data.filtered(lambda r: r.res_id in missing._ids)
                    _logger.info('Deleting orphan ir_model_data %s', orphans)
                    orphans.unlink()
                    # /!\ this must go before any field accesses on `records`
                    records -= missing
                # do not remove LOG_ACCESS_COLUMNS unless _log_access is False
                # on the model
                records -= records.filtered(lambda f: f.name == 'id' or (
                    f.name in models.LOG_ACCESS_COLUMNS and
                    f.model in self.env and self.env[f.model]._log_access
                ))

            # now delete the records
            _logger.info('Deleting %s', records)
            try:
                with self._cr.savepoint():
                    records.unlink()
            except Exception:
                if len(records) <= 1:
                    # a single record failed: remember it as undeletable
                    undeletable_ids.extend(ref_data._ids)
                else:
                    # divide the batch in two, and recursively delete them
                    half_size = len(records) // 2
                    delete(records[:half_size])
                    delete(records[half_size:])

        # remove non-model records first, grouped by batches of the same model
        for model, items in itertools.groupby(unique(records_items), itemgetter(0)):
            delete(self.env[model].browse(item[1] for item in items))

        # Remove copied views. This must happen after removing all records from
        # the modules to remove, otherwise ondelete='restrict' may prevent the
        # deletion of some view. This must also happen before cleaning up the
        # database schema, otherwise some dependent fields may no longer exist
        # in database.
        modules = self.env['ir.module.module'].search([('name', 'in', modules_to_remove)])
        modules._remove_copied_views()

        # remove constraints
        delete(self.env['ir.model.constraint'].browse(unique(constraint_ids)))
        constraints = self.env['ir.model.constraint'].search([('module', 'in', modules.ids)])
        constraints._module_data_uninstall()

        # If we delete a selection field, and some of its values have ondelete='cascade',
        # we expect the records with that value to be deleted. If we delete the field first,
        # the column is dropped and the selection is gone, and thus the records above will not
        # be deleted.
        delete(self.env['ir.model.fields.selection'].browse(unique(selection_ids)).exists())
        delete(self.env['ir.model.fields'].browse(unique(field_ids)))
        relations = self.env['ir.model.relation'].search([('module', 'in', modules.ids)])
        relations._module_data_uninstall()

        # remove models
        delete(self.env['ir.model'].browse(unique(model_ids)))

        # log undeletable ids
        _logger.info("ir.model.data could not be deleted (%s)", undeletable_ids)

        # sort out which undeletable model data may have become deletable again because
        # of records being cascade-deleted or tables being dropped just above
        for data in self.browse(undeletable_ids).exists():
            record = self.env[data.model].browse(data.res_id)
            try:
                with self.env.cr.savepoint():
                    if record.exists():
                        # record exists therefore the data is still undeletable,
                        # remove it from module_data
                        module_data -= data
                        continue
            except psycopg2.ProgrammingError:
                # This most likely means that the record does not exist, since record.exists()
                # is rougly equivalent to `SELECT id FROM table WHERE id=record.id` and it may raise
                # a ProgrammingError because the table no longer exists (and so does the
                # record), also applies to ir.model.fields, constraints, etc.
                pass

        # remove remaining module data records
        module_data.unlink()

    @api.model
    def _process_end_unlink_record(self, record):
        # extension hook: lets other modules customize record removal
        record.unlink()

    @api.model
    def _process_end(self, modules):
        """ Clear records removed from updated module data.
        This method is called at the end of the module loading process.
        It is meant to removed records that are no longer present in the
        updated data. Such records are recognised as the one with an xml id
        and a module in ir_model_data and noupdate set to false, but not
        present in self.pool.loaded_xmlids.
        """
        if not modules or tools.config.get('import_partial'):
            return True

        bad_imd_ids = []
        self = self.with_context({MODULE_UNINSTALL_FLAG: True})
        loaded_xmlids = self.pool.loaded_xmlids

        query = """ SELECT id, module || '.' || name, model, res_id FROM ir_model_data
                    WHERE module IN %s AND res_id IS NOT NULL AND COALESCE(noupdate, false) != %s ORDER BY id DESC
                """
        self._cr.execute(query, (tuple(modules), True))
        for (id, xmlid, model, res_id) in self._cr.fetchall():
            if xmlid in loaded_xmlids:
                continue

            Model = self.env.get(model)
            if Model is None:
                continue

            # when _inherits parents are implicitly created we give them an
            # external id (if their descendant has one) in order to e.g.
            # properly remove them when the module is deleted, however this
            # generated id is *not* provided during update yet we don't want to
            # try and remove either the xid or the record, so check if the
            # record has a child we've just updated
            keep = False
            for inheriting in (self.env[m] for m in Model._inherits_children):
                # ignore mixins
                if inheriting._abstract:
                    continue

                parent_field = inheriting._inherits[model]
                children = inheriting.with_context(active_test=False).search([(parent_field, '=', res_id)])
                children_xids = {
                    xid
                    for xids in (children and children._get_external_ids().values())
                    for xid in xids
                }
                if children_xids & loaded_xmlids:
                    # at least one child was loaded
                    keep = True
                    break
            if keep:
                continue

            # if the record has other associated xids, only remove the xid
            if self.search_count([
                ("model", "=", model),
                ("res_id", "=", res_id),
                ("id", "!=", id),
                ("id", "not in", bad_imd_ids),
            ]):
                bad_imd_ids.append(id)
                continue

            _logger.info('Deleting %s@%s (%s)', res_id, model, xmlid)
            record = Model.browse(res_id)
            if record.exists():
                module = xmlid.split('.', 1)[0]
                record = record.with_context(module=module)
                self._process_end_unlink_record(record)
            else:
                bad_imd_ids.append(id)
        if bad_imd_ids:
            self.browse(bad_imd_ids).unlink()

        # Once all views are created create specific ones
        self.env['ir.ui.view']._create_all_specific_views(modules)

        loaded_xmlids.clear()

    @api.model
    def toggle_noupdate(self, model, res_id):
        """ Toggle the noupdate flag on the external id of the record """
        record = self.env[model].browse(res_id)
        if record.check_access_rights('write'):
            for xid in self.search([('model', '=', model), ('res_id', '=', res_id)]):
                xid.noupdate = not xid.noupdate
class WizardModelMenu(models.TransientModel):
    """Wizard creating a menu entry (and its window action) for a model."""
    _name = 'wizard.ir.model.menu.create'
    _description = 'Create Menu Wizard'

    menu_id = fields.Many2one('ir.ui.menu', string='Parent Menu', required=True, ondelete='cascade')
    name = fields.Char(string='Menu Name', required=True)

    def menu_create(self):
        """Create a window action on the wizard's target model and attach it
        to a new menu item under the chosen parent menu.
        """
        for wizard in self:
            # the target model is passed through the context by the caller
            target_model = self.env['ir.model'].browse(self._context.get('model_id'))
            action_id = self.env['ir.actions.act_window'].create({
                'name': wizard.name,
                'res_model': target_model.model,
                'view_mode': 'tree,form',
            })
            self.env['ir.ui.menu'].create({
                'name': wizard.name,
                'parent_id': wizard.menu_id.id,
                'action': 'ir.actions.act_window,%d' % (action_id,),
            })
        return {'type': 'ir.actions.act_window_close'}
| 45.132359 | 105,023 |
2,040 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import inspect
import logging
import warnings
import traceback
from odoo import api, models
from odoo.exceptions import AccessDenied
_logger = logging.getLogger(__name__)
def is_autovacuum(func):
    """ Return whether ``func`` is an autovacuum method.

    An autovacuum method is any callable carrying a truthy ``_autovacuum``
    attribute (set by the ``@api.autovacuum`` decorator).
    """
    if not callable(func):
        return False
    return getattr(func, '_autovacuum', False)
class AutoVacuum(models.AbstractModel):
    """ Helper model to the ``@api.autovacuum`` method decorator. """
    _name = 'ir.autovacuum'
    _description = 'Automatic Vacuum'

    def _run_vacuum_cleaner(self):
        """
        Perform a complete database cleanup by safely calling every
        ``@api.autovacuum`` decorated method.

        Each method runs in isolation: the cursor is committed after a
        successful call and rolled back (with the error logged) after a
        failed one, so one faulty cleaner cannot abort the others.
        """
        if not self.env.is_admin():
            raise AccessDenied()

        for model in self.env.values():
            cls = type(model)
            for attr, func in inspect.getmembers(cls, is_autovacuum):
                _logger.debug('Calling %s.%s()', model, attr)
                try:
                    func(model)
                    self.env.cr.commit()
                except Exception:
                    _logger.exception("Failed %s.%s()", model, attr)
                    self.env.cr.rollback()

        # Ensure backward compatibility with the previous autovacuum API
        try:
            self.power_on()
            self.env.cr.commit()
        except Exception:
            _logger.exception("Failed power_on")
            self.env.cr.rollback()

    # Deprecated API
    @api.model
    def power_on(self, *args, **kwargs):
        """Deprecated hook kept for backward compatibility.

        When an override of ``power_on`` calls ``super()`` (detected by the
        caller frame being itself named ``power_on``), warn that the
        ``@api.autovacuum`` decorator should be used instead.
        """
        tb = traceback.extract_stack(limit=2)
        if tb[-2].name == 'power_on':
            warnings.warn(
                # BUGFIX: a space was missing after "method", which rendered
                # the warning as "...ir.autovacuum methodin <file>...".
                "You are extending the 'power_on' ir.autovacuum method "
                f"in {tb[-2].filename} around line {tb[-2].lineno}. "
                "You should instead use the @api.autovacuum decorator "
                "on your garbage collecting method.", DeprecationWarning, stacklevel=2)
| 33.442623 | 2,040 |
48,097 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from markupsafe import Markup
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import UserError, AccessError
from odoo.tools.safe_eval import safe_eval, time
from odoo.tools.misc import find_in_path, ustr
from odoo.tools import config, is_html_empty, parse_version
from odoo.http import request
from odoo.osv.expression import NEGATIVE_TERM_OPERATORS, FALSE_DOMAIN
import base64
import io
import logging
import os
import lxml.html
import tempfile
import subprocess
import re
import json
from lxml import etree
from contextlib import closing
from reportlab.graphics.barcode import createBarcodeDrawing
from PyPDF2 import PdfFileWriter, PdfFileReader, utils
from collections import OrderedDict
from collections.abc import Iterable
from PIL import Image, ImageFile
# Allow truncated images
ImageFile.LOAD_TRUNCATED_IMAGES = True
_logger = logging.getLogger(__name__)
# A lock occurs when the user wants to print a report having multiple barcode while the server is
# started in threaded-mode. The reason is that reportlab has to build a cache of the T1 fonts
# before rendering a barcode (done in a C extension) and this part is not thread safe. We attempt
# here to init the T1 fonts cache at the start-up of Odoo so that rendering of barcode in multiple
# thread does not lock the server.
try:
    createBarcodeDrawing('Code128', value='foo', format='png', width=100, height=100, humanReadable=1).asString('png')
except Exception:
    # Best-effort warm-up only: swallowing any failure here is deliberate,
    # barcode rendering will simply pay the cache cost on first use instead.
    pass
def _get_wkhtmltopdf_bin():
    """Return the filesystem path of the ``wkhtmltopdf`` binary, resolved
    through Odoo's configured binary search path (see ``find_in_path``)."""
    return find_in_path('wkhtmltopdf')
# Check the presence of Wkhtmltopdf and return its version at Odoo start-up
# The two module-level globals below cache the probe result for the whole
# process lifetime; get_wkhtmltopdf_state() exposes the first one.
wkhtmltopdf_state = 'install'
wkhtmltopdf_dpi_zoom_ratio = False
try:
    process = subprocess.Popen(
        [_get_wkhtmltopdf_bin(), '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
except (OSError, IOError):
    _logger.info('You need Wkhtmltopdf to print a pdf version of the reports.')
else:
    _logger.info('Will use the Wkhtmltopdf binary at %s' % _get_wkhtmltopdf_bin())
    out, err = process.communicate()
    # Grab the first dotted number in the version banner.
    match = re.search(b'([0-9.]+)', out)
    if match:
        version = match.group(0).decode('ascii')
        if parse_version(version) < parse_version('0.12.0'):
            _logger.info('Upgrade Wkhtmltopdf to (at least) 0.12.0')
            wkhtmltopdf_state = 'upgrade'
        else:
            wkhtmltopdf_state = 'ok'
            if parse_version(version) >= parse_version('0.12.2'):
                # >= 0.12.2 supports the --zoom workaround for dpi scaling.
                wkhtmltopdf_dpi_zoom_ratio = True

        if config['workers'] == 1:
            _logger.info('You need to start Odoo with at least two workers to print a pdf version of the reports.')
            wkhtmltopdf_state = 'workers'
    else:
        _logger.info('Wkhtmltopdf seems to be broken.')
        wkhtmltopdf_state = 'broken'
class IrActionsReport(models.Model):
    """Definition of a printable report action (QWeb HTML / PDF / text)."""
    _name = 'ir.actions.report'
    _description = 'Report Action'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_report_xml'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    name = fields.Char(translate=True)
    type = fields.Char(default='ir.actions.report')
    binding_type = fields.Selection(default='report')
    model = fields.Char(required=True, string='Model Name')
    # Derived from the technical name in `model`: not stored, hence the
    # compute/search pair instead of a plain relational column.
    model_id = fields.Many2one('ir.model', string='Model', compute='_compute_model_id', search='_search_model_id')

    report_type = fields.Selection([
        ('qweb-html', 'HTML'),
        ('qweb-pdf', 'PDF'),
        ('qweb-text', 'Text'),
    ], required=True, default='qweb-pdf',
        help='The type of the report that will be rendered, each one having its own'
             ' rendering method. HTML means the report will be opened directly in your'
             ' browser PDF means the report will be rendered using Wkhtmltopdf and'
             ' downloaded by the user.')
    report_name = fields.Char(string='Template Name', required=True)
    report_file = fields.Char(string='Report File', required=False, readonly=False, store=True,
                              help="The path to the main report file (depending on Report Type) or empty if the content is in another field")
    groups_id = fields.Many2many('res.groups', 'res_groups_report_rel', 'uid', 'gid', string='Groups')
    multi = fields.Boolean(string='On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view.")
    paperformat_id = fields.Many2one('report.paperformat', 'Paper Format')
    print_report_name = fields.Char('Printed Report Name', translate=True,
                                    help="This is the filename of the report going to download. Keep empty to not change the report filename. You can use a python expression with the 'object' and 'time' variables.")
    attachment_use = fields.Boolean(string='Reload from Attachment',
                                    help='If enabled, then the second time the user prints with same attachment name, it returns the previous report.')
    attachment = fields.Char(string='Save as Attachment Prefix',
                             help='This is the filename of the attachment used to store the printing result. Keep empty to not save the printed reports. You can use a python expression with the object and time variables.')
@api.depends('model')
def _compute_model_id(self):
for action in self:
action.model_id = self.env['ir.model']._get(action.model).id
def _search_model_id(self, operator, value):
ir_model_ids = None
if isinstance(value, str):
names = self.env['ir.model'].name_search(value, operator=operator)
ir_model_ids = [n[0] for n in names]
elif isinstance(value, Iterable):
ir_model_ids = value
elif isinstance(value, int) and not isinstance(value, bool):
ir_model_ids = [value]
if ir_model_ids:
operator = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
ir_model = self.env['ir.model'].browse(ir_model_ids)
return [('model', operator, ir_model.mapped('model'))]
elif isinstance(value, bool) or value is None:
return [('model', operator, value)]
else:
return FALSE_DOMAIN
def _get_readable_fields(self):
return super()._get_readable_fields() | {
"report_name", "report_type", "target",
# these two are not real fields of ir.actions.report but are
# expected in the route /report/<converter>/<reportname> and must
# not be removed by clean_action
"context", "data",
# and this one is used by the frontend later on.
"close_on_report_download",
}
def associated_view(self):
"""Used in the ir.actions.report form view in order to search naively after the view(s)
used in the rendering.
"""
self.ensure_one()
action_ref = self.env.ref('base.action_ui_view')
if not action_ref or len(self.report_name.split('.')) < 2:
return False
action_data = action_ref.read()[0]
action_data['domain'] = [('name', 'ilike', self.report_name.split('.')[1]), ('type', '=', 'qweb')]
return action_data
def create_action(self):
""" Create a contextual action for each report. """
for report in self:
model = self.env['ir.model']._get(report.model)
report.write({'binding_model_id': model.id, 'binding_type': 'report'})
return True
def unlink_action(self):
""" Remove the contextual actions created for the reports. """
self.check_access_rights('write', raise_exception=True)
self.filtered('binding_model_id').write({'binding_model_id': False})
return True
#--------------------------------------------------------------------------
# Main report methods
#--------------------------------------------------------------------------
    def _retrieve_stream_from_attachment(self, attachment):
        """Return the attachment content as an in-memory stream of PDF data.

        Image attachments are converted on the fly into a one-page PDF;
        any other attachment is assumed to already hold PDF bytes.
        """
        # This import is needed to make sure a PDF stream can be saved in Image
        from PIL import PdfImagePlugin
        if attachment.mimetype.startswith('image'):
            stream = io.BytesIO(base64.b64decode(attachment.datas))
            img = Image.open(stream)
            output_stream = io.BytesIO()
            # Flatten to RGB: saving as PDF requires dropping any alpha channel.
            img.convert("RGB").save(output_stream, format="pdf")
            return output_stream
        return io.BytesIO(base64.decodebytes(attachment.datas))
def retrieve_attachment(self, record):
'''Retrieve an attachment for a specific record.
:param record: The record owning of the attachment.
:param attachment_name: The optional name of the attachment.
:return: A recordset of length <=1 or None
'''
attachment_name = safe_eval(self.attachment, {'object': record, 'time': time}) if self.attachment else ''
if not attachment_name:
return None
return self.env['ir.attachment'].search([
('name', '=', attachment_name),
('res_model', '=', self.model),
('res_id', '=', record.id)
], limit=1)
def _postprocess_pdf_report(self, record, buffer):
'''Hook to handle post processing during the pdf report generation.
The basic behavior consists to create a new attachment containing the pdf
base64 encoded.
:param record_id: The record that will own the attachment.
:param pdf_content: The optional name content of the file to avoid reading both times.
:return: A modified buffer if the previous one has been modified, None otherwise.
'''
attachment_name = safe_eval(self.attachment, {'object': record, 'time': time})
if not attachment_name:
return None
attachment_vals = {
'name': attachment_name,
'raw': buffer.getvalue(),
'res_model': self.model,
'res_id': record.id,
'type': 'binary',
}
try:
self.env['ir.attachment'].create(attachment_vals)
except AccessError:
_logger.info("Cannot save PDF report %r as attachment", attachment_vals['name'])
else:
_logger.info('The PDF document %s is now saved in the database', attachment_vals['name'])
return buffer
@api.model
def get_wkhtmltopdf_state(self):
'''Get the current state of wkhtmltopdf: install, ok, upgrade, workers or broken.
* install: Starting state.
* upgrade: The binary is an older version (< 0.12.0).
* ok: A binary was found with a recent version (>= 0.12.0).
* workers: Not enough workers found to perform the pdf rendering process (< 2 workers).
* broken: A binary was found but not responding.
:return: wkhtmltopdf_state
'''
return wkhtmltopdf_state
def get_paperformat(self):
return self.paperformat_id or self.env.company.paperformat_id
    @api.model
    def _build_wkhtmltopdf_args(
            self,
            paperformat_id,
            landscape,
            specific_paperformat_args=None,
            set_viewport_size=False):
        '''Build arguments understandable by wkhtmltopdf bin.

        NOTE: html-provided values (``specific_paperformat_args``) take
        precedence over the paperformat record throughout.

        :param paperformat_id: A report.paperformat record.
        :param landscape: Force the report orientation to be landscape.
        :param specific_paperformat_args: A dictionary containing prioritized wkhtmltopdf arguments.
        :param set_viewport_size: Enable a viewport sized '1024x1280' or '1280x1024' depending of landscape arg.
        :return: A list of string representing the wkhtmltopdf process command args.
        '''
        if landscape is None and specific_paperformat_args and specific_paperformat_args.get('data-report-landscape'):
            landscape = specific_paperformat_args.get('data-report-landscape')

        command_args = ['--disable-local-file-access']
        if set_viewport_size:
            command_args.extend(['--viewport-size', landscape and '1024x1280' or '1280x1024'])

        # Passing the cookie to wkhtmltopdf in order to resolve internal links.
        try:
            if request:
                command_args.extend(['--cookie', 'session_id', request.session.sid])
        except AttributeError:
            # No (complete) http request bound to this thread: skip the cookie.
            pass

        # Less verbose error messages
        command_args.extend(['--quiet'])

        # Build paperformat args
        if paperformat_id:
            if paperformat_id.format and paperformat_id.format != 'custom':
                command_args.extend(['--page-size', paperformat_id.format])

            if paperformat_id.page_height and paperformat_id.page_width and paperformat_id.format == 'custom':
                command_args.extend(['--page-width', str(paperformat_id.page_width) + 'mm'])
                command_args.extend(['--page-height', str(paperformat_id.page_height) + 'mm'])

            if specific_paperformat_args and specific_paperformat_args.get('data-report-margin-top'):
                command_args.extend(['--margin-top', str(specific_paperformat_args['data-report-margin-top'])])
            else:
                command_args.extend(['--margin-top', str(paperformat_id.margin_top)])

            dpi = None
            if specific_paperformat_args and specific_paperformat_args.get('data-report-dpi'):
                dpi = int(specific_paperformat_args['data-report-dpi'])
            elif paperformat_id.dpi:
                if os.name == 'nt' and int(paperformat_id.dpi) <= 95:
                    _logger.info("Generating PDF on Windows platform require DPI >= 96. Using 96 instead.")
                    dpi = 96
                else:
                    dpi = paperformat_id.dpi
            if dpi:
                command_args.extend(['--dpi', str(dpi)])
                if wkhtmltopdf_dpi_zoom_ratio:
                    # Compensate wkhtmltopdf's dpi scaling with an inverse zoom
                    # (only supported from wkhtmltopdf 0.12.2, see probe above).
                    command_args.extend(['--zoom', str(96.0 / dpi)])

            if specific_paperformat_args and specific_paperformat_args.get('data-report-header-spacing'):
                command_args.extend(['--header-spacing', str(specific_paperformat_args['data-report-header-spacing'])])
            elif paperformat_id.header_spacing:
                command_args.extend(['--header-spacing', str(paperformat_id.header_spacing)])

            command_args.extend(['--margin-left', str(paperformat_id.margin_left)])
            command_args.extend(['--margin-bottom', str(paperformat_id.margin_bottom)])
            command_args.extend(['--margin-right', str(paperformat_id.margin_right)])
            if not landscape and paperformat_id.orientation:
                command_args.extend(['--orientation', str(paperformat_id.orientation)])
            if paperformat_id.header_line:
                command_args.extend(['--header-line'])
            if paperformat_id.disable_shrinking:
                command_args.extend(['--disable-smart-shrinking'])

        # Add extra time to allow the page to render
        delay = self.env['ir.config_parameter'].sudo().get_param('report.print_delay', '1000')
        command_args.extend(['--javascript-delay', delay])

        # An explicit landscape request wins: appended last so it overrides
        # any --orientation emitted from the paperformat above.
        if landscape:
            command_args.extend(['--orientation', 'landscape'])

        return command_args
    def _prepare_html(self, html):
        '''Divide and recreate the header/footer html by merging all found in html.
        The bodies are extracted and added to a list. Then, extract the specific_paperformat_args.
        The idea is to put all headers/footers together. Then, we will use a javascript trick
        (see minimal_layout template) to set the right header/footer during the processing of wkhtmltopdf.
        This allows the computation of multiple reports in a single call to wkhtmltopdf.

        :param html: The html rendered by render_qweb_html.
        :type: bodies: list of string representing each one a html body.
        :type header: string representing the html header.
        :type footer: string representing the html footer.
        :type specific_paperformat_args: dictionary of prioritized paperformat values.
        :return: bodies, res_ids, header, footer, specific_paperformat_args
        '''
        IrConfig = self.env['ir.config_parameter'].sudo()

        # Return empty dictionary if 'web.minimal_layout' not found.
        layout = self.env.ref('web.minimal_layout', False)
        if not layout:
            return {}
        layout = self.env['ir.ui.view'].browse(self.env['ir.ui.view'].get_view_id('web.minimal_layout'))
        base_url = IrConfig.get_param('report.url') or layout.get_base_url()

        root = lxml.html.fromstring(html)
        # XPath matching a whole-word CSS class on a div.
        match_klass = "//div[contains(concat(' ', normalize-space(@class), ' '), ' {} ')]"

        header_node = etree.Element('div', id='minimal_layout_report_headers')
        footer_node = etree.Element('div', id='minimal_layout_report_footers')
        bodies = []
        res_ids = []

        body_parent = root.xpath('//main')[0]
        # Retrieve headers
        for node in root.xpath(match_klass.format('header')):
            body_parent = node.getparent()
            node.getparent().remove(node)
            header_node.append(node)

        # Retrieve footers
        for node in root.xpath(match_klass.format('footer')):
            body_parent = node.getparent()
            node.getparent().remove(node)
            footer_node.append(node)

        # Retrieve bodies
        layout_sections = None
        for node in root.xpath(match_klass.format('article')):
            layout_with_lang = layout
            if node.get('data-oe-lang'):
                # context language to body language
                layout_with_lang = layout_with_lang.with_context(lang=node.get('data-oe-lang'))
                # set header/lang to body lang prioritizing current user language
                if not layout_sections or node.get('data-oe-lang') == self.env.lang:
                    layout_sections = layout_with_lang
            body = layout_with_lang._render({
                'subst': False,
                'body': Markup(lxml.html.tostring(node, encoding='unicode')),
                'base_url': base_url,
                'report_xml_id': self.xml_id
            })
            bodies.append(body)
            # Track which record each body belongs to (None when the article
            # is not tagged with this report's model).
            if node.get('data-oe-model') == self.model:
                res_ids.append(int(node.get('data-oe-id', 0)))
            else:
                res_ids.append(None)

        if not bodies:
            # No <article>: fall back on whatever remains under the last
            # parent seen (headers/footers were already detached above).
            body = ''.join(lxml.html.tostring(c, encoding='unicode') for c in body_parent.getchildren())
            bodies.append(body)

        # Get paperformat arguments set in the root html tag. They are prioritized over
        # paperformat-record arguments.
        specific_paperformat_args = {}
        for attribute in root.items():
            if attribute[0].startswith('data-report-'):
                specific_paperformat_args[attribute[0]] = attribute[1]

        header = (layout_sections or layout)._render({
            'subst': True,
            'body': Markup(lxml.html.tostring(header_node, encoding='unicode')),
            'base_url': base_url
        })
        footer = (layout_sections or layout)._render({
            'subst': True,
            'body': Markup(lxml.html.tostring(footer_node, encoding='unicode')),
            'base_url': base_url
        })

        return bodies, res_ids, header, footer, specific_paperformat_args
@api.model
def _run_wkhtmltopdf(
self,
bodies,
header=None,
footer=None,
landscape=False,
specific_paperformat_args=None,
set_viewport_size=False):
'''Execute wkhtmltopdf as a subprocess in order to convert html given in input into a pdf
document.
:param list[str] bodies: The html bodies of the report, one per page.
:param str header: The html header of the report containing all headers.
:param str footer: The html footer of the report containing all footers.
:param landscape: Force the pdf to be rendered under a landscape format.
:param specific_paperformat_args: dict of prioritized paperformat arguments.
:param set_viewport_size: Enable a viewport sized '1024x1280' or '1280x1024' depending of landscape arg.
:return: Content of the pdf as bytes
:rtype: bytes
'''
paperformat_id = self.get_paperformat()
# Build the base command args for wkhtmltopdf bin
command_args = self._build_wkhtmltopdf_args(
paperformat_id,
landscape,
specific_paperformat_args=specific_paperformat_args,
set_viewport_size=set_viewport_size)
files_command_args = []
temporary_files = []
if header:
head_file_fd, head_file_path = tempfile.mkstemp(suffix='.html', prefix='report.header.tmp.')
with closing(os.fdopen(head_file_fd, 'wb')) as head_file:
head_file.write(header.encode())
temporary_files.append(head_file_path)
files_command_args.extend(['--header-html', head_file_path])
if footer:
foot_file_fd, foot_file_path = tempfile.mkstemp(suffix='.html', prefix='report.footer.tmp.')
with closing(os.fdopen(foot_file_fd, 'wb')) as foot_file:
foot_file.write(footer.encode())
temporary_files.append(foot_file_path)
files_command_args.extend(['--footer-html', foot_file_path])
paths = []
for i, body in enumerate(bodies):
prefix = '%s%d.' % ('report.body.tmp.', i)
body_file_fd, body_file_path = tempfile.mkstemp(suffix='.html', prefix=prefix)
with closing(os.fdopen(body_file_fd, 'wb')) as body_file:
body_file.write(body.encode())
paths.append(body_file_path)
temporary_files.append(body_file_path)
pdf_report_fd, pdf_report_path = tempfile.mkstemp(suffix='.pdf', prefix='report.tmp.')
os.close(pdf_report_fd)
temporary_files.append(pdf_report_path)
try:
wkhtmltopdf = [_get_wkhtmltopdf_bin()] + command_args + files_command_args + paths + [pdf_report_path]
process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
err = ustr(err)
if process.returncode not in [0, 1]:
if process.returncode == -11:
message = _(
'Wkhtmltopdf failed (error code: %s). Memory limit too low or maximum file number of subprocess reached. Message : %s')
else:
message = _('Wkhtmltopdf failed (error code: %s). Message: %s')
_logger.warning(message, process.returncode, err[-1000:])
raise UserError(message % (str(process.returncode), err[-1000:]))
else:
if err:
_logger.warning('wkhtmltopdf: %s' % err)
except:
raise
with open(pdf_report_path, 'rb') as pdf_document:
pdf_content = pdf_document.read()
# Manual cleanup of the temporary files
for temporary_file in temporary_files:
try:
os.unlink(temporary_file)
except (OSError, IOError):
_logger.error('Error when trying to remove file %s' % temporary_file)
return pdf_content
@api.model
def _get_report_from_name(self, report_name):
"""Get the first record of ir.actions.report having the ``report_name`` as value for
the field report_name.
"""
report_obj = self.env['ir.actions.report']
conditions = [('report_name', '=', report_name)]
context = self.env['res.users'].context_get()
return report_obj.with_context(context).sudo().search(conditions, limit=1)
@api.model
def get_barcode_check_digit(self, numeric_barcode):
""" Computes and returns the barcode check digit. The used algorithm
follows the GTIN specifications and can be used by all compatible
barcode nomenclature, like as EAN-8, EAN-12 (UPC-A) or EAN-13.
https://www.gs1.org/sites/default/files/docs/barcodes/GS1_General_Specifications.pdf
https://www.gs1.org/services/how-calculate-check-digit-manually
:param numeric_barcode: the barcode to verify/recompute the check digit
:type numeric_barcode: str
:return: the number corresponding to the right check digit
:rtype: int
"""
# Multiply value of each position by
# N1 N2 N3 N4 N5 N6 N7 N8 N9 N10 N11 N12 N13 N14 N15 N16 N17 N18
# x3 X1 x3 x1 x3 x1 x3 x1 x3 x1 x3 x1 x3 x1 x3 x1 x3 CHECKSUM
oddsum = evensum = 0
code = numeric_barcode[-2::-1] # Remove the check digit and reverse the barcode.
# The CHECKSUM digit is removed because it will be recomputed and it must not interfer with
# the computation. Also, the barcode is inverted, so the barcode length doesn't matter.
# Otherwise, the digits' group (even or odd) could be different according to the barcode length.
for i, digit in enumerate(code):
if i % 2 == 0:
evensum += int(digit)
else:
oddsum += int(digit)
total = evensum * 3 + oddsum
return (10 - total % 10) % 10
@api.model
def check_barcode_encoding(self, barcode, encoding):
""" Checks if the given barcode is correctly encoded.
:return: True if the barcode string is encoded with the provided encoding.
:rtype: bool
"""
if encoding == "any":
return True
barcode_sizes = {
'ean8': 8,
'ean13': 13,
'upca': 12,
}
barcode_size = barcode_sizes[encoding]
return (encoding != 'ean13' or barcode[0] != '0') \
and len(barcode) == barcode_size \
and re.match(r"^\d+$", barcode) \
and self.get_barcode_check_digit(barcode) == int(barcode[-1])
    @api.model
    def barcode(self, barcode_type, value, **kwargs):
        """Render ``value`` as a barcode image and return the PNG bytes.

        Unknown/invalid symbologies fall back on Code128 (recursively);
        a failing Code128 or QR rendering raises ValueError instead.

        :param barcode_type: reportlab symbology name, 'auto', 'UPCA' or 'QR'.
        :param value: the string to encode.
        :param kwargs: width/height/humanreadable/quiet/mask/barBorder/barLevel,
            each validated and defaulted through the table below.
        """
        # (default, validator) per supported option; unexpected kwargs are dropped.
        defaults = {
            'width': (600, int),
            'height': (100, int),
            'humanreadable': (False, lambda x: bool(int(x))),
            'quiet': (True, lambda x: bool(int(x))),
            'mask': (None, lambda x: x),
            'barBorder': (4, int),
            # The QR code can have different layouts depending on the Error Correction Level
            # See: https://en.wikipedia.org/wiki/QR_code#Error_correction
            # Level 'L' – up to 7% damage (default)
            # Level 'M' – up to 15% damage (i.e. required by l10n_ch QR bill)
            # Level 'Q' – up to 25% damage
            # Level 'H' – up to 30% damage
            'barLevel': ('L', lambda x: x in ('L', 'M', 'Q', 'H') and x or 'L'),
        }
        kwargs = {k: validator(kwargs.get(k, v)) for k, (v, validator) in defaults.items()}
        # reportlab expects the camelCase spelling.
        kwargs['humanReadable'] = kwargs.pop('humanreadable')

        if barcode_type == 'UPCA' and len(value) in (11, 12, 13):
            # UPC-A is rendered as EAN-13 with a leading zero.
            barcode_type = 'EAN13'
            if len(value) in (11, 12):
                value = '0%s' % value
        elif barcode_type == 'auto':
            symbology_guess = {8: 'EAN8', 13: 'EAN13'}
            barcode_type = symbology_guess.get(len(value), 'Code128')
        elif barcode_type == 'QR':
            # for `QR` type, `quiet` is not supported. And is simply ignored.
            # But we can use `barBorder` to get a similar behaviour.
            if kwargs['quiet']:
                kwargs['barBorder'] = 0

        if barcode_type in ('EAN8', 'EAN13') and not self.check_barcode_encoding(value, barcode_type.lower()):
            # If the barcode does not respect the encoding specifications, convert its type into Code128.
            # Otherwise, the report-lab method may return a barcode different from its value. For instance,
            # if the barcode type is EAN-8 and the value 11111111, the report-lab method will take the first
            # seven digits and will compute the check digit, which gives: 11111115 -> the barcode does not
            # match the expected value.
            barcode_type = 'Code128'

        try:
            barcode = createBarcodeDrawing(barcode_type, value=value, format='png', **kwargs)

            # If a mask is asked and it is available, call its function to
            # post-process the generated QR-code image
            if kwargs['mask']:
                available_masks = self.get_available_barcode_masks()
                mask_to_apply = available_masks.get(kwargs['mask'])
                if mask_to_apply:
                    mask_to_apply(kwargs['width'], kwargs['height'], barcode)

            return barcode.asString('png')
        except (ValueError, AttributeError):
            if barcode_type == 'Code128':
                raise ValueError("Cannot convert into barcode.")
            elif barcode_type == 'QR':
                raise ValueError("Cannot convert into QR code.")
            else:
                # Retry once with the most permissive symbology.
                return self.barcode('Code128', value, **kwargs)
@api.model
def get_available_barcode_masks(self):
""" Hook for extension.
This function returns the available QR-code masks, in the form of a
list of (code, mask_function) elements, where code is a string identifying
the mask uniquely, and mask_function is a function returning a reportlab
Drawing object with the result of the mask, and taking as parameters:
- width of the QR-code, in pixels
- height of the QR-code, in pixels
- reportlab Drawing object containing the barcode to apply the mask on
"""
return {}
    def _render_template(self, template, values=None):
        """Allow to render a QWeb template python-side. This function returns the 'ir.ui.view'
        render but embellish it with some variables/methods used in reports.

        NOTE: ``values`` is mutated in place (report helpers are added to it).

        :param values: additional methods/variables used in the rendering
        :returns: html representation of the template
        :rtype: bytes
        """
        if values is None:
            values = {}

        # Reports are rendered without edit-mode branding attributes.
        context = dict(self.env.context, inherit_branding=False)

        # Browse the user instead of using the sudo self.env.user
        user = self.env['res.users'].browse(self.env.uid)
        website = None
        if request and hasattr(request, 'website'):
            if request.website is not None:
                website = request.website
                context = dict(context, translatable=context.get('lang') != request.env['ir.http']._get_default_lang().code)

        view_obj = self.env['ir.ui.view'].sudo().with_context(context)
        values.update(
            time=time,
            context_timestamp=lambda t: fields.Datetime.context_timestamp(self.with_context(tz=user.tz), t),
            user=user,
            res_company=self.env.company,
            website=website,
            web_base_url=self.env['ir.config_parameter'].sudo().get_param('web.base.url', default=''),
        )
        return view_obj._render_template(template, values).encode()
    def _post_pdf(self, save_in_attachment, pdf_content=None, res_ids=None):
        '''Merge the existing attachments by adding one by one the content of the attachments
        and then, we add the pdf_content if exists. Create the attachments for each record individually
        if required.

        :param save_in_attachment: The retrieved attachments as map record.id -> stream.
        :param pdf_content: The pdf content newly generated by wkhtmltopdf.
        :param res_ids: the ids of record to allow postprocessing.
        :return: The pdf content of the merged pdf.
        '''

        def close_streams(streams):
            # Best-effort close of every stream; ignore already-closed ones.
            for stream in streams:
                try:
                    stream.close()
                except Exception:
                    pass

        # Check special case having only one record with existing attachment.
        # In that case, return directly the attachment content.
        # In that way, we also ensure the embedded files are well preserved.
        if len(save_in_attachment) == 1 and not pdf_content:
            return list(save_in_attachment.values())[0].getvalue()

        # Create a list of streams representing all sub-reports part of the final result
        # in order to append the existing attachments and the potentially modified sub-reports
        # by the _postprocess_pdf_report calls.
        streams = []

        # If wkhtmltopdf has been called, we need to split the pdf in order to call the postprocess method.
        if pdf_content:
            pdf_content_stream = io.BytesIO(pdf_content)
            # Build a record_map mapping id -> record
            record_map = {r.id: r for r in self.env[self.model].browse([res_id for res_id in res_ids if res_id])}
            # If no value in attachment or no record specified, only append the whole pdf.
            if not record_map or not self.attachment:
                streams.append(pdf_content_stream)
            else:
                if len(res_ids) == 1:
                    # Only one record, so postprocess directly and append the whole pdf.
                    if res_ids[0] in record_map and not res_ids[0] in save_in_attachment:
                        new_stream = self._postprocess_pdf_report(record_map[res_ids[0]], pdf_content_stream)
                        # If the buffer has been modified, mark the old buffer to be closed as well.
                        if new_stream and new_stream != pdf_content_stream:
                            close_streams([pdf_content_stream])
                            pdf_content_stream = new_stream
                    streams.append(pdf_content_stream)
                else:
                    # In case of multiple docs, we need to split the pdf according the records.
                    # To do so, we split the pdf based on top outlines computed by wkhtmltopdf.
                    # An outline is a <h?> html tag found on the document. To retrieve this table,
                    # we look on the pdf structure using pypdf to compute the outlines_pages from
                    # the top level heading in /Outlines.
                    reader = PdfFileReader(pdf_content_stream)
                    root = reader.trailer['/Root']
                    outlines_pages = []
                    if '/Outlines' in root and '/First' in root['/Outlines']:
                        # Walk the singly-linked list of top-level outline nodes.
                        node = root['/Outlines']['/First']
                        while True:
                            outlines_pages.append(root['/Dests'][node['/Dest']][0])
                            if '/Next' not in node:
                                break
                            node = node['/Next']
                    outlines_pages = sorted(set(outlines_pages))
                    # There should be only one top-level heading by document
                    # There should be a top-level heading on first page
                    if len(outlines_pages) == len(res_ids) and outlines_pages[0] == 0:
                        for i, num in enumerate(outlines_pages):
                            # Pages [num, to) belong to record res_ids[i].
                            to = outlines_pages[i + 1] if i + 1 < len(outlines_pages) else reader.numPages
                            attachment_writer = PdfFileWriter()
                            for j in range(num, to):
                                attachment_writer.addPage(reader.getPage(j))
                            stream = io.BytesIO()
                            attachment_writer.write(stream)
                            if res_ids[i] and res_ids[i] not in save_in_attachment:
                                new_stream = self._postprocess_pdf_report(record_map[res_ids[i]], stream)
                                # If the buffer has been modified, mark the old buffer to be closed as well.
                                if new_stream and new_stream != stream:
                                    close_streams([stream])
                                    stream = new_stream
                            streams.append(stream)
                        close_streams([pdf_content_stream])
                    else:
                        # We can not generate separate attachments because the outlines
                        # do not reveal where the splitting points should be in the pdf.
                        _logger.info('The PDF report can not be saved as attachment.')
                        streams.append(pdf_content_stream)

        # If attachment_use is checked, the records already having an existing attachment
        # have not been rendered by wkhtmltopdf. So, add their stored stream for each of them.
        if self.attachment_use:
            for stream in save_in_attachment.values():
                streams.append(stream)

        # Build the final pdf.
        # If only one stream left, no need to merge them (and then, preserve embedded files).
        if len(streams) == 1:
            result = streams[0].getvalue()
        else:
            try:
                result = self._merge_pdfs(streams)
            except utils.PdfReadError:
                raise UserError(_("One of the documents you are trying to merge is encrypted"))

        # We have to close the streams after PdfFileWriter's call to write()
        close_streams(streams)
        return result
def _get_unreadable_pdfs(self, streams):
unreadable_streams = []
for stream in streams:
writer = PdfFileWriter()
result_stream = io.BytesIO()
try:
reader = PdfFileReader(stream)
writer.appendPagesFromReader(reader)
writer.write(result_stream)
except (utils.PdfReadError, TypeError):
unreadable_streams.append(stream)
return unreadable_streams
def _raise_on_unreadable_pdfs(self, streams, stream_record):
unreadable_pdfs = self._get_unreadable_pdfs(streams)
if unreadable_pdfs:
records = [stream_record[s].name for s in unreadable_pdfs if s in stream_record]
raise UserError(_(
"Odoo is unable to merge the PDFs attached to the following records:\n"
"%s\n\n"
"Please exclude them from the selection to continue. It's possible to "
"still retrieve those PDFs by selecting each of the affected records "
"individually, which will avoid merging.") % "\n".join(records))
    def _merge_pdfs(self, streams):
        """Concatenate the pages of every pdf stream into a single pdf.

        NOTE: the freshly created result stream is appended to ``streams`` on
        purpose, so that the caller's cleanup loop (see ``_post_pdf``) closes
        it together with the input streams.

        :param streams: list of readable pdf streams; mutated (see above).
        :return: the merged pdf content as bytes.
        """
        writer = PdfFileWriter()
        for stream in streams:
            reader = PdfFileReader(stream)
            writer.appendPagesFromReader(reader)
        result_stream = io.BytesIO()
        streams.append(result_stream)
        writer.write(result_stream)
        return result_stream.getvalue()
    def _render_qweb_pdf(self, res_ids=None, data=None):
        """Render this report as PDF for the given record ids.

        Records that already have a saved attachment are served from it;
        the remaining ones are rendered through wkhtmltopdf, then the two
        sets are merged/saved by ``_post_pdf``.

        :param res_ids: optional list of record ids to print
        :param data: optional rendering values (mutated in place)
        :returns: tuple ``(pdf_content, 'pdf')``
        :rtype: bytes
        """
        if not data:
            data = {}
        data.setdefault('report_type', 'pdf')

        # access the report details with sudo() but evaluation context as sudo(False)
        self_sudo = self.sudo()

        # In case of test environment without enough workers to perform calls to wkhtmltopdf,
        # fallback to render_html.
        if (tools.config['test_enable'] or tools.config['test_file']) and not self.env.context.get('force_report_rendering'):
            return self_sudo._render_qweb_html(res_ids, data=data)

        # As the assets are generated during the same transaction as the rendering of the
        # templates calling them, there is a scenario where the assets are unreachable: when
        # you make a request to read the assets while the transaction creating them is not done.
        # Indeed, when you make an asset request, the controller has to read the `ir.attachment`
        # table.
        # This scenario happens when you want to print a PDF report for the first time, as the
        # assets are not in cache and must be generated. To workaround this issue, we manually
        # commit the writes in the `ir.attachment` table. It is done thanks to a key in the context.
        context = dict(self.env.context)
        if not config['test_enable'] and 'commit_assetsbundle' not in context:
            context['commit_assetsbundle'] = True

        # Disable the debug mode in the PDF rendering in order to not split the assets bundle
        # into separated files to load. This is done because of an issue in wkhtmltopdf
        # failing to load the CSS/Javascript resources in time.
        # Without this, the header/footer of the reports randomly disappear
        # because the resources files are not loaded in time.
        # https://github.com/wkhtmltopdf/wkhtmltopdf/issues/2083
        context['debug'] = False

        save_in_attachment = OrderedDict()
        # Maps the streams in `save_in_attachment` back to the records they came from
        stream_record = dict()
        if res_ids:
            # Dispatch the records by ones having an attachment and ones requesting a call to
            # wkhtmltopdf.
            Model = self.env[self_sudo.model]
            record_ids = Model.browse(res_ids)
            wk_record_ids = Model
            if self_sudo.attachment:
                for record_id in record_ids:
                    attachment = self_sudo.retrieve_attachment(record_id)
                    if attachment:
                        stream = self_sudo._retrieve_stream_from_attachment(attachment)
                        save_in_attachment[record_id.id] = stream
                        stream_record[stream] = record_id
                    # Re-render when there is no attachment yet, or when
                    # attachment_use is off (attachment is only saved, not reused).
                    if not self_sudo.attachment_use or not attachment:
                        wk_record_ids += record_id
            else:
                wk_record_ids = record_ids
            res_ids = wk_record_ids.ids

        # A call to wkhtmltopdf is mandatory in 2 cases:
        # - The report is not linked to a record.
        # - The report is not fully present in attachments.
        if save_in_attachment and not res_ids:
            _logger.info('The PDF report has been generated from attachments.')
            if len(save_in_attachment) > 1:
                self._raise_on_unreadable_pdfs(save_in_attachment.values(), stream_record)
            return self_sudo._post_pdf(save_in_attachment), 'pdf'

        if self.get_wkhtmltopdf_state() == 'install':
            # wkhtmltopdf is not installed
            # the call should be catched before (cf /report/check_wkhtmltopdf) but
            # if get_pdf is called manually (email template), the check could be
            # bypassed
            raise UserError(_("Unable to find Wkhtmltopdf on this system. The PDF can not be created."))

        html = self_sudo.with_context(context)._render_qweb_html(res_ids, data=data)[0]

        bodies, html_ids, header, footer, specific_paperformat_args = self_sudo.with_context(context)._prepare_html(html)

        # Each record must be matched back to its body through the
        # data-oe-model/data-oe-id markers; refuse to save otherwise.
        if self_sudo.attachment and set(res_ids) != set(html_ids):
            raise UserError(_("The report's template '%s' is wrong, please contact your administrator. \n\n"
                "Can not separate file to save as attachment because the report's template does not contains the attributes 'data-oe-model' and 'data-oe-id' on the div with 'article' classname.") % self.name)

        pdf_content = self._run_wkhtmltopdf(
            bodies,
            header=header,
            footer=footer,
            landscape=context.get('landscape'),
            specific_paperformat_args=specific_paperformat_args,
            set_viewport_size=context.get('set_viewport_size'),
        )
        if res_ids:
            self._raise_on_unreadable_pdfs(save_in_attachment.values(), stream_record)
            _logger.info('The PDF report has been generated for model: %s, records %s.' % (self_sudo.model, str(res_ids)))
            return self_sudo._post_pdf(save_in_attachment, pdf_content=pdf_content, res_ids=html_ids), 'pdf'
        return pdf_content, 'pdf'
@api.model
def _render_qweb_text(self, docids, data=None):
"""
:rtype: bytes
"""
if not data:
data = {}
data.setdefault('report_type', 'text')
data.setdefault('__keep_empty_lines', True)
data = self._get_rendering_context(docids, data)
return self._render_template(self.report_name, data), 'text'
@api.model
def _render_qweb_html(self, docids, data=None):
"""This method generates and returns html version of a report.
:rtype: bytes
"""
if not data:
data = {}
data.setdefault('report_type', 'html')
data = self._get_rendering_context(docids, data)
return self._render_template(self.report_name, data), 'html'
def _get_rendering_context_model(self):
report_model_name = 'report.%s' % self.report_name
return self.env.get(report_model_name)
def _get_rendering_context(self, docids, data):
# If the report is using a custom model to render its html, we must use it.
# Otherwise, fallback on the generic html rendering.
report_model = self._get_rendering_context_model()
data = data and dict(data) or {}
if report_model is not None:
# _render_ may be executed in sudo but evaluation context as real user
report_model = report_model.sudo(False)
data.update(report_model._get_report_values(docids, data=data))
else:
# _render_ may be executed in sudo but evaluation context as real user
docs = self.env[self.model].sudo(False).browse(docids)
data.update({
'doc_ids': docids,
'doc_model': self.model,
'docs': docs,
})
data['is_html_empty'] = is_html_empty
return data
def _render(self, res_ids, data=None):
report_type = self.report_type.lower().replace('-', '_')
render_func = getattr(self, '_render_' + report_type, None)
if not render_func:
return None
return render_func(res_ids, data=data)
def report_action(self, docids, data=None, config=True):
"""Return an action of type ir.actions.report.
:param docids: id/ids/browse record of the records to print (if not used, pass an empty list)
:param data:
:param bool config:
:rtype: bytes
"""
context = self.env.context
if docids:
if isinstance(docids, models.Model):
active_ids = docids.ids
elif isinstance(docids, int):
active_ids = [docids]
elif isinstance(docids, list):
active_ids = docids
context = dict(self.env.context, active_ids=active_ids)
report_action = {
'context': context,
'data': data,
'type': 'ir.actions.report',
'report_name': self.report_name,
'report_type': self.report_type,
'report_file': self.report_file,
'name': self.name,
}
discard_logo_check = self.env.context.get('discard_logo_check')
if self.env.is_admin() and not self.env.company.external_report_layout_id and config and not discard_logo_check:
return self._action_configure_external_report_layout(report_action)
return report_action
def _action_configure_external_report_layout(self, report_action):
action = self.env["ir.actions.actions"]._for_xml_id("web.action_base_document_layout_configurator")
py_ctx = json.loads(action.get('context', {}))
report_action['close_on_report_download'] = True
py_ctx['report_action'] = report_action
action['context'] = py_ctx
return action
| 47.099902 | 48,089 |
504 | py | PYTHON | 15.0 | # -*- coding: ascii -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ReportLayout(models.Model):
    """Selectable PDF report layout, presented (with previews) in the
    document layout configuration wizard."""

    _name = "report.layout"
    _description = 'Report Layout'
    _order = 'sequence'

    # QWeb view implementing the layout
    view_id = fields.Many2one(
        comodel_name='ir.ui.view', string='Document Template', required=True)
    # Paths to the static preview assets shown in the wizard
    image = fields.Char(string="Preview image src")
    pdf = fields.Char(string="Preview pdf src")
    sequence = fields.Integer(default=50)
    name = fields.Char()
| 29.647059 | 504 |
20,365 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
#----------------------------------------------------------
# ir_http modular http routing
#----------------------------------------------------------
import base64
import hashlib
import logging
import mimetypes
import os
import re
import sys
import traceback
import werkzeug
import werkzeug.exceptions
import werkzeug.routing
import werkzeug.utils
import odoo
from odoo import api, http, models, tools, SUPERUSER_ID
from odoo.exceptions import AccessDenied, AccessError, MissingError
from odoo.http import request, content_disposition, Response
from odoo.tools import consteq, pycompat
from odoo.tools.mimetypes import get_extension, guess_mimetype
from odoo.modules.module import get_resource_path, get_module_path
from odoo.http import ALLOWED_DEBUG_MODES
from odoo.tools.misc import str2bool
_logger = logging.getLogger(__name__)
class RequestUID(object):
    """Placeholder object carrying converter state while URL converters
    run, before the real request uid is known (see ``_postprocess_args``).

    Every keyword argument becomes an attribute, e.g.
    ``RequestUID(value=value, converter=self)``.
    """

    def __init__(self, **kw):
        for attr, value in kw.items():
            setattr(self, attr, value)
class ModelConverter(werkzeug.routing.BaseConverter):
    """URL converter mapping a numeric path segment to a browse record of
    ``model``."""

    def __init__(self, url_map, model=False):
        super(ModelConverter, self).__init__(url_map)
        self.model = model
        self.regex = r'([0-9]+)'

    def to_python(self, value):
        # The real uid is substituted later by _postprocess_args(); until
        # then a RequestUID placeholder is stored in the environment.
        env = api.Environment(request.cr, RequestUID(value=value, converter=self), request.context)
        return env[self.model].browse(int(value))

    def to_url(self, value):
        return value.id
class ModelsConverter(werkzeug.routing.BaseConverter):
    """URL converter mapping a comma-separated list of ids to a recordset
    of ``model``."""

    def __init__(self, url_map, model=False):
        super(ModelsConverter, self).__init__(url_map)
        self.model = model
        # TODO add support for slug in the form [A-Za-z0-9-] bla-bla-89 -> id 89
        self.regex = r'([0-9,]+)'

    def to_python(self, value):
        _uid = RequestUID(value=value, converter=self)
        env = api.Environment(request.cr, _uid, request.context)
        return env[self.model].browse(int(v) for v in value.split(','))

    def to_url(self, value):
        # Fix: `ids` is a list of ints and str.join() only accepts strings;
        # the previous code raised TypeError when building an URL.
        return ",".join(str(record_id) for record_id in value.ids)
class SignedIntConverter(werkzeug.routing.NumberConverter):
    # Werkzeug's stock integer converter rejects negative numbers; this
    # variant accepts an optional leading minus sign.
    regex = r'-?\d+'
    num_convert = int
class IrHttp(models.AbstractModel):
_name = 'ir.http'
_description = "HTTP Routing"
#------------------------------------------------------
# Routing map
#------------------------------------------------------
@classmethod
def _get_converters(cls):
return {'model': ModelConverter, 'models': ModelsConverter, 'int': SignedIntConverter}
    @classmethod
    def _match(cls, path_info, key=None):
        """Match ``path_info`` against the routing map bound to the current
        request environment.

        :param str path_info: URL path to resolve
        :param key: routing-map key (unused here, kept for overrides)
        :return: ``(rule, arguments)`` (``return_rule=True``)
        :raises werkzeug.exceptions.NotFound: when no rule matches
        """
        return cls.routing_map().bind_to_environ(request.httprequest.environ).match(path_info=path_info, return_rule=True)
@classmethod
def _auth_method_user(cls):
request.uid = request.session.uid
if not request.uid:
raise http.SessionExpiredException("Session expired")
    @classmethod
    def _auth_method_none(cls):
        # auth="none": the endpoint runs without any uid at all.
        request.uid = None
@classmethod
def _auth_method_public(cls):
if not request.session.uid:
request.uid = request.env.ref('base.public_user').id
else:
request.uid = request.session.uid
    @classmethod
    def _authenticate(cls, endpoint):
        """Authenticate the current request for ``endpoint``.

        Applies the endpoint's ``auth`` mode (user/public/none/...), after
        re-validating any existing session.

        :param endpoint: the matched routing endpoint
        :return: the effective auth method name (str)
        :raises AccessDenied: on any unexpected authentication failure
        """
        auth_method = endpoint.routing["auth"]
        # CORS preflight (OPTIONS) requests carry no credentials.
        if request._is_cors_preflight(endpoint):
            auth_method = 'none'
        try:
            if request.session.uid:
                try:
                    request.session.check_security()
                    # what if error in security.check()
                    #   -> res_users.check()
                    #   -> res_users._check_credentials()
                except (AccessDenied, http.SessionExpiredException):
                    # All other exceptions mean undetermined status (e.g. connection pool full),
                    # let them bubble up
                    request.session.logout(keep_db=True)
            if request.uid is None:
                # Dispatch to _auth_method_user/_auth_method_public/...
                getattr(cls, "_auth_method_%s" % auth_method)()
        except (AccessDenied, http.SessionExpiredException, werkzeug.exceptions.HTTPException):
            raise
        except Exception:
            # Unexpected failure: log with traceback, deny access.
            _logger.info("Exception during request Authentication.", exc_info=True)
            raise AccessDenied()
        return auth_method
@classmethod
def _handle_debug(cls):
# Store URL debug mode (might be empty) into session
if 'debug' in request.httprequest.args:
debug_mode = []
for debug in request.httprequest.args['debug'].split(','):
if debug not in ALLOWED_DEBUG_MODES:
debug = '1' if str2bool(debug, debug) else ''
debug_mode.append(debug)
debug_mode = ','.join(debug_mode)
# Write on session only when needed
if debug_mode != request.session.debug:
request.session.debug = debug_mode
    @classmethod
    def _serve_attachment(cls):
        """Serve the ``ir.attachment`` registered for the requested URL
        path, if any (404 fallback for static-like assets).

        :return: a werkzeug Response (200, 304 on ETag/date match, or a
            301 redirect for url-type attachments) or ``None`` when no
            attachment matches the path.
        """
        env = api.Environment(request.cr, SUPERUSER_ID, request.context)
        attach = env['ir.attachment'].get_serve_attachment(request.httprequest.path, extra_fields=['name', 'checksum'])
        if attach:
            wdate = attach[0]['__last_update']
            datas = attach[0]['datas'] or b''
            name = attach[0]['name']
            checksum = attach[0]['checksum'] or hashlib.sha512(datas).hexdigest()[:64]  # sha512/256
            # URL-type attachment pointing elsewhere: permanent redirect.
            if (not datas and name != request.httprequest.path and
                    name.startswith(('http://', 'https://', '/'))):
                return request.redirect(name, 301, local=False)

            response = werkzeug.wrappers.Response()
            response.last_modified = wdate
            response.set_etag(checksum)
            # Let werkzeug downgrade the response to 304 when the client's
            # conditional headers match.
            response.make_conditional(request.httprequest)

            if response.status_code == 304:
                return response

            response.mimetype = attach[0]['mimetype'] or 'application/octet-stream'
            response.data = base64.b64decode(datas)
            return response
@classmethod
def _serve_fallback(cls, exception):
# serve attachment
attach = cls._serve_attachment()
if attach:
return attach
return False
    @classmethod
    def _handle_exception(cls, exception):
        """Turn an exception raised during dispatch into an HTTP response.

        :param exception: the exception caught by ``_dispatch``
        :return: a werkzeug response (possibly served from an attachment
            fallback); may re-raise in dev mode.
        """
        # in case of Exception, e.g. 404, we don't step into _dispatch
        cls._handle_debug()

        # If handle_exception returns something different than None, it will be used as a response
        # This is done first as the attachment path may
        # not match any HTTP controller
        if (isinstance(exception, werkzeug.exceptions.HTTPException) and exception.code == 404) or \
                (isinstance(exception, odoo.exceptions.AccessError)):
            serve = cls._serve_fallback(exception)
            if serve:
                return serve

        # Don't handle exception but use werkzeug debugger if server in --dev mode
        # Don't intercept JSON request to respect the JSON Spec and return exception as JSON
        # "The Response is expressed as a single JSON Object, with the following members:
        # jsonrpc, result, error, id"
        if ('werkzeug' in tools.config['dev_mode']
                and not isinstance(exception, werkzeug.exceptions.NotFound)
                and request._request_type != 'json'):
            raise exception

        try:
            return request._handle_exception(exception)
        except AccessDenied:
            # Never leak details of an access failure to the client.
            return werkzeug.exceptions.Forbidden()
    @classmethod
    def _dispatch(cls):
        """Route the current request: match a rule, authenticate, then run
        the controller and return its response.

        Every failure along the way is funnelled into
        ``_handle_exception`` so the client always gets a response.
        """
        cls._handle_debug()

        # locate the controller method
        try:
            rule, arguments = cls._match(request.httprequest.path)
            func = rule.endpoint
        except werkzeug.exceptions.NotFound as e:
            return cls._handle_exception(e)

        # check authentication level
        try:
            auth_method = cls._authenticate(func)
        except Exception as e:
            return cls._handle_exception(e)

        # Give overrides a chance to short-circuit with their own response.
        processing = cls._postprocess_args(arguments, rule)
        if processing:
            return processing

        # set and execute handler
        try:
            request.set_handler(func, arguments, auth_method)
            result = request.dispatch()
            if isinstance(result, Exception):
                raise result
        except Exception as e:
            return cls._handle_exception(e)

        return result
    @classmethod
    def _redirect(cls, location, code=303):
        # 303 See Other by default, so a redirected POST is re-issued as GET.
        return werkzeug.utils.redirect(location, code=code, Response=Response)
@classmethod
def _postprocess_args(cls, arguments, rule):
""" post process arg to set uid on browse records """
for key, val in list(arguments.items()):
# Replace uid placeholder by the current request.uid
if isinstance(val, models.BaseModel) and isinstance(val._uid, RequestUID):
arguments[key] = val.with_user(request.uid)
    @classmethod
    def _generate_routing_rules(cls, modules, converters):
        """Yield ``(url, endpoint, routing)`` tuples for the controllers of
        the given modules (consumed by ``routing_map``)."""
        # NOTE(review): the second positional argument of
        # http._generate_routing_rules is passed as False here — confirm its
        # meaning against odoo.http before changing it.
        return http._generate_routing_rules(modules, False, converters)
    @classmethod
    def routing_map(cls, key=None):
        """Return (building it lazily) the werkzeug routing map covering all
        installed modules, cached on the class per ``key``.

        :param key: cache key for the map (``None`` for the default map)
        :rtype: werkzeug.routing.Map
        """
        if not hasattr(cls, '_routing_map'):
            cls._routing_map = {}
            cls._rewrite_len = {}

        if key not in cls._routing_map:
            _logger.info("Generating routing map for key %s" % str(key))
            installed = request.registry._init_modules | set(odoo.conf.server_wide_modules)
            if tools.config['test_enable'] and odoo.modules.module.current_test:
                installed.add(odoo.modules.module.current_test)
            mods = sorted(installed)
            # Note : when routing map is generated, we put it on the class `cls`
            # to make it available for all instances. Since `env` creates a new
            # instance of the model, each instance would otherwise regenerate its
            # own routing map and thus its EndPoint. The routing map should be static.
            routing_map = werkzeug.routing.Map(strict_slashes=False, converters=cls._get_converters())
            for url, endpoint, routing in cls._generate_routing_rules(mods, converters=cls._get_converters()):
                # Forward only the werkzeug Rule options present in the routing.
                xtra_keys = 'defaults subdomain build_only strict_slashes redirect_to alias host'.split()
                kw = {k: routing[k] for k in xtra_keys if k in routing}
                rule = werkzeug.routing.Rule(url, endpoint=endpoint, methods=routing['methods'], **kw)
                rule.merge_slashes = False
                routing_map.add(rule)
            cls._routing_map[key] = routing_map
        return cls._routing_map[key]
@classmethod
def _clear_routing_map(cls):
if hasattr(cls, '_routing_map'):
cls._routing_map = {}
_logger.debug("Clear routing map")
#------------------------------------------------------
# Binary server
#------------------------------------------------------
    @classmethod
    def _xmlid_to_obj(cls, env, xmlid):
        # Resolve an external id to its record; the second argument disables
        # raise_if_not_found, so an unknown xmlid yields None instead of an error.
        return env.ref(xmlid, False)
    def _get_record_and_check(self, xmlid=None, model=None, id=None, field='datas', access_token=None):
        """Locate the record holding the requested binary and verify access.

        :param str xmlid: external id of the record (takes precedence)
        :param str model: model name, used with ``id``
        :param id: database id of the record
        :param str field: binary field that must exist on the record
        :param str access_token: optional token granting sudo access on
            ir.attachment records
        :return: ``(record, 200)`` on success, ``(None, 403|404)`` otherwise
        """
        # get object and content
        record = None
        if xmlid:
            record = self._xmlid_to_obj(self.env, xmlid)
        elif id and model in self.env:
            record = self.env[model].browse(int(id))

        # obj exists
        if not record or field not in record:
            return None, 404

        try:
            if model == 'ir.attachment':
                record_sudo = record.sudo()
                # A provided-but-wrong token is always a hard 403.
                if access_token and not consteq(record_sudo.access_token or '', access_token):
                    return None, 403
                elif (access_token and consteq(record_sudo.access_token or '', access_token)):
                    record = record_sudo
                elif record_sudo.public:
                    record = record_sudo
                elif self.env.user.has_group('base.group_portal'):
                    # Check the read access on the record linked to the attachment
                    # eg: Allow to download an attachment on a task from /my/task/task_id
                    record.check('read')
                    record = record_sudo

            # check read access
            try:
                # We have prefetched some fields of record, among which the field
                # 'write_date' used by '__last_update' below. In order to check
                # access on record, we have to invalidate its cache first.
                if not record.env.su:
                    record._cache.clear()
                record['__last_update']
            except AccessError:
                return None, 403

            return record, 200
        except MissingError:
            # Record vanished between browse and read.
            return None, 404
@classmethod
def _binary_ir_attachment_redirect_content(cls, record, default_mimetype='application/octet-stream'):
# mainly used for theme images attachemnts
status = content = filename = filehash = None
mimetype = getattr(record, 'mimetype', False)
if record.type == 'url' and record.url:
# if url in in the form /somehint server locally
url_match = re.match("^/(\w+)/(.+)$", record.url)
if url_match:
module = url_match.group(1)
module_path = get_module_path(module)
module_resource_path = get_resource_path(module, url_match.group(2))
if module_path and module_resource_path:
module_path = os.path.join(os.path.normpath(module_path), '') # join ensures the path ends with '/'
module_resource_path = os.path.normpath(module_resource_path)
if module_resource_path.startswith(module_path):
with open(module_resource_path, 'rb') as f:
content = base64.b64encode(f.read())
status = 200
filename = os.path.basename(module_resource_path)
mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)
filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()
if not content:
status = 301
content = record.url
return status, content, filename, mimetype, filehash
    def _binary_record_content(
            self, record, field='datas', filename=None,
            filename_field='name', default_mimetype='application/octet-stream'):
        """Extract content, filename, mimetype and checksum for
        ``record[field]``.

        :param record: record holding the binary field
        :param str field: name of the binary field
        :param str filename: forced filename; built from the record otherwise
        :param str filename_field: field used as filename fallback
        :param str default_mimetype: fallback when sniffing fails
        :return: ``(status, content, filename, mimetype, filehash)`` with
            base64-encoded content, or ``(404, [], None)`` on undecodable data
        """
        model = record._name
        mimetype = 'mimetype' in record and record.mimetype or False
        content = None
        filehash = 'checksum' in record and record['checksum'] or False

        field_def = record._fields[field]
        if field_def.type == 'binary' and field_def.attachment and not field_def.related:
            if model != 'ir.attachment':
                # Attachment-stored binary: read the hidden ir.attachment row
                # directly so mimetype/checksum come for free.
                field_attachment = self.env['ir.attachment'].sudo().search_read(domain=[('res_model', '=', model), ('res_id', '=', record.id), ('res_field', '=', field)], fields=['datas', 'mimetype', 'checksum'], limit=1)
                if field_attachment:
                    mimetype = field_attachment[0]['mimetype']
                    content = field_attachment[0]['datas']
                    filehash = field_attachment[0]['checksum']
            else:
                mimetype = record['mimetype']
                content = record['datas']
                filehash = record['checksum']

        if not content:
            try:
                content = record[field] or ''
            except AccessError:
                # `record[field]` may not be readable for current user -> 404
                content = ''

        # filename
        if not filename:
            if filename_field in record:
                filename = record[filename_field]
            if not filename:
                filename = "%s-%s-%s" % (record._name, record.id, field)

        if not mimetype:
            try:
                decoded_content = base64.b64decode(content)
            except base64.binascii.Error:  # if we could not decode it, no need to pass it down: it would crash elsewhere...
                return (404, [], None)
            mimetype = guess_mimetype(decoded_content, default=default_mimetype)

        # extension: append one derived from the mimetype when missing.
        has_extension = get_extension(filename) or mimetypes.guess_type(filename)[0]
        if not has_extension:
            extension = mimetypes.guess_extension(mimetype)
            if extension:
                filename = "%s%s" % (filename, extension)

        if not filehash:
            filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()

        status = 200 if content else 404
        return status, content, filename, mimetype, filehash
def _binary_set_headers(self, status, content, filename, mimetype, unique, filehash=None, download=False):
headers = [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff'), ('Content-Security-Policy', "default-src 'none'")]
# cache
etag = bool(request) and request.httprequest.headers.get('If-None-Match')
status = status or 200
if filehash:
headers.append(('ETag', filehash))
if etag == filehash and status == 200:
status = 304
headers.append(('Cache-Control', 'max-age=%s' % (http.STATIC_CACHE_LONG if unique else 0)))
# content-disposition default name
if download:
headers.append(('Content-Disposition', content_disposition(filename)))
return (status, headers, content)
    def binary_content(self, xmlid=None, model='ir.attachment', id=None, field='datas',
                       unique=False, filename=None, filename_field='name', download=False,
                       mimetype=None, default_mimetype='application/octet-stream',
                       access_token=None):
        """ Get file, attachment or downloadable content

        If the ``xmlid`` and ``id`` parameter is omitted, fetches the default value for the
        binary field (via ``default_get``), otherwise fetches the field for
        that precise record.

        :param str xmlid: xmlid of the record
        :param str model: name of the model to fetch the binary from
        :param int id: id of the record from which to fetch the binary
        :param str field: binary field
        :param bool unique: add a max-age for the cache control
        :param str filename: choose a filename
        :param str filename_field: if not set, create a filename with model-id-field
        :param bool download: apply headers to download the file
        :param str mimetype: mimetype of the field (for headers)
        :param str default_mimetype: default mimetype if no mimetype found
        :param str access_token: optional token for unauthenticated access
                                 only available for ir.attachment
        :returns: (status, headers, content)
        """
        record, status = self._get_record_and_check(xmlid=xmlid, model=model, id=id, field=field, access_token=access_token)

        if not record:
            return (status or 404, [], None)

        content, headers, status = None, [], None

        if record._name == 'ir.attachment':
            # url-type attachments may resolve to a local module file (200)
            # or a redirect (301).
            status, content, default_filename, mimetype, filehash = self._binary_ir_attachment_redirect_content(record, default_mimetype=default_mimetype)
            filename = filename or default_filename

        if not content:
            status, content, filename, mimetype, filehash = self._binary_record_content(
                record, field=field, filename=filename, filename_field=filename_field,
                default_mimetype='application/octet-stream')

        status, headers, content = self._binary_set_headers(
            status, content, filename, mimetype, unique, filehash=filehash, download=download)

        return status, headers, content
def _response_by_status(self, status, headers, content):
if status == 304:
return werkzeug.wrappers.Response(status=status, headers=headers)
elif status == 301:
return request.redirect(content, code=301, local=False)
elif status != 200:
return request.not_found()
| 41.058468 | 20,365 |
59,510 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
import logging
import os.path
import re
import traceback
import builtins
import token
import tokenize
import io
from markupsafe import Markup, escape
from collections.abc import Sized, Mapping
from itertools import count, chain
from textwrap import dedent, indent as _indent
from lxml import etree
from psycopg2.extensions import TransactionRollbackError
from odoo.tools import pycompat, freehash
_logger = logging.getLogger(__name__)
# same list as for `--dev-mode`
SUPPORTED_DEBUGGERS = ['pdb', 'ipdb', 'pudb', 'wdb']

# Register a private token type (just below the non-terminal offset) so QWeb
# placeholders can be tagged during tokenization.
token.QWEB = token.NT_OFFSET - 1
token.tok_name[token.QWEB] = 'QWEB'

####################################
###          qweb tools          ###
####################################
class QWebCodeFound(Exception):
    """
    Exception raised when a qweb compilation encounters dynamic content,
    if the option `raise_on_code` is True.
    """
class QWebException(Exception):
    """Wrap any error raised while compiling or rendering a QWeb template,
    enriching the message with template name, node path and (in dev mode)
    the generated code."""

    def __init__(self, message, qweb, options, error=None, template=None, path=None, code=None):
        self.error = error
        self.name = template
        # Generated code is only exposed in dev mode.
        self.code = code if options.get('dev_mode') else None
        self.path = path
        self.html = None
        if template is not None and path and ':' not in path:
            # Locate the failing node and strip its children/text so only
            # the offending element is shown.
            element = qweb._get_template(template, options)[0]
            nodes = element.getroottree().xpath(self.path)
            if nodes:
                node = nodes[0]
                node[:] = []
                node.text = None
                self.html = etree.tostring(node, encoding='unicode')
        self.stack = traceback.format_exc()
        self.message = message
        if self.error is not None:
            self.message = "%s\n%s: %s" % (self.message, self.error.__class__.__name__, self.error)
        if self.name is not None:
            self.message = "%s\nTemplate: %s" % (self.message, self.name)
        if self.path is not None:
            self.message = "%s\nPath: %s" % (self.message, self.path)
        if self.html is not None:
            self.message = "%s\nNode: %s" % (self.message, self.html)
        super(QWebException, self).__init__(message)

    def __str__(self):
        message = "%s\n%s\n%s" % (self.error, self.stack, self.message)
        if self.code is not None:
            message = "%s\nCompiled code:\n%s" % (message, self.code)
        return message

    def __repr__(self):
        return str(self)
class frozendict(dict):
    """A read-only ``dict``: every mutating operation raises
    ``NotImplementedError`` and instances are hashable."""

    def __setitem__(self, key, val):
        """Refuse item assignment."""
        raise NotImplementedError("'__setitem__' not supported on frozendict")

    def __delitem__(self, key):
        """Refuse item deletion."""
        raise NotImplementedError("'__delitem__' not supported on frozendict")

    def clear(self):
        """Refuse bulk removal."""
        raise NotImplementedError("'clear' not supported on frozendict")

    def pop(self, key, default=None):
        """Refuse destructive lookup."""
        raise NotImplementedError("'pop' not supported on frozendict")

    def popitem(self):
        """Refuse destructive iteration."""
        raise NotImplementedError("'popitem' not supported on frozendict")

    def setdefault(self, key, default=None):
        """Refuse insert-if-missing."""
        raise NotImplementedError("'setdefault' not supported on frozendict")

    def update(self, *args, **kwargs):
        """Refuse bulk update."""
        raise NotImplementedError("'update' not supported on frozendict")

    def __hash__(self):
        # Hash the frozen set of (key, value-hash) pairs; `freehash` copes
        # with unhashable values.
        return hash(frozenset((k, freehash(v)) for k, v in self.items()))
# Alias making the deliberate uses of eval() on generated template code
# grep-able; ir_qweb restricts the globals handed to it.
unsafe_eval = eval

_FORMAT_REGEX = re.compile(r'(?:#\{(.+?)\})|(?:\{\{(.+?)\}\})')  # ( ruby-style )|( jinja-style )
_VARNAME_REGEX = re.compile(r'\W')  # any character not valid in an identifier
####################################
### QWeb ###
####################################
class QWeb(object):
    # Stateless compiler/renderer: no per-instance attributes.
    __slots__ = ()

    # HTML void elements: emitted self-closed, never with an end tag.
    _void_elements = frozenset([
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
        'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])
    # itertools.count(): source of unique sequential ids (consumers are
    # outside this chunk — TODO confirm usage).
    _name_gen = count()
    # _available_objects builtins is not security safe (it's dangerous), is overridden by ir_qweb to only expose the safe_eval builtins.
    _available_objects = {k: v for k, v in vars(builtins).items() if not k.startswith('_')}
    # Python keywords tolerated inside template expressions.
    _allowed_keyword = ['False', 'None', 'True', 'and', 'as', 'elif', 'else', 'for', 'if', 'in', 'is', 'not', 'or']
def _render(self, template, values=None, **options):
""" render(template, values, **options)
Render the template specified by the given name.
:param template: template identifier, name or etree (see ``_get_template``)
:param dict values: template values to be used for rendering
:param options: used to compile the template (the dict available for the rendering is frozen)
* ``load`` (function) overrides the load method (returns: (template, ref))
:returns: str as Markup
:rtype: markupsafe.Markup
"""
if values and 0 in values:
raise ValueError('values[0] should be unset when call the _render method and only set into the template.')
render_template = self._compile(template, options)
rendering = render_template(self, values or {})
result = ''.join(rendering)
return Markup(result)
    def _compile(self, template, options):
        """ Compile the given template into a rendering function (generator)::

            render(qweb, values)

        where ``qweb`` is a QWeb instance and ``values`` are the values to render.

        The template etree is translated into Python source, compiled with
        ``compile``/``exec``, and the resulting generator is wrapped so any
        runtime failure is re-raised as a ``QWebException``.
        """
        if options is None:
            options = {}

        element, document, ref = self._get_template(template, options)
        if not ref:
            ref = element.get('t-name', str(document))

        # reference to get xml and etree (usually the template ID)
        options['ref'] = ref
        # str xml of the reference template used for compilation. Useful for debugging, dev mode and profiling.
        options['ref_xml'] = document if isinstance(document, str) else str(document, 'utf-8')

        _options = dict(options)
        options = frozendict(options)

        # Initial template value sent to render method (not in the frozen dict because it may be
        # different from one render to another. Indeed, it may be the view ID or the key)
        _options['template'] = template
        # Root of the etree which will be processed during compilation.
        _options['root'] = element.getroottree()
        # Reference to the last node being compiled. It is mainly used for debugging and displaying
        # error messages.
        _options['last_path_node'] = None

        if not options.get('nsmap'):
            _options['nsmap'] = {}

        # generate code

        def_name = f"template_{ref}" if isinstance(ref, int) else "template"

        try:
            _options['_text_concat'] = []
            self._appendText("", _options)  # To ensure the template function is a generator and doesn't become a regular function
            code_lines = ([f"def {def_name}(self, values, log):"] +
                self._compile_node(element, _options, 1) +
                self._flushText(_options, 1))
        except QWebException as e:
            raise e
        except QWebCodeFound as e:
            # Propagated untouched: signals dynamic content when
            # raise_on_code is set.
            raise e
        except Exception as e:
            raise QWebException("Error when compiling xml template", self, options,
                error=e, template=template, path=_options.get('last_path_node'))
        try:
            code = '\n'.join(code_lines)
        except QWebException as e:
            raise e
        except Exception as e:
            # Joining failed (non-str line): stringify for the error report.
            code = '\n'.join(map(str, code_lines))
            raise QWebException("Error when compiling xml template", self, options,
                error=e, template=template, code=code)

        # compile code and defined default values
        try:
            # noinspection PyBroadException
            compiled = compile(code, f'<{def_name}>', 'exec')
            globals_dict = self._prepare_globals({}, options)
            globals_dict['__builtins__'] = globals_dict  # So that unknown/unsafe builtins are never added.
            unsafe_eval(compiled, globals_dict)
            compiled_fn = globals_dict[def_name]
        except QWebException as e:
            raise e
        except Exception as e:
            raise QWebException("Error when compiling xml template", self, options,
                error=e, template=template, code=code)

        # return the wrapped function
        def render_template(self, values):
            try:
                log = {'last_path_node': None}
                values = self._prepare_values(values, options)
                yield from compiled_fn(self, values, log)
            except (QWebException, TransactionRollbackError) as e:
                raise e
            except Exception as e:
                raise QWebException("Error when render the template", self, options,
                    error=e, template=template, path=log.get('last_path_node'), code=code)

        return render_template
def _get_template(self, template, options):
    """ Retrieve the given template, and return it as a tuple ``(etree,
    xml, ref)``, where ``element`` is an etree, ``document`` is the
    string document that contains ``element``, and ``ref`` if the uniq
    reference of the template (id, t-name or template).

    :param template: template identifier, name or etree
    :param options: used to compile the template (the dict available for
        the rendering is frozen)
        ``load`` (function) overrides the load method
    :raises QWebException: when the template cannot be loaded or found
    """
    ref = template
    if isinstance(template, etree._Element):
        # Already parsed: serialize it back so callers always receive a
        # (element, document, ref) triple of consistent shape.
        element = template
        document = etree.tostring(template)
        return (element, document, template.get('t-name'))
    else:
        try:
            # A `load` callable in options takes precedence over `_load`.
            loaded = options.get('load', self._load)(template, options)
            if not loaded:
                raise ValueError("Can not load template '%s'" % template)
            document, ref = loaded
        except QWebException as e:
            # Already wrapped: propagate untouched.
            raise e
        except Exception as e:
            template = options.get('caller_template', template)
            raise QWebException("load could not load template", self, options, e, template)

    if document is None:
        raise QWebException("Template not found", self, options, template=template)

    if isinstance(document, etree._Element):
        element = document
        document = etree.tostring(document, encoding='utf-8')
    elif not document.strip().startswith('<') and os.path.exists(document):
        # The "document" is actually a filesystem path: parse that file.
        element = etree.parse(document).getroot()
    else:
        element = etree.fromstring(document)

    # A loaded document may bundle several templates; return the child whose
    # t-name matches the requested identifier, else the root element itself.
    for node in element:
        if node.get('t-name') == str(template):
            return (node, document, ref)
    return (element, document, ref)
def _load(self, template, options):
""" Load a given template and return a tuple ``(xml, ref)``` """
return (template, None)
# values for running time
def _prepare_values(self, values, options):
""" Prepare the context that will sent to the compiled and evaluated
function.
:param values: template values to be used for rendering
:param options: frozen dict of compilation parameters.
"""
return values
def _prepare_globals(self, globals_dict, options):
    """Populate the global namespace handed to the evaluated qweb code.

    :param globals_dict: template global values use in compiled code
    :param options: frozen dict of compilation parameters.
    :return: the same dict, augmented with the helper names the
        generated code relies on
    """
    globals_dict.update(
        Sized=Sized,
        Mapping=Mapping,
        Markup=Markup,
        escape=escape,
        compile_options=options,
    )
    globals_dict.update(self._available_objects)
    return globals_dict
# compute helpers
def _appendText(self, text, options):
    """Queue *text* (coerced to str) in the pending-text buffer.

    Buffered chunks are concatenated and emitted as a single ``yield``
    by the next `_flushText` call.
    """
    buffer = options['_text_concat']
    buffer.append(self._compile_to_str(text))
def _flushText(self, options, indent):
"""Concatenate all the textual chunks added by the `_appendText`
method into a single yield."""
text_concat = options['_text_concat']
if text_concat:
text = ''.join(text_concat)
text_concat.clear()
return [f"{' ' * indent}yield {repr(text)}"]
else:
return []
def _indent(self, code, indent):
    """Prefix *code* with *indent* levels of indentation so the
    generated python is syntactically valid."""
    prefix = ' ' * indent
    return _indent(code, prefix)
def _make_name(self, prefix='var'):
    """Return a fresh, unique symbol name built from *prefix* and the
    instance-wide name counter."""
    suffix = next(self._name_gen)
    return f"{prefix}_{suffix}"
def _compile_node(self, el, options, indent):
    """ Compile the given element into python code.

    The t-* attributes (directives) will be converted to a python instruction. If there
    are no t-* attributes, the element will be considered static.

    Directives are compiled using the order provided by the
    ``_directives_eval_order`` method (an create the
    ``options['iter_directives']`` iterator).
    For compilation, the directives supported are those with a
    compilation method ``_compile_directive_*``

    :param el: etree element to compile
    :param options: compilation options (mutated: directive iterator,
        last compiled path)
    :param indent: indentation level of the generated python code
    :return: list of string
    """
    # if tag don't have qweb attributes don't use directives
    if self._is_static_node(el, options):
        return self._compile_static_node(el, options, indent)

    # `raise_on_code` is used by callers that only want to know whether the
    # template contains dynamic code at all (see QWebCodeFound handling).
    if options.get('raise_on_code'):
        raise QWebCodeFound()

    # Record the element path in the generated code so runtime errors can be
    # mapped back to the template node; skip if unchanged since last node.
    path = options['root'].getpath(el)
    if options['last_path_node'] != path:
        options['last_path_node'] = path
        body = [self._indent(f'log["last_path_node"] = {repr(path)}', indent)]
    else:
        body = []

    # create an iterator on directives to compile in order
    options['iter_directives'] = iter(self._directives_eval_order() + [None])

    # `t-tag` / `t-content` are technical directives added so the tag and
    # its content are always compiled even without explicit output directives.
    el.set('t-tag', el.tag)
    if not ({'t-out', 't-esc', 't-raw', 't-field'} & set(el.attrib)):
        el.set('t-content', 'True')

    return body + self._compile_directives(el, options, indent)
def _compile_directives(self, el, options, indent):
    """Compile ``el`` by dispatching on the next directive found in the
    shared ``options['iter_directives']`` iterator created by
    ``_compile_node``.

    :return: list of code lines
    """
    if self._is_static_node(el, options):
        # Every dynamic directive has been consumed: drop the technical
        # markers and fall back to the static compilation path.
        el.attrib.pop('t-tag', None)
        el.attrib.pop('t-content', None)
        return self._compile_static_node(el, options, indent)

    # Consume the shared iterator until a directive present on the element
    # is found; this guarantees each directive is handled at most once and
    # in evaluation order.
    for name in options['iter_directives']:
        if f't-{name}' in el.attrib:
            return self._compile_directive(el, options, name, indent)

    return []
def _compile_format(self, expr):
    """Compile a ``t-attf``-style format string into a single python
    expression using ``str.format`` (faster than concatenation).

    Literal segments are brace-escaped; each ``{{...}}`` / ``#{...}``
    placeholder becomes a ``format`` argument.

    :return: python expression as a string
    """
    chunks = []
    params = []
    last_end = 0
    for match in _FORMAT_REGEX.finditer(expr):
        head = expr[last_end:match.start()]
        if head:
            # Escape literal braces so str.format leaves them alone.
            chunks.append(head.replace('{', '{{').replace("}", "}}"))
        chunks.append('{}')
        inner = match.group(1) or match.group(2)
        params.append(f'self._compile_to_str({self._compile_expr(inner)})')
        last_end = match.end()

    # Literal text after the last placeholder.
    tail = expr[last_end:]
    if tail:
        chunks.append(tail.replace('{', '{{').replace("}", "}}"))

    code = repr(''.join(chunks))
    if params:
        code += f'.format({", ".join(params)})'
    return code
def _compile_expr_tokens(self, tokens, allowed_keys, argument_names=None, raise_on_missing=False):
    """ Transform the list of token coming into a python instruction in
    textual form by adding the namepaces for the dynamic values.

    Example: `5 + a + b.c` to be `5 + values.get('a') + values['b'].c`
    Unknown values are considered to be None, but using `values['b']`
    gives a clear error message in cases where there is an attribute for
    example (have a `KeyError: 'b'`, instead of `AttributeError: 'NoneType'
    object has no attribute 'c'`).

    :param tokens: list of ``tokenize.TokenInfo`` for one expression
    :param allowed_keys: names that must be emitted as-is (not namespaced)
    :param argument_names: names bound by an enclosing lambda/comprehension
    :param raise_on_missing: force ``values['key']`` (KeyError) over
        ``values.get('key')`` for unknown names
    @returns str
    """
    # Phase 1 — scan for lambda/comprehension argument names.
    #
    # Finds and extracts the current "scope"'s "allowed values": values
    # which should not be accessed through the environment's namespace:
    # * the local variables of a lambda should be accessed directly e.g.
    #     lambda a: a + b should be compiled to lambda a: a + values['b'],
    #     since a is local to the lambda it has to be accessed directly
    #     but b needs to be accessed through the rendering environment
    # * similarly for a comprehensions [a + b for a in c] should be
    #     compiledto [a + values.get('b') for a in values.get('c')]
    # to avoid the risk of confusion between nested lambdas / comprehensions,
    # this is currently performed independently at each level of brackets
    # nesting (hence the function being recursive).
    index = 0
    open_bracket_index = -1
    bracket_depth = 0

    argument_name = '_arg_%s__'
    argument_names = argument_names or []

    while index < len(tokens):
        t = tokens[index]
        if t.exact_type in [token.LPAR, token.LSQB, token.LBRACE]:
            bracket_depth += 1
        if t.exact_type in [token.RPAR, token.RSQB, token.RBRACE]:
            bracket_depth -= 1
        elif bracket_depth == 0 and t.exact_type == token.NAME:
            string = t.string
            if string == 'lambda': # lambda => allowed values for the current bracket depth
                # Collect parameter names up to the ':' of the lambda.
                i = index + 1
                while i < len(tokens):
                    t = tokens[i]
                    if t.exact_type == token.NAME:
                        argument_names.append(t.string)
                    elif t.exact_type == token.COMMA:
                        pass
                    elif t.exact_type == token.COLON:
                        break
                    elif t.exact_type == token.EQUAL:
                        raise NotImplementedError('Lambda default values are not supported')
                    else:
                        raise NotImplementedError('This lambda code style is not implemented.')
                    i += 1
            elif string == 'for': # list comprehensions => allowed values for the current bracket depth
                # Collect loop variable names up to the 'in' keyword.
                i = index + 1
                while len(tokens) > i:
                    t = tokens[i]
                    if t.exact_type == token.NAME:
                        if t.string == 'in':
                            break
                        argument_names.append(t.string)
                    elif t.exact_type in [token.COMMA, token.LPAR, token.RPAR]:
                        pass
                    else:
                        raise NotImplementedError('This loop code style is not implemented.')
                    i += 1
        index += 1

    # Phase 2 — recursively compile bracketed sub-expressions.
    #
    # Use bracket to nest structures.
    # Recursively processes the "sub-scopes", and replace their content with
    # a compiled node. During this recursive call we add to the allowed
    # values the values provided by the list comprehension, lambda, etc.,
    # previously extracted.
    index = 0
    open_bracket_index = -1
    bracket_depth = 0

    while index < len(tokens):
        t = tokens[index]
        string = t.string

        if t.exact_type in [token.LPAR, token.LSQB, token.LBRACE]:
            if bracket_depth == 0:
                open_bracket_index = index
            bracket_depth += 1
        elif t.exact_type in [token.RPAR, token.RSQB, token.RBRACE]:
            bracket_depth -= 1
            if bracket_depth == 0:
                # Replace the whole bracketed span (brackets included) by a
                # single pre-compiled pseudo-token.
                # NOTE(review): token.QWEB is not a stdlib token type — it is
                # presumably registered elsewhere in this module; confirm.
                code = self._compile_expr_tokens(
                    tokens[open_bracket_index + 1:index],
                    list(allowed_keys),
                    list(argument_names),
                    raise_on_missing,
                )
                code = tokens[open_bracket_index].string + code + t.string
                tokens[open_bracket_index:index + 1] = [tokenize.TokenInfo(token.QWEB, code, tokens[open_bracket_index].start, t.end, '')]
                index = open_bracket_index

        index += 1

    # Phase 3 — emit the final code, namespacing unknown names.
    #
    # The keys will be namespaced by values if they are not allowed. In
    # order to have a clear keyError message, this will be replaced by
    # values['key'] for certain cases (for example if an attribute is called
    # key.attrib, or an index key[0] ...)
    code = []
    index = 0
    pos = tokens and tokens[0].start # to keep indent when use expr on multi line
    while index < len(tokens):
        t = tokens[index]
        string = t.string

        # Re-create inter-token spacing from the token positions.
        if t.start[0] != pos[0]:
            pos = (t.start[0], 0)
        space = t.start[1] - pos[1]
        if space:
            code.append(' ' * space)
        pos = t.start

        if t.exact_type == token.NAME:
            if string == 'lambda': # lambda => allowed values
                # Emit the lambda header, renaming bound parameters to their
                # private `_arg_X__` form.
                code.append('lambda ')
                index += 1
                while index < len(tokens):
                    t = tokens[index]
                    if t.exact_type == token.NAME and t.string in argument_names:
                        code.append(argument_name % t.string)
                    if t.exact_type in [token.COMMA, token.COLON]:
                        code.append(t.string)
                    if t.exact_type == token.COLON:
                        break
                    index += 1
                if t.end[0] != pos[0]:
                    pos = (t.end[0], 0)
                else:
                    pos = t.end
            elif string in argument_names:
                # Name bound by a lambda/comprehension: direct access.
                code.append(argument_name % t.string)
            elif string in allowed_keys:
                code.append(string)
            elif index + 1 < len(tokens) and tokens[index + 1].exact_type == token.EQUAL: # function kw
                code.append(string)
            elif index > 0 and tokens[index - 1] and tokens[index - 1].exact_type == token.DOT:
                # Attribute access: keep the name as-is.
                code.append(string)
            elif raise_on_missing or index + 1 < len(tokens) and tokens[index + 1].exact_type in [token.DOT, token.LPAR, token.LSQB, 'qweb']:
                # Should have values['product'].price to raise an error when get
                # the 'product' value and not an 'NoneType' object has no
                # attribute 'price' error.
                code.append(f'values[{repr(string)}]')
            else:
                # not assignation allowed, only getter
                code.append(f'values.get({repr(string)})')
        elif t.type not in [tokenize.ENCODING, token.ENDMARKER, token.DEDENT]:
            code.append(string)

        if t.end[0] != pos[0]:
            pos = (t.end[0], 0)
        else:
            pos = t.end

        index += 1

    return ''.join(code)
def _compile_expr(self, expr, raise_on_missing=False):
"""This method must be overridden by <ir.qweb> in order to compile the template."""
raise NotImplementedError("Templates should use the ir.qweb compile method")
def _compile_bool(self, attr, default=False):
"""Convert the statements as a boolean."""
if attr:
if attr is True:
return True
attr = attr.lower()
if attr in ('false', '0'):
return False
elif attr in ('true', '1'):
return True
return bool(default)
def _compile_to_str(self, expr):
    """ Generates a text value (an instance of text_type) from an arbitrary
    source.
    """
    # Delegates to the project's pycompat helper (bytes are decoded,
    # everything else goes through str()).
    return pycompat.to_text(expr)
# order
def _directives_eval_order(self):
""" List all supported directives in the order in which they should be
evaluated on a given element. For instance, a node bearing both
``foreach`` and ``if`` should see ``foreach`` executed before ``if`` aka
.. code-block:: xml
<el t-foreach="foo" t-as="bar" t-if="bar">
should be equivalent to
.. code-block:: xml
<t t-foreach="foo" t-as="bar">
<t t-if="bar">
<el>
then this method should return ``['foreach', 'if']``.
"""
return [
'debug',
'foreach',
'if', 'elif', 'else',
'field', 'esc', 'raw', 'out',
'tag',
'call',
'set',
'content',
]
def _is_static_node(self, el, options):
""" Test whether the given element is purely static, i.e. (there
are no t-* attributes), does not require dynamic rendering for its
attributes.
"""
return el.tag != 't' and not any(att.startswith('t-') and att not in ['t-tag', 't-content'] for att in el.attrib)
# compile
def _compile_static_node(self, el, options, indent):
    """ Compile a purely static element into a list of string.

    The opening/closing markup is appended to the text buffer (so it is
    coalesced with surrounding text); only the children may contribute
    code lines.

    :param el: etree element with no dynamic directives
    :param options: compilation options (``nsmap`` is temporarily mutated)
    :param indent: indentation level of the generated python code
    :return: list of code lines produced by the children
    """
    if not el.nsmap:
        unqualified_el_tag = el_tag = el.tag
        attrib = self._post_processing_att(el.tag, el.attrib, options)
    else:
        # Etree will remove the ns prefixes indirection by inlining the corresponding
        # nsmap definition into the tag attribute. Restore the tag and prefix here.
        unqualified_el_tag = etree.QName(el.tag).localname
        el_tag = unqualified_el_tag
        if el.prefix:
            el_tag = f'{el.prefix}:{el_tag}'

        attrib = {}
        # If `el` introduced new namespaces, write them as attribute by using the
        # `attrib` dict.
        for ns_prefix, ns_definition in set(el.nsmap.items()) - set(options['nsmap'].items()):
            if ns_prefix is None:
                attrib['xmlns'] = ns_definition
            else:
                attrib[f'xmlns:{ns_prefix}'] = ns_definition

        # Etree will also remove the ns prefixes indirection in the attributes. As we only have
        # the namespace definition, we'll use an nsmap where the keys are the definitions and
        # the values the prefixes in order to get back the right prefix and restore it.
        ns = chain(options['nsmap'].items(), el.nsmap.items())
        nsprefixmap = {v: k for k, v in ns}
        for key, value in el.attrib.items():
            attrib_qname = etree.QName(key)
            if attrib_qname.namespace:
                attrib[f'{nsprefixmap[attrib_qname.namespace]}:{attrib_qname.localname}'] = value
            else:
                attrib[key] = value

        attrib = self._post_processing_att(el.tag, attrib, options)

        # Update the dict of inherited namespaces before continuing the recursion. Note:
        # since `options['nsmap']` is a dict (and therefore mutable) and we do **not**
        # want changes done in deeper recursion to bevisible in earlier ones, we'll pass
        # a copy before continuing the recursion and restore the original afterwards.
        original_nsmap = dict(options['nsmap'])

    if unqualified_el_tag != 't':
        # Attributes with a falsy non-string value are dropped; empty
        # strings are kept (`value or isinstance(value, str)`).
        attributes = ''.join(f' {str(name)}="{str(escape(str(value)))}"'
                            for name, value in attrib.items() if value or isinstance(value, str))
        self._appendText(f'<{el_tag}{attributes}', options)
        if unqualified_el_tag in self._void_elements:
            self._appendText('/>', options)
        else:
            self._appendText('>', options)

    if el.nsmap:
        options['nsmap'].update(el.nsmap)
        body = self._compile_directive_content(el, options, indent)
        options['nsmap'] = original_nsmap
    else:
        body = self._compile_directive_content(el, options, indent)

    if unqualified_el_tag != 't':
        # Void elements were already self-closed above.
        if unqualified_el_tag not in self._void_elements:
            self._appendText(f'</{el_tag}>', options)

    return body
def _compile_attributes(self, options, indent):
    """Generates the part of the code that post-process the attributes
    (this is ``attrs`` in the compiled code) during rendering time.

    Expects the generated code to already define ``attrs`` and
    ``tagName`` (see `_compile_all_attributes`).

    :return: list of code lines
    """
    # Use str(value) to change Markup into str and escape it, then use str
    # to avoid the escaping of the other html content.
    # Flush buffered text first so the attributes are yielded in order.
    body = self._flushText(options, indent)
    body.append(self._indent(dedent("""
        attrs = self._post_processing_att(tagName, attrs, compile_options)
        for name, value in attrs.items():
            if value or isinstance(value, str):
                yield f' {str(escape(str(name)))}="{str(escape(str(value)))}"'
        """).strip(), indent))
    return body
def _compile_static_attributes(self, el, options, indent):
    """Compile the element's non-directive attributes into assignments
    on the generated ``attrs`` dict.

    We do not support namespaced dynamic attributes.

    :return: list of code lines
    """
    # Etree inlines namespace prefixes in attribute names; invert the
    # combined nsmap (definition -> prefix) to restore the prefixed form.
    nsprefixmap = {v: k for k, v in chain(options['nsmap'].items(), el.nsmap.items())}

    lines = []
    for name, value in el.attrib.items():
        if name.startswith('t-'):
            continue
        qname = etree.QName(name)
        if qname.namespace:
            name = f'{nsprefixmap[qname.namespace]}:{qname.localname}'
        lines.append(self._indent(f'attrs[{repr(name)}] = {repr(value)}', indent))
    return lines
def _compile_dynamic_attributes(self, el, options, indent):
    """ Compile the dynamic attributes of the given element into a list
    string (this is adding elements to ``attrs`` in the compiled code).

    Handles three forms:
    * ``t-attf-name``: format string compiled via `_compile_format`;
    * ``t-att-name``: python expression;
    * ``t-att``: expression yielding a dict, a (name, value) pair, or a
      list of pairs, merged into ``attrs`` at rendering time.

    We do not support namespaced dynamic attributes.

    :return: list of code lines
    """
    code = []
    for name, value in el.attrib.items():
        if name.startswith('t-attf-'):
            code.append(self._indent(f"attrs[{repr(name[7:])}] = {self._compile_format(value)}", indent))
        elif name.startswith('t-att-'):
            code.append(self._indent(f"attrs[{repr(name[6:])}] = {self._compile_expr(value)}", indent))
        elif name == 't-att':
            # A single (name, value) pair is detected by its first item not
            # being a list/tuple itself.
            code.append(self._indent(dedent(f"""
                atts_value = {self._compile_expr(value)}
                if isinstance(atts_value, dict):
                    attrs.update(atts_value)
                elif isinstance(atts_value, (list, tuple)) and not isinstance(atts_value[0], (list, tuple)):
                    attrs.update([atts_value])
                elif isinstance(atts_value, (list, tuple)):
                    attrs.update(dict(atts_value))
                """), indent))
    return code
def _compile_all_attributes(self, el, options, indent, attr_already_created=False):
    """Compile every attribute (static and dynamic) of the element.

    The generated code builds the ``attrs`` dict (unless the caller
    already created it) and then post-processes/yields it.

    :return: list of code lines
    """
    code = []
    # An `attrs` dict is needed as soon as there is any plain attribute or
    # any t-att* directive.
    needs_attrs = any(
        name.startswith('t-att') or not name.startswith('t-')
        for name in el.attrib
    )
    if needs_attrs:
        if not attr_already_created:
            attr_already_created = True
            code.append(self._indent("attrs = {}", indent))
        code += self._compile_static_attributes(el, options, indent)
        code += self._compile_dynamic_attributes(el, options, indent)
    if attr_already_created:
        code.append(self._indent(f"tagName = {repr(el.tag)}", indent))
        code += self._compile_attributes(options, indent)
    return code
def _compile_tag_open(self, el, options, indent, attr_already_created=False):
    """ Compile the opening tag of the given element into a list of string.

    The literal markup goes into the text buffer; the attribute handling
    produces code lines (returned). ``<t>`` elements produce no markup.

    :return: list of code lines
    """
    extra_attrib = {}
    if not el.nsmap:
        unqualified_el_tag = el_tag = el.tag
    else:
        # Etree will remove the ns prefixes indirection by inlining the corresponding
        # nsmap definition into the tag attribute. Restore the tag and prefix here.
        # Note: we do not support namespace dynamic attributes, we need a default URI
        # on the root and use attribute directive t-att="{'xmlns:example': value}".
        unqualified_el_tag = etree.QName(el.tag).localname
        el_tag = unqualified_el_tag
        if el.prefix:
            el_tag = f'{el.prefix}:{el_tag}'

        # If `el` introduced new namespaces, write them as attribute by using the
        # `extra_attrib` dict.
        for ns_prefix, ns_definition in set(el.nsmap.items()) - set(options['nsmap'].items()):
            if ns_prefix is None:
                extra_attrib['xmlns'] = ns_definition
            else:
                extra_attrib[f'xmlns:{ns_prefix}'] = ns_definition

    code = []
    if unqualified_el_tag != 't':
        attributes = ''.join(f' {str(name)}="{str(escape(self._compile_to_str(value)))}"'
                            for name, value in extra_attrib.items())
        self._appendText("<{}{}".format(el_tag, attributes), options)

        code.extend(self._compile_all_attributes(el, options, indent, attr_already_created))

        # Void elements (e.g. <br>, <img>) are self-closed immediately.
        if unqualified_el_tag in self._void_elements:
            self._appendText('/>', options)
        else:
            self._appendText('>', options)

    return code
def _compile_tag_close(self, el, options):
    """Append the element's closing tag to the text buffer.

    ``<t>`` elements and void elements produce no closing markup.

    :return: empty list (closing markup is buffered, not code)
    """
    if el.nsmap:
        # Restore the prefixed tag name inlined by etree.
        unqualified = etree.QName(el.tag).localname
        name = f'{el.prefix}:{unqualified}' if el.prefix else unqualified
    else:
        unqualified = name = el.tag

    if unqualified != 't' and name not in self._void_elements:
        self._appendText(f'</{name}>', options)
    return []
# compile directives
def _compile_directive(self, el, options, directive, indent):
    """Dispatch the compilation of one directive to its dedicated
    ``_compile_directive_<name>`` method.

    :return: list of code lines produced by the handler
    """
    handler_name = f"_compile_directive_{directive.replace('-', '_')}"
    compile_handler = getattr(self, handler_name, None)
    return compile_handler(el, options, indent)
def _compile_directive_debug(self, el, options, indent):
    """Compile ``t-debug`` into a `_debug_trace` call in the generated
    code (dev mode only; otherwise a warning is logged at compile time).

    :return: list of code lines
    """
    debugger = el.attrib.pop('t-debug')
    code = []
    if not options.get('dev_mode'):
        _logger.warning("@t-debug in template is only available in qweb dev mode options")
    else:
        code.append(self._indent(f"self._debug_trace({repr(debugger)}, compile_options)", indent))
    # Continue with the remaining directives on the element.
    code.extend(self._compile_directives(el, options, indent))
    return code
def _compile_directive_options(self, el, options, indent):
    """Compile ``t-options`` and ``t-options-*`` attributes into one
    dict assignment in the generated code.

    The target variable name comes from ``options['t_options_varname']``
    (default ``t_options``).

    :return: list of code lines (empty when no option attribute exists)
    """
    varname = options.get('t_options_varname', 't_options')

    # Collect the individual t-options-xxx entries as "key: expr" pairs.
    dict_arg = []
    for key in list(el.attrib):
        if key.startswith('t-options-'):
            value = el.attrib.pop(key)
            option_name = key[10:]
            dict_arg.append(f'{repr(option_name)}:{self._compile_expr(value)}')

    t_options = el.attrib.pop('t-options', None)

    code = []
    if t_options and dict_arg:
        # t-options-* entries override keys of the t-options dict.
        code.append(self._indent(f"{varname} = {{**{self._compile_expr(t_options)}, {', '.join(dict_arg)}}}", indent))
    elif dict_arg:
        code.append(self._indent(f"{varname} = {{{', '.join(dict_arg)}}}", indent))
    elif t_options:
        code.append(self._indent(f"{varname} = {self._compile_expr(t_options)}", indent))
    return code
def _compile_directive_tag(self, el, options, indent):
    """Compile the technical ``t-tag`` directive: opening tag,
    attributes (static and dynamic), recursive content, closing tag.

    :return: list of code lines
    """
    el.attrib.pop('t-tag', None)

    code = self._compile_tag_open(el, options, indent, False)

    # `options['nsmap']` is mutable; recurse on a copy extended with this
    # element's namespaces so deeper changes never leak back up.
    sub_options = dict(options, nsmap=el.nsmap) if el.nsmap else options
    code.extend(self._compile_directives(el, sub_options, indent))

    code.extend(self._compile_tag_close(el, options))
    return code
def _compile_directive_set(self, el, options, indent):
    """Compile `t-set` expressions into a python code as a list of
    strings.

    There are 3 kinds of `t-set`:
    * `t-value` containing python code;
    * `t-valuef` containing strings to format;
    * whose value is the content of the tag (being Markup safe).

    The code will contain the assignment of the dynamically generated value.

    :raises ValueError: for ``t-set="0"`` combined with t-value/t-valuef
        (the "0" slot is reserved for caller content)
    :return: list of code lines
    """
    varname = el.attrib.pop('t-set')
    code = self._flushText(options, indent)

    if 't-value' in el.attrib:
        if varname == '0':
            raise ValueError('t-set="0" should not contains t-value or t-valuef')
        # Empty t-value means an explicit None assignment.
        expr = el.attrib.pop('t-value') or 'None'
        expr = self._compile_expr(expr)
    elif 't-valuef' in el.attrib:
        if varname == '0':
            raise ValueError('t-set="0" should not contains t-value or t-valuef')
        exprf = el.attrib.pop('t-valuef')
        expr = self._compile_format(exprf)
    else:
        # set the content as value
        # The element body becomes a nested generator function whose joined
        # output (Markup-wrapped) is the assigned value.
        def_name = f"qweb_t_set_{re.sub(_VARNAME_REGEX, '_', options['last_path_node'])}"
        content = self._compile_directive_content(el, options, indent + 1) + self._flushText(options, indent + 1)
        if content:
            code.append(self._indent(f"def {def_name}():", indent))
            code.extend(content)
            expr = f"Markup(''.join({def_name}()))"
        else:
            expr = "''"

    code.append(self._indent(f"values[{repr(varname)}] = {expr}", indent))
    return code
def _compile_directive_content(self, el, options, indent):
    """Compile the element body (text and children) — this backs the
    technical ``t-content`` directive added by `_compile_node`.

    Text goes into the pending-text buffer; children are compiled
    recursively. XML comments are kept only when the rendering context
    sets ``preserve_comments``.

    :return: list of code lines
    """
    if el.text is not None:
        self._appendText(el.text, options)

    body = []
    for child in el:
        if isinstance(child, etree._Comment):
            if self.env.context.get('preserve_comments'):
                self._appendText("<!--%s-->" % child.text, options)
        else:
            body.extend(self._compile_node(child, options, indent))
        # Both elements and comments may carry tail text.
        if child.tail is not None:
            self._appendText(child.tail, options)
    return body
def _compile_directive_else(self, el, options, indent):
    """Compile ``t-else``: the element body compiled here is emitted by
    the preceding ``t-if`` as its ``else`` branch.

    The attribute is re-marked ``_t_skip_else_`` so that the element is
    skipped when the normal sibling traversal reaches it again.

    :raises ValueError: when no preceding ``t-if`` claimed this element
    :return: list of code lines
    """
    if el.attrib.pop('t-else') == '_t_skip_else_':
        return []
    if not options.pop('t_if', None):
        raise ValueError("t-else directive must be preceded by t-if directive")
    code = self._compile_directives(el, options, indent)
    el.attrib['t-else'] = '_t_skip_else_'
    return code
def _compile_directive_elif(self, el, options, indent):
    """Compile ``t-elif``: delegates to `_compile_directive_if`, which
    emits it as the ``else`` branch of the preceding ``t-if``.

    The attribute is re-marked ``_t_skip_else_`` so the element is
    skipped when the normal sibling traversal reaches it again.

    :raises ValueError: when no preceding ``t-if`` claimed this element
    :return: list of code lines
    """
    marker = el.attrib['t-elif']
    if marker == '_t_skip_else_':
        el.attrib.pop('t-elif')
        return []
    if not options.pop('t_if', None):
        raise ValueError("t-elif directive must be preceded by t-if directive")
    code = self._compile_directive_if(el, options, indent)
    el.attrib['t-elif'] = '_t_skip_else_'
    return code
def _compile_directive_if(self, el, options, indent):
    """Compile `t-if` expressions into a python code as a list of strings.

    The code will contain the condition `if`, `else` and `elif` part that
    wrap the rest of the compiled code of this element.

    A following sibling bearing ``t-else``/``t-elif`` is compiled here as
    the ``else:`` branch (with ``t_if`` passed in options as a handshake).

    :return: list of code lines
    """
    if 't-elif' in el.attrib:
        expr = el.attrib.pop('t-elif')
    else:
        expr = el.attrib.pop('t-if')

    code = self._flushText(options, indent)
    content_if = self._compile_directives(el, options, indent + 1) + self._flushText(options, indent + 1)

    orelse = []
    next_el = el.getnext()
    # Skip over XML comments between the t-if and a potential t-else/t-elif.
    comments_to_remove = []
    while isinstance(next_el, etree._Comment):
        comments_to_remove.append(next_el)
        next_el = next_el.getnext()
    if next_el is not None and {'t-else', 't-elif'} & set(next_el.attrib):
        # Remove the intervening comments so sibling traversal stays sane.
        parent = el.getparent()
        for comment in comments_to_remove:
            parent.remove(comment)
        if el.tail and not el.tail.isspace():
            raise ValueError("Unexpected non-whitespace characters between t-if and t-else directives")
        el.tail = None
        # `t_if=True` is the handshake checked by t-else/t-elif handlers.
        orelse = self._compile_node(next_el, dict(options, t_if=True), indent + 1) + self._flushText(options, indent + 1)

    code.append(self._indent(f"if {self._compile_expr(expr)}:", indent))
    # An empty branch still needs a `pass` to stay syntactically valid.
    code.extend(content_if or [self._indent('pass', indent + 1)])
    if orelse:
        code.append(self._indent("else:", indent))
        code.extend(orelse)
    return code
def _compile_directive_foreach(self, el, options, indent):
    """Compile `t-foreach` expressions into a python code as a list of
    strings.

    `t-as` is used to define the key name.
    `t-foreach` compiled value can be an iterable, an dictionary or a
    number.

    The code will contain loop `for` that wrap the rest of the compiled
    code of this element.

    Some key into values dictionary are create automatically:
        *_size, *_index, *_value, *_first, *_last, *_odd, *_even, *_parity

    :param el: etree element carrying ``t-foreach`` and ``t-as``
    :param options: compilation options (mutable dict)
    :param indent: indentation level of the generated python code
    :return: list of code lines
    """
    expr_foreach = el.attrib.pop('t-foreach')
    expr_as = el.attrib.pop('t-as')

    code = self._flushText(options, indent)
    # Compile the loop body first (at indent + 1) so its buffered text is
    # flushed inside the loop.
    content_foreach = self._compile_directives(el, options, indent + 1) + self._flushText(options, indent + 1)

    t_foreach = self._make_name('t_foreach')
    size = self._make_name('size')
    has_value = self._make_name('has_value')

    if expr_foreach.isdigit():
        # Literal number: iterate range(n) directly, size known at compile time.
        code.append(self._indent(dedent(f"""
            values[{repr(expr_as + '_size')}] = {size} = {int(expr_foreach)}
            {t_foreach} = range({size})
            {has_value} = False
        """).strip(), indent))
    else:
        # Runtime value: may be a Sized iterable, an int (-> range), a
        # Mapping (-> items(), unpacked as key/value), or any iterable.
        code.append(self._indent(dedent(f"""
            {t_foreach} = {self._compile_expr(expr_foreach)} or []
            if isinstance({t_foreach}, Sized):
                values[{repr(expr_as + '_size')}] = {size} = len({t_foreach})
            elif ({t_foreach}).__class__ == int:
                values[{repr(expr_as + '_size')}] = {size} = {t_foreach}
                {t_foreach} = range({size})
            else:
                {size} = None
            {has_value} = False
            if isinstance({t_foreach}, Mapping):
                {t_foreach} = {t_foreach}.items()
                {has_value} = True
        """).strip(), indent))

    code.append(self._indent(dedent(f"""
        for index, item in enumerate({t_foreach}):
            values[{repr(expr_as + '_index')}] = index
            if {has_value}:
                values[{repr(expr_as)}], values[{repr(expr_as + '_value')}] = item
            else:
                values[{repr(expr_as)}] = values[{repr(expr_as + '_value')}] = item
            values[{repr(expr_as + '_first')}] = values[{repr(expr_as + '_index')}] == 0
            if {size} is not None:
                values[{repr(expr_as + '_last')}] = index + 1 == {size}
            values[{repr(expr_as + '_odd')}] = index % 2
            values[{repr(expr_as + '_even')}] = not values[{repr(expr_as + '_odd')}]
            values[{repr(expr_as + '_parity')}] = 'odd' if values[{repr(expr_as + '_odd')}] else 'even'
    """), indent))
    code.append(self._indent(f'log["last_path_node"] = {repr(options["root"].getpath(el))} ', indent + 1))
    # BUG FIX: the empty-body fallback must be a *list*. `self._indent`
    # returns a string, and `list.extend(str)` appends it character by
    # character, producing broken generated code whenever the loop body
    # compiles to nothing (cf. the correct `[self._indent('pass', ...)]`
    # in `_compile_directive_if`).
    code.extend(content_foreach or [self._indent('continue', indent + 1)])
    return code
def _compile_directive_out(self, el, options, indent):
    """Compile `t-out` expressions into a python code as a list of
    strings.

    The output can have some rendering option with `t-options-widget` or
    `t-options={'widget': ...}. The compiled code will call ``_get_widget``
    method at rendering time.

    The code will contain evalution and rendering of the compiled value. If
    the compiled value is None or False, the tag is not added to the render
    (Except if the widget forces rendering or there is default content.).

    :return: list of code lines
    """
    # Fallback chain: t-out, then deprecated t-esc, then deprecated t-raw.
    ttype = 't-out'
    expr = el.attrib.pop('t-out', None)
    if expr is None:
        # deprecated use.
        ttype = 't-esc'
        expr = el.attrib.pop('t-esc', None)
        if expr is None:
            ttype = 't-raw'
            expr = el.attrib.pop('t-raw')

    code = self._flushText(options, indent)
    options['t_options_varname'] = 't_out_t_options'
    code_options = self._compile_directive(el, options, 'options', indent)
    code.extend(code_options)

    if expr == "0":
        # "0" is the magic slot holding the caller's rendered content
        # (t-call body); it is already a list of safe chunks.
        if code_options:
            code.append(self._indent("content = Markup(''.join(values.get('0', [])))", indent))
        else:
            # No widget: stream the chunks straight through.
            code.extend(self._compile_tag_open(el, options, indent))
            code.extend(self._flushText(options, indent))
            code.append(self._indent("yield from values.get('0', [])", indent))
            code.extend(self._compile_tag_close(el, options))
            return code
    else:
        code.append(self._indent(f"content = {self._compile_expr(expr)}", indent))

    if code_options:
        code.append(self._indent(f"attrs, content, force_display = self._get_widget(content, {repr(expr)}, {repr(el.tag)}, t_out_t_options, compile_options, values)", indent))
    else:
        code.append(self._indent("force_display = None", indent))

        if ttype == 't-raw':
            # deprecated use.
            # t-raw output is trusted as-is: mark it Markup so it is not escaped.
            code.append(self._indent(dedent("""
                if content is not None and content is not False:
                    content = Markup(content)
                """), indent))

    code.extend(self._compile_widget_value(el, options, indent, without_attributes=not code_options))
    return code
def _compile_directive_esc(self, el, options, indent):
    """Compile the deprecated ``t-esc`` directive.

    Behaves exactly like ``t-out`` (handled there); in dev mode a
    deprecation warning is logged at compile time.
    """
    # deprecated use.
    if options.get('dev_mode'):
        ref = options.get('ref', '<unknown>')
        _logger.warning(
            "Found deprecated directive @t-esc=%r in template %r. Replace by @t-out",
            el.get('t-esc'),
            ref,
        )
    return self._compile_directive_out(el, options, indent)
def _compile_directive_raw(self, el, options, indent):
    """Compile the deprecated ``t-raw`` directive.

    Behaves like ``t-out`` with Markup-wrapped (unescaped) content; a
    deprecation warning is always logged at compile time.
    """
    # deprecated use.
    ref = options.get('ref', '<unknown>')
    _logger.warning(
        "Found deprecated directive @t-raw=%r in template %r. Replace by "
        "@t-out, and explicitely wrap content in `Markup` if "
        "necessary (which likely is not the case)",
        el.get('t-raw'),
        ref,
    )
    return self._compile_directive_out(el, options, indent)
def _compile_directive_field(self, el, options, indent):
    """Compile `t-field` expressions into a python code as a list of
    strings.

    The compiled code will call ``_get_field`` method at rendering time
    using the type of value supplied by the field. This behavior can be
    changed with `t-options-widget` or `t-options={'widget': ...}.

    The code will contain evalution and rendering of the compiled value
    value from the record field. If the compiled value is None or False,
    the tag is not added to the render
    (Except if the widget forces rendering or there is default content.).

    :raises AssertionError: on unsupported host tags or a malformed
        ``record.field`` expression
    :return: list of code lines
    """
    tagName = el.tag
    # RTE (inline-editing) widgets cannot live on structural table/list tags.
    assert tagName not in ("table", "tbody", "thead", "tfoot", "tr", "td",
                            "li", "ul", "ol", "dl", "dt", "dd"),\
        "RTE widgets do not work correctly on %r elements" % tagName
    assert tagName != 't',\
        "t-field can not be used on a t element, provide an actual HTML node"
    assert "." in el.get('t-field'),\
        "t-field must have at least a dot like 'record.field_name'"

    expression = el.attrib.pop('t-field')
    # Split on the LAST dot: everything before is the record expression.
    record, field_name = expression.rsplit('.', 1)

    code = []
    options['t_options_varname'] = 't_field_t_options'
    # t_field_t_options must always exist in the generated code, even empty.
    code_options = self._compile_directive(el, options, 'options', indent) or [self._indent("t_field_t_options = {}", indent)]
    code.extend(code_options)

    # raise_on_missing=True: a missing record should raise a clear KeyError.
    code.append(self._indent(f"attrs, content, force_display = self._get_field({self._compile_expr(record, raise_on_missing=True)}, {repr(field_name)}, {repr(expression)}, {repr(tagName)}, t_field_t_options, compile_options, values)", indent))
    code.append(self._indent("if content is not None and content is not False:", indent))
    code.append(self._indent("content = self._compile_to_str(content)", indent + 1))
    code.extend(self._compile_widget_value(el, options, indent))
    return code
def _compile_widget_value(self, el, options, indent=0, without_attributes=False):
    """Take care of part of the compilation of `t-out` and `t-field` (and
    the technical directive `t-tag). This is the part that takes care of
    whether or not created the tags and the default content of the element.

    The generated code assumes `content` and `force_display` variables are
    already defined in its scope (set by the calling directive compiler).

    :param el: etree element being compiled (its `t-tag` marker is consumed)
    :param options: compilation options dict (text buffer is mutated)
    :param indent: indentation level of the generated code
    :param without_attributes: when True, do not render the node attributes
    :return: list of generated python code lines
    """
    el.attrib.pop('t-tag', None)
    code = self._flushText(options, indent)
    # main branch: a non-None/False content renders the tag around the
    # escaped value
    code.append(self._indent("if content is not None and content is not False:", indent))
    code.extend(self._compile_tag_open(el, options, indent + 1, not without_attributes))
    code.extend(self._flushText(options, indent + 1))
    # Use str to avoid the escaping of the other html content.
    code.append(self._indent("yield str(escape(content))", indent + 1))
    code.extend(self._compile_tag_close(el, options))
    code.extend(self._flushText(options, indent + 1))
    default_body = self._compile_directive_content(el, options, indent + 1)
    if default_body or options['_text_concat']:
        # default content
        # save/restore the pending text buffer so it is emitted inside the
        # else branch, not before it
        _text_concat = list(options['_text_concat'])
        options['_text_concat'].clear()
        code.append(self._indent("else:", indent))
        code.extend(self._compile_tag_open(el, options, indent + 1, not without_attributes))
        code.extend(self._flushText(options, indent + 1))
        code.extend(default_body)
        options['_text_concat'].extend(_text_concat)
        code.extend(self._compile_tag_close(el, options))
        code.extend(self._flushText(options, indent + 1))
    else:
        # no default content: render the empty tag only when the widget
        # explicitly forces display
        content = (self._compile_tag_open(el, options, indent + 1, not without_attributes) +
                   self._compile_tag_close(el, options) +
                   self._flushText(options, indent + 1))
        if content:
            code.append(self._indent("elif force_display:", indent))
            code.extend(content)
    return code
def _compile_directive_call(self, el, options, indent):
    """Compile `t-call` expressions into a python code as a list of
    strings.

    `t-call` allow formating string dynamic at rendering time.
    Can use `t-options` used to call and render the sub-template at
    rendering time.
    The sub-template is called with a copy of the rendering values
    dictionary. The dictionary contains the key 0 coming from the
    compilation of the contents of this element

    The code will contain the call of the template and a function from the
    compilation of the content of this element.

    :param el: etree element carrying the ``t-call`` attribute (consumed)
    :param options: compilation options dict (mutated: text buffer, nsmap)
    :param indent: indentation level of the generated code
    :return: list of generated python code lines
    """
    expr = el.attrib.pop('t-call')
    if el.attrib.get('t-call-options'):  # retro-compatibility
        # BUGFIX: lxml's `el.attrib` (_Attrib) has no `set()` method, so
        # `el.attrib.set(...)` raised AttributeError whenever the legacy
        # `t-call-options` attribute was used; rename it through the
        # element's own `set()` instead.
        el.set('t-options', el.attrib.pop('t-call-options'))
    nsmap = options.get('nsmap')
    code = self._flushText(options, indent)
    # compile an optional t-options dict into `t_call_t_options`
    options['t_options_varname'] = 't_call_t_options'
    code_options = self._compile_directive(el, options, 'options', indent) or [self._indent("t_call_t_options = {}", indent)]
    code.extend(code_options)

    # content (t-out="0" and variables): compile the element body into a
    # local generator whose joined output becomes values['0']
    def_name = "t_call_content"
    content = self._compile_directive_content(el, options, indent + 1)
    if content and not options['_text_concat']:
        self._appendText('', options)  # To ensure the template function is a generator and doesn't become a regular function
        content.extend(self._flushText(options, indent + 1))
    if content:
        code.append(self._indent(f"def {def_name}(self, values, log):", indent))
        code.extend(content)
        code.append(self._indent("t_call_values = values.copy()", indent))
        code.append(self._indent(f"t_call_values['0'] = Markup(''.join({def_name}(self, t_call_values, log)))", indent))
    else:
        code.append(self._indent("t_call_values = values.copy()", indent))
        code.append(self._indent("t_call_values['0'] = Markup()", indent))

    # options: propagate the caller template and node path for error reporting
    code.append(self._indent(dedent(f"""
        t_call_options = compile_options.copy()
        t_call_options.update({{'caller_template': {repr(str(options.get('template')))}, 'last_path_node': {repr(str(options['root'].getpath(el)))} }})
        """).strip(), indent))
    if nsmap:
        # update this dict with the current nsmap so that the callee know
        # if he outputting the xmlns attributes is relevenat or not
        nsmap = []
        for key, value in options['nsmap'].items():
            if isinstance(key, str):
                nsmap.append(f'{repr(key)}:{repr(value)}')
            else:
                nsmap.append(f'None:{repr(value)}')
        code.append(self._indent(f"t_call_options.update(nsmap={{{', '.join(nsmap)}}})", indent))

    template = self._compile_format(expr)

    # call: when t-options were given, they may change the rendering lang,
    # in which case the sub-template is compiled/rendered with that lang
    if code_options:
        code.append(self._indent("t_call_options.update(t_call_t_options)", indent))
        code.append(self._indent(dedent(f"""
            if compile_options.get('lang') != t_call_options.get('lang'):
                self_lang = self.with_context(lang=t_call_options.get('lang'))
                yield from self_lang._compile({template}, t_call_options)(self_lang, t_call_values)
            else:
                yield from self._compile({template}, t_call_options)(self, t_call_values)
            """).strip(), indent))
    else:
        code.append(self._indent(f"yield from self._compile({template}, t_call_options)(self, t_call_values)", indent))
    return code
# method called by computing code
def _post_processing_att(self, tagName, atts, options):
""" Method called at compile time for the static node and called at
runing time for the dynamic attributes.
This method may be overwrited to filter or modify the attributes
(during compilation for static node or after they compilation in
the case of dynamic elements).
@returns dict
"""
return atts
def _get_field(self, record, field_name, expression, tagName, field_options, options, values):
    """Method called at compile time to return the field value.

    Reads ``field_name`` from ``record`` (None when absent) and hands the
    value over to the widget machinery.

    :returns: tuple:
        * dict: attributes
        * string or None: content
        * boolean: force_display display the tag if the content and default_content are None
    """
    field_value = getattr(record, field_name, None)
    return self._get_widget(field_value, expression, tagName, field_options, options, values)
def _get_widget(self, value, expression, tagName, field_options, options, values):
"""Method called at compile time to return the widget value.
:returns: tuple:
* dict: attributes
* string or None: content
* boolean: force_display display the tag if the content and default_content are None
"""
return ({}, value, False)
def _debug_trace(self, debugger, options):
    """Method called at compile time to load debugger.

    Starts the debugger named by a `t-debug` directive; rejects any value
    not white-listed in SUPPORTED_DEBUGGERS.
    """
    if debugger not in SUPPORTED_DEBUGGERS:
        raise QWebException(f"unsupported t-debug value: {debugger}", self, options)
    __import__(debugger).set_trace()
| 44.212481 | 59,510 |
7,567 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import logging
from odoo import api, fields, models
from odoo.osv import expression
from odoo.exceptions import UserError
from psycopg2 import IntegrityError
from odoo.tools.translate import _
_logger = logging.getLogger(__name__)
# Map ISO country code -> lowercase code of the flag image actually served
# (see `Country._compute_image_url`): these dependent territories are
# displayed with the flag of their governing country.
FLAG_MAPPING = {
    "GF": "fr",
    "BV": "no",
    "BQ": "nl",
    "GP": "fr",
    "HM": "au",
    "YT": "fr",
    "RE": "fr",
    "MF": "fr",
    "UM": "us",
}

# Countries for which no flag image is served at all.
NO_FLAG_COUNTRIES = [
    "AQ",  # Antarctica
    "SJ",  # Svalbard + Jan Mayen : separate jurisdictions : no dedicated flag
]
class Country(models.Model):
    """Country referential (ISO-style 2-letter code, address layout, flag)."""
    _name = 'res.country'
    _description = 'Country'
    _order = 'name'

    name = fields.Char(
        string='Country Name', required=True, translate=True, help='The full name of the country.')
    code = fields.Char(
        string='Country Code', size=2,
        help='The ISO country code in two chars. \nYou can use this field for quick search.')
    # python %-style pattern used to render addresses in reports
    address_format = fields.Text(string="Layout in Reports",
        help="Display format to use for addresses belonging to this country.\n\n"
             "You can use python-style string pattern with all the fields of the address "
             "(for example, use '%(street)s' to display the field 'street') plus"
             "\n%(state_name)s: the name of the state"
             "\n%(state_code)s: the code of the state"
             "\n%(country_name)s: the name of the country"
             "\n%(country_code)s: the code of the country",
        default='%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s')
    address_view_id = fields.Many2one(
        comodel_name='ir.ui.view', string="Input View",
        domain=[('model', '=', 'res.partner'), ('type', '=', 'form')],
        help="Use this field if you want to replace the usual way to encode a complete address. "
             "Note that the address_format field is used to modify the way to display addresses "
             "(in reports for example), while this field is used to modify the input form for "
             "addresses.")
    currency_id = fields.Many2one('res.currency', string='Currency')
    # computed from `code`, see _compute_image_url
    image_url = fields.Char(
        compute="_compute_image_url", string="Flag",
        help="Url of static flag image",
    )
    phone_code = fields.Integer(string='Country Calling Code')
    country_group_ids = fields.Many2many('res.country.group', 'res_country_res_country_group_rel',
                                         'res_country_id', 'res_country_group_id', string='Country Groups')
    state_ids = fields.One2many('res.country.state', 'country_id', string='States')
    name_position = fields.Selection([
        ('before', 'Before Address'),
        ('after', 'After Address'),
    ], string="Customer Name Position", default="before",
        help="Determines where the customer/company name should be placed, i.e. after or before the address.")
    vat_label = fields.Char(string='Vat Label', translate=True, help="Use this field if you want to change vat label.")
    state_required = fields.Boolean(default=False)
    zip_required = fields.Boolean(default=True)

    _sql_constraints = [
        ('name_uniq', 'unique (name)',
         'The name of the country must be unique !'),
        ('code_uniq', 'unique (code)',
         'The code of the country must be unique !')
    ]

    def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
        """Also match the 2-letter country code: code matches (for 2-char
        searches) are returned before name matches, without duplicates."""
        if args is None:
            args = []
        ids = []
        if len(name) == 2:
            ids = list(self._search([('code', 'ilike', name)] + args, limit=limit))
        search_domain = [('name', operator, name)]
        if ids:
            # exclude code matches from the name search to avoid duplicates
            search_domain.append(('id', 'not in', ids))
        ids += list(self._search(search_domain + args, limit=limit))
        return ids

    @api.model_create_multi
    def create(self, vals_list):
        # country codes are always stored uppercase
        for vals in vals_list:
            if vals.get('code'):
                vals['code'] = vals['code'].upper()
        return super(Country, self).create(vals_list)

    def write(self, vals):
        # keep the code uppercase on update as well
        if vals.get('code'):
            vals['code'] = vals['code'].upper()
        return super(Country, self).write(vals)

    def get_address_fields(self):
        """Return the field names referenced by this country's
        ``address_format`` pattern (e.g. ['street', 'city', ...])."""
        self.ensure_one()
        return re.findall(r'\((.+?)\)', self.address_format)

    @api.depends('code')
    def _compute_image_url(self):
        """Compute the static flag image URL from the country code, taking
        FLAG_MAPPING / NO_FLAG_COUNTRIES into account."""
        for country in self:
            if not country.code or country.code in NO_FLAG_COUNTRIES:
                country.image_url = False
            else:
                code = FLAG_MAPPING.get(country.code, country.code.lower())
                country.image_url = "/base/static/img/country_flags/%s.png" % code

    @api.constrains('address_format')
    def _check_address_format(self):
        """Validate the address_format pattern by rendering it against a
        dummy value for every supported key."""
        for record in self:
            if record.address_format:
                address_fields = self.env['res.partner']._formatting_address_fields() + ['state_code', 'state_name', 'country_code', 'country_name', 'company_name']
                try:
                    record.address_format % {i: 1 for i in address_fields}
                except (ValueError, KeyError):
                    raise UserError(_('The layout contains an invalid format key'))

    @api.constrains('code')
    def _check_country_code(self):
        # a country must always carry a code (used for flags, searches...)
        for record in self:
            if not record.code:
                raise UserError(_('Country code cannot be empty'))
class CountryGroup(models.Model):
    """Named set of countries (many2many grouping of res.country)."""
    _description = "Country Group"
    _name = 'res.country.group'

    name = fields.Char(required=True, translate=True)
    country_ids = fields.Many2many('res.country', 'res_country_res_country_group_rel',
                                   'res_country_group_id', 'res_country_id', string='Countries')
class CountryState(models.Model):
    """Administrative division of a country (state, province, canton...)."""
    _description = "Country state"
    _name = 'res.country.state'
    _order = 'code'

    country_id = fields.Many2one('res.country', string='Country', required=True)
    name = fields.Char(string='State Name', required=True,
                       help='Administrative divisions of a country. E.g. Fed. State, Departement, Canton')
    code = fields.Char(string='State Code', help='The state code.', required=True)

    _sql_constraints = [
        ('name_code_uniq', 'unique(country_id, code)', 'The code of the state must be unique by country !')
    ]

    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Search states by code first (`=ilike` match), then by name,
        optionally restricted to ``context['country_id']``; code matches
        are returned first, without duplicates."""
        args = args or []
        if self.env.context.get('country_id'):
            args = expression.AND([args, [('country_id', '=', self.env.context.get('country_id'))]])
        if operator == 'ilike' and not (name or '').strip():
            # empty ilike search: plain listing, no code priority needed
            first_domain = []
            domain = []
        else:
            first_domain = [('code', '=ilike', name)]
            domain = [('name', operator, name)]
        first_state_ids = self._search(expression.AND([first_domain, args]), limit=limit, access_rights_uid=name_get_uid) if first_domain else []
        return list(first_state_ids) + [
            state_id
            for state_id in self._search(expression.AND([domain, args]),
                                         limit=limit, access_rights_uid=name_get_uid)
            if state_id not in first_state_ids
        ]

    def name_get(self):
        """Display states as "Name (CC)" where CC is the country code."""
        result = []
        for record in self:
            result.append((record.id, "{} ({})".format(record.name, record.country_id.code)))
        return result
| 40.25 | 7,567 |
30,940 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import hashlib
import io
import itertools
import logging
import mimetypes
import os
import re
import uuid
from collections import defaultdict
from PIL import Image
from odoo import api, fields, models, tools, _
from odoo.exceptions import AccessError, ValidationError, MissingError, UserError
from odoo.tools import config, human_size, ustr, html_escape, ImageProcess, str2bool
from odoo.tools.mimetypes import guess_mimetype
from odoo.osv import expression
_logger = logging.getLogger(__name__)
class IrAttachment(models.Model):
"""Attachments are used to link binary files or url to any openerp document.
External attachment storage
---------------------------
The computed field ``datas`` is implemented using ``_file_read``,
``_file_write`` and ``_file_delete``, which can be overridden to implement
other storage engines. Such methods should check for other location pseudo
uri (example: hdfs://hadoopserver).
The default implementation is the file:dirname location that stores files
on the local filesystem using name based on their sha1 hash
"""
_name = 'ir.attachment'
_description = 'Attachment'
_order = 'id desc'
def _compute_res_name(self):
    """Display name of the record this attachment is linked to, if any."""
    for attachment in self:
        related = None
        if attachment.res_model and attachment.res_id:
            related = self.env[attachment.res_model].browse(attachment.res_id)
        attachment.res_name = related.display_name if related else False
@api.model
def _storage(self):
    """Return the configured storage backend ('file' by default)."""
    icp = self.env['ir.config_parameter'].sudo()
    return icp.get_param('ir_attachment.location', 'file')
@api.model
def _filestore(self):
    # root directory of the filestore for the current database
    return config.filestore(self._cr.dbname)
@api.model
def _get_storage_domain(self):
    """Domain matching the attachments stored with the *other* backend,
    i.e. the ones that `force_storage` must migrate."""
    storage_to_domain = {
        'db': [('store_fname', '!=', False)],
        'file': [('db_datas', '!=', False)],
    }
    return storage_to_domain[self._storage()]
@api.model
def force_storage(self):
    """Force all attachments to be stored in the currently configured storage.

    :raises AccessError: if the current user is not an administrator
    """
    if not self.env.is_admin():
        raise AccessError(_('Only administrators can execute this action.'))
    # Migrate only binary attachments and bypass the res_field automatic
    # filter added in _search override
    self.search(expression.AND([
        self._get_storage_domain(),
        ['&', ('type', '=', 'binary'), '|', ('res_field', '=', False), ('res_field', '!=', False)]
    ]))._migrate()
def _migrate(self):
    """Rewrite each attachment's content so it lands in the currently
    configured storage (re-writing `raw` triggers the storage logic)."""
    storage = self._storage().upper()
    total = len(self)
    for num, attachment in enumerate(self, start=1):
        _logger.debug("Migrate attachment %s/%s to %s", num, total, storage)
        # pass mimetype, to avoid recomputation
        attachment.write({'raw': attachment.raw, 'mimetype': attachment.mimetype})
@api.model
def _full_path(self, path):
    """Return the absolute filestore path for ``path``, stripping dots and
    leading separators first (basic path-traversal hardening)."""
    sanitized = path.replace('.', '').strip('/\\')
    return os.path.join(self._filestore(), sanitized)
@api.model
def _get_path(self, bin_data, sha):
    """Return ``(fname, full_path)`` where content with checksum ``sha``
    is (or should be) stored, creating the parent directory if needed.

    :param bytes bin_data: content about to be stored (collision check)
    :param str sha: sha1 hex digest of ``bin_data``
    :raises UserError: if a different content already uses this checksum path
    """
    # retro compatibility
    fname = sha[:3] + '/' + sha
    full_path = self._full_path(fname)
    if os.path.isfile(full_path):
        return fname, full_path        # keep existing path
    # scatter files across 256 dirs
    # we use '/' in the db (even on windows)
    fname = sha[:2] + '/' + sha
    full_path = self._full_path(fname)
    dirname = os.path.dirname(full_path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    # prevent sha-1 collision
    if os.path.isfile(full_path) and not self._same_content(bin_data, full_path):
        raise UserError("The attachment is colliding with an existing file.")
    return fname, full_path
@api.model
def _file_read(self, fname):
    """Return the binary content stored under ``fname`` in the filestore,
    or ``b''`` when the file is missing or unreadable (logged)."""
    full_path = self._full_path(fname)
    try:
        with open(full_path, 'rb') as fobj:
            return fobj.read()
    except (IOError, OSError):
        _logger.info("_read_file reading %s", full_path, exc_info=True)
    return b''
@api.model
def _file_write(self, bin_value, checksum):
    """Store ``bin_value`` in the filestore under its checksum-based path.

    :param bytes bin_value: content to store
    :param str checksum: sha1 hex digest of the content
    :return: the relative filestore name to save as ``store_fname``
    """
    fname, full_path = self._get_path(bin_value, checksum)
    if not os.path.exists(full_path):
        try:
            with open(full_path, 'wb') as fp:
                fp.write(bin_value)
            # add fname to checklist, in case the transaction aborts
            self._mark_for_gc(fname)
        except IOError:
            # write failures are logged but not raised; the fname is
            # returned regardless (content may then be missing on disk)
            _logger.info("_file_write writing %s", full_path, exc_info=True)
    return fname
@api.model
def _file_delete(self, fname):
    # Deletion is deferred: the file is only scheduled here and actually
    # removed by the filestore garbage collection (_gc_file_store), so an
    # aborted transaction never loses data.
    # simply add fname to checklist, it will be garbage-collected later
    self._mark_for_gc(fname)
def _mark_for_gc(self, fname):
    """ Add ``fname`` in a checklist for the filestore garbage collection. """
    # same sanitization as _full_path
    fname = re.sub('[.]', '', fname).strip('/\\')
    # we use a spooldir: add an empty file in the subdirectory 'checklist'
    full_path = os.path.join(self._full_path('checklist'), fname)
    if not os.path.exists(full_path):
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            # ignore races on concurrent directory creation
            with tools.ignore(OSError):
                os.makedirs(dirname)
        open(full_path, 'ab').close()
@api.autovacuum
def _gc_file_store(self):
    """ Perform the garbage collection of the filestore.

    Walks the 'checklist' spooldir filled by `_mark_for_gc` and unlinks
    every listed file that no attachment references anymore.
    """
    if self._storage() != 'file':
        return
    # Continue in a new transaction. The LOCK statement below must be the
    # first one in the current transaction, otherwise the database snapshot
    # used by it may not contain the most recent changes made to the table
    # ir_attachment! Indeed, if concurrent transactions create attachments,
    # the LOCK statement will wait until those concurrent transactions end.
    # But this transaction will not see the new attachements if it has done
    # other requests before the LOCK (like the method _storage() above).
    cr = self._cr
    cr.commit()
    # prevent all concurrent updates on ir_attachment while collecting,
    # but only attempt to grab the lock for a little bit, otherwise it'd
    # start blocking other transactions. (will be retried later anyway)
    cr.execute("SET LOCAL lock_timeout TO '10s'")
    cr.execute("LOCK ir_attachment IN SHARE MODE")
    # retrieve the file names from the checklist
    checklist = {}  # {fname: checklist marker file path}
    for dirpath, _, filenames in os.walk(self._full_path('checklist')):
        dirname = os.path.basename(dirpath)
        for filename in filenames:
            fname = "%s/%s" % (dirname, filename)
            checklist[fname] = os.path.join(dirpath, filename)
    # Clean up the checklist. The checklist is split in chunks and files are garbage-collected
    # for each chunk.
    removed = 0
    for names in cr.split_for_in_conditions(checklist):
        # determine which files to keep among the checklist
        cr.execute("SELECT store_fname FROM ir_attachment WHERE store_fname IN %s", [names])
        whitelist = set(row[0] for row in cr.fetchall())
        # remove garbage files, and clean up checklist
        for fname in names:
            filepath = checklist[fname]
            if fname not in whitelist:
                try:
                    os.unlink(self._full_path(fname))
                    _logger.debug("_file_gc unlinked %s", self._full_path(fname))
                    removed += 1
                except (OSError, IOError):
                    _logger.info("_file_gc could not unlink %s", self._full_path(fname), exc_info=True)
            # always drop the checklist marker, whether or not the file
            # itself was removed
            with tools.ignore(OSError):
                os.unlink(filepath)
    # commit to release the lock
    cr.commit()
    _logger.info("filestore gc %d checked, %d removed", len(checklist), removed)
@api.depends('store_fname', 'db_datas', 'file_size')
@api.depends_context('bin_size')
def _compute_datas(self):
    """Expose the binary content base64-encoded; with `bin_size` in the
    context, only a human-readable size is returned instead."""
    if self._context.get('bin_size'):
        for attachment in self:
            attachment.datas = human_size(attachment.file_size)
    else:
        for attachment in self:
            attachment.datas = base64.b64encode(attachment.raw or b'')
@api.depends('store_fname', 'db_datas')
def _compute_raw(self):
    """Load the raw binary content from the filestore when a stored file
    name exists, otherwise from the database column."""
    for attachment in self:
        fname = attachment.store_fname
        attachment.raw = attachment._file_read(fname) if fname else attachment.db_datas
def _inverse_raw(self):
    # persist `raw` through the shared inverse helper (empty -> b'')
    self._set_attachment_data(lambda a: a.raw or b'')
def _inverse_datas(self):
    # decode the base64 `datas` payload and persist it
    self._set_attachment_data(lambda attach: base64.b64decode(attach.datas or b''))
def _set_attachment_data(self, asbytes):
    """Shared inverse for `raw`/`datas`: persist new binary content.

    :param asbytes: callable mapping an attachment record to its new
                    binary content (bytes)
    """
    for attach in self:
        # compute the fields that depend on datas
        bin_data = asbytes(attach)
        vals = self._get_datas_related_values(bin_data, attach.mimetype)
        # take current location in filestore to possibly garbage-collect it
        fname = attach.store_fname
        # write as superuser, as user probably does not have write access
        super(IrAttachment, attach.sudo()).write(vals)
        if fname:
            self._file_delete(fname)
def _get_datas_related_values(self, data, mimetype):
    """Compute the storage-related values for binary ``data``.

    :param bytes data: raw binary content
    :param str mimetype: mimetype, used for content indexation
    :return: dict with file_size/checksum/index_content and either
             ``store_fname`` (filestore storage) or ``db_datas`` (db storage)
    """
    checksum = self._compute_checksum(data)
    try:
        index_content = self._index(data, mimetype, checksum=checksum)
    except TypeError:
        # retro-compatibility: a custom `_index` override may not accept
        # the `checksum` keyword argument
        index_content = self._index(data, mimetype)
    values = {
        'file_size': len(data),
        'checksum': checksum,
        'index_content': index_content,
        'store_fname': False,
        'db_datas': data,
    }
    if data and self._storage() != 'db':
        # store on the filesystem and clear the database column
        values['store_fname'] = self._file_write(data, values['checksum'])
        values['db_datas'] = False
    return values
def _compute_checksum(self, bin_data):
""" compute the checksum for the given datas
:param bin_data : datas in its binary form
"""
# an empty file has a checksum too (for caching)
return hashlib.sha1(bin_data or b'').hexdigest()
@api.model
def _same_content(self, bin_data, filepath):
    """Return True iff ``bin_data`` equals the content of ``filepath``,
    comparing chunk by chunk to avoid loading the whole file at once."""
    CHUNK = 1024
    with open(filepath, 'rb') as fd:
        for index in itertools.count():
            chunk = fd.read(CHUNK)
            if chunk != bin_data[index * CHUNK:(index + 1) * CHUNK]:
                return False
            if not chunk:
                # end of file reached with every chunk equal
                return True
def _compute_mimetype(self, values):
""" compute the mimetype of the given values
:param values : dict of values to create or write an ir_attachment
:return mime : string indicating the mimetype, or application/octet-stream by default
"""
mimetype = None
if values.get('mimetype'):
mimetype = values['mimetype']
if not mimetype and values.get('name'):
mimetype = mimetypes.guess_type(values['name'])[0]
if not mimetype and values.get('url'):
mimetype = mimetypes.guess_type(values['url'].split('?')[0])[0]
if not mimetype or mimetype == 'application/octet-stream':
raw = None
if values.get('raw'):
raw = values['raw']
elif values.get('datas'):
raw = base64.b64decode(values['datas'])
if raw:
mimetype = guess_mimetype(raw)
return mimetype or 'application/octet-stream'
def _postprocess_contents(self, values):
    """Auto-resize oversized images found in create/write ``values``.

    Driven by config parameters: ``base.image_autoresize_extensions``
    (supported image subtypes), ``base.image_autoresize_max_px`` (maximum
    resolution, falsy to disable) and ``base.image_autoresize_quality``.

    :param dict values: create/write values, mutated in place
    :return: the (possibly updated) values dict
    """
    ICP = self.env['ir.config_parameter'].sudo().get_param
    supported_subtype = ICP('base.image_autoresize_extensions', 'png,jpeg,bmp,tiff').split(',')
    mimetype = values['mimetype'] = self._compute_mimetype(values)
    _type, _subtype = mimetype.split('/')
    is_image_resizable = _type == 'image' and _subtype in supported_subtype
    if is_image_resizable and (values.get('datas') or values.get('raw')):
        is_raw = values.get('raw')
        # Can be set to 0 to skip the resize
        max_resolution = ICP('base.image_autoresize_max_px', '1920x1920')
        if str2bool(max_resolution, True):
            try:
                img = fn_quality = False
                if is_raw:
                    img = ImageProcess(False, verify_resolution=False)
                    img.image = Image.open(io.BytesIO(values['raw']))
                    img.original_format = (img.image.format or '').upper()
                else:  # datas
                    img = ImageProcess(values['datas'], verify_resolution=False)
                w, h = img.image.size
                nw, nh = map(int, max_resolution.split('x'))
                if w > nw or h > nh:
                    img = img.resize(nw, nh)
                quality = int(ICP('base.image_autoresize_quality', 80))
                # re-encode with the matching output format (raw vs base64)
                fn_quality = img.image_quality if is_raw else img.image_base64
                values[is_raw and 'raw' or 'datas'] = fn_quality(quality=quality)
            except UserError as e:
                # Catch error during test where we provide fake image
                # raise UserError(_("This file could not be decoded as an image file. Please try with a different file."))
                _logger.info('Post processing ignored : %s', e)
                pass
    return values
def _check_contents(self, values):
    """Sanitize create/write ``values`` before saving an attachment.

    Downgrades html/xml-like mimetypes to text/plain when the (real) user
    is not allowed to write views, then applies image post-processing.

    :param dict values: create/write values, mutated
    :return: the sanitized values dict
    """
    mimetype = values['mimetype'] = self._compute_mimetype(values)
    xml_like = 'ht' in mimetype or (  # hta, html, xhtml, etc.
            'xml' in mimetype and     # other xml (svg, text/xml, etc)
            not 'openxmlformats' in mimetype)  # exception for Office formats
    user = self.env.context.get('binary_field_real_user', self.env.user)
    if not isinstance(user, self.pool['res.users']):
        raise UserError(_("binary_field_real_user should be a res.users record."))
    # neutralize when the context asks for plain xml or the user may not
    # write views
    force_text = xml_like and (
        self.env.context.get('attachments_mime_plainxml') or
        not self.env['ir.ui.view'].with_user(user).check_access_rights('write', False))
    if force_text:
        values['mimetype'] = 'text/plain'
    if not self.env.context.get('image_no_postprocess'):
        values = self._postprocess_contents(values)
    return values
@api.model
def _index(self, bin_data, file_type, checksum=None):
""" compute the index content of the given binary data.
This is a python implementation of the unix command 'strings'.
:param bin_data : datas in binary form
:return index_content : string containing all the printable character of the binary data
"""
index_content = False
if file_type:
index_content = file_type.split('/')[0]
if index_content == 'text': # compute index_content only for text type
words = re.findall(b"[\x20-\x7E]{4,}", bin_data)
index_content = b"\n".join(words).decode('ascii')
return index_content
@api.model
def get_serving_groups(self):
    """ An ir.attachment record may be used as a fallback in the
    http dispatch if its type field is set to "binary" and its url
    field is set as the request's url. Only the groups returned by
    this method are allowed to create and write on such records.
    """
    allowed_groups = ['base.group_system']
    return allowed_groups
name = fields.Char('Name', required=True)
description = fields.Text('Description')
res_name = fields.Char('Resource Name', compute='_compute_res_name')
# link to the record owning this attachment (model / field / id)
res_model = fields.Char('Resource Model', readonly=True, help="The database object this attachment will be attached to.")
res_field = fields.Char('Resource Field', readonly=True)
res_id = fields.Many2oneReference('Resource ID', model_field='res_model',
                                  readonly=True, help="The record id this is attached to.")
company_id = fields.Many2one('res.company', string='Company', change_default=True,
                             default=lambda self: self.env.company)
type = fields.Selection([('url', 'URL'), ('binary', 'File')],
                        string='Type', required=True, default='binary', change_default=True,
                        help="You can either upload a file from your computer or copy/paste an internet link to your file.")
url = fields.Char('Url', index=True, size=1024)
public = fields.Boolean('Is public document')

# for external access
access_token = fields.Char('Access Token', groups="base.group_user")

# the field 'datas' is computed and may use the other fields below
raw = fields.Binary(string="File Content (raw)", compute='_compute_raw', inverse='_inverse_raw')
datas = fields.Binary(string='File Content (base64)', compute='_compute_datas', inverse='_inverse_datas')
# actual storage: either db_datas (db backend) or store_fname (filestore)
db_datas = fields.Binary('Database Data', attachment=False)
store_fname = fields.Char('Stored Filename')
file_size = fields.Integer('File Size', readonly=True)
checksum = fields.Char("Checksum/SHA1", size=40, index=True, readonly=True)
mimetype = fields.Char('Mime Type', readonly=True)
index_content = fields.Text('Indexed Content', readonly=True, prefetch=False)
def _auto_init(self):
    # add a composite index on (res_model, res_id) for the frequent
    # "attachments of this record" lookups
    res = super(IrAttachment, self)._auto_init()
    tools.create_index(self._cr, 'ir_attachment_res_idx',
                       self._table, ['res_model', 'res_id'])
    return res
@api.constrains('type', 'url')
def _check_serving_attachments(self):
    """Only members of `get_serving_groups` (or admins) may save
    attachments that the http dispatcher could serve directly
    (type 'binary' with a url set)."""
    if self.env.is_admin():
        return
    for attachment in self:
        # restrict writing on attachments that could be served by the
        # ir.http's dispatch exception handling
        # XDO note: this should be done in check(write), constraints for access rights?
        # XDO note: if read on sudo, read twice, one for constraints, one for _inverse_datas as user
        if attachment.type == 'binary' and attachment.url:
            has_group = self.env.user.has_group
            if not any(has_group(g) for g in attachment.get_serving_groups()):
                raise ValidationError("Sorry, you are not allowed to write on this document")
@api.model
def check(self, mode, values=None):
    """ Restricts the access to an ir.attachment, according to referred mode

    Access is delegated to the record the attachment is linked to
    (``res_model``/``res_id``): the user must be able to perform ``mode``
    (mapped to 'write' for create/unlink) on that record.

    :param str mode: 'read', 'write', 'create' or 'unlink'
    :param dict values: optional create/write values whose
        res_model/res_id are checked as well
    :raises AccessError: if the operation is not allowed
    """
    if self.env.is_superuser():
        return True
    # Always require an internal user (aka, employee) to access to a attachment
    if not (self.env.is_admin() or self.env.user.has_group('base.group_user')):
        raise AccessError(_("Sorry, you are not allowed to access this document."))
    # collect the records to check (by model)
    model_ids = defaultdict(set)            # {model_name: set(ids)}
    if self:
        # DLE P173: `test_01_portal_attachment`
        self.env['ir.attachment'].flush(['res_model', 'res_id', 'create_uid', 'public', 'res_field'])
        self._cr.execute('SELECT res_model, res_id, create_uid, public, res_field FROM ir_attachment WHERE id IN %s', [tuple(self.ids)])
        for res_model, res_id, create_uid, public, res_field in self._cr.fetchall():
            # public attachments are readable by anyone
            if public and mode == 'read':
                continue
            # binary-field attachments, and orphans created by someone
            # else, are restricted to system users
            if not self.env.is_system() and (res_field or (not res_id and create_uid != self.env.uid)):
                raise AccessError(_("Sorry, you are not allowed to access this document."))
            if not (res_model and res_id):
                continue
            model_ids[res_model].add(res_id)
    if values and values.get('res_model') and values.get('res_id'):
        model_ids[values['res_model']].add(values['res_id'])
    # check access rights on the records
    for res_model, res_ids in model_ids.items():
        # ignore attachments that are not attached to a resource anymore
        # when checking access rights (resource was deleted but attachment
        # was not)
        if res_model not in self.env:
            continue
        if res_model == 'res.users' and len(res_ids) == 1 and self.env.uid == list(res_ids)[0]:
            # by default a user cannot write on itself, despite the list of writeable fields
            # e.g. in the case of a user inserting an image into his image signature
            # we need to bypass this check which would needlessly throw us away
            continue
        records = self.env[res_model].browse(res_ids).exists()
        # For related models, check if we can write to the model, as unlinking
        # and creating attachments can be seen as an update to the model
        access_mode = 'write' if mode in ('create', 'unlink') else mode
        records.check_access_rights(access_mode)
        records.check_access_rule(access_mode)
def _read_group_allowed_fields(self):
return ['type', 'company_id', 'res_id', 'create_date', 'create_uid', 'name', 'mimetype', 'id', 'url', 'res_field', 'res_model']
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
    """Override read_group to add res_field=False in domain if not present.

    Also rejects the 'name:agg(field)' syntax, and restricts the
    readable/groupable fields of non-system users to
    `_read_group_allowed_fields`.
    """
    if not fields:
        raise AccessError(_("Sorry, you must provide fields to read on attachments"))
    groupby = [groupby] if isinstance(groupby, str) else groupby
    if any('(' in field for field in fields + groupby):
        raise AccessError(_("Sorry, the syntax 'name:agg(field)' is not available for attachments"))
    # hide binary-field attachments unless the caller filtered explicitly
    if not any(item[0] in ('id', 'res_field') for item in domain):
        domain.insert(0, ('res_field', '=', False))
    allowed_fields = self._read_group_allowed_fields()
    fields_set = set(field.split(':')[0] for field in fields + groupby)
    if not self.env.is_system() and (not fields or fields_set.difference(allowed_fields)):
        raise AccessError(_("Sorry, you are not allowed to access these fields on attachments."))
    return super().read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
@api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Restrict search results to the attachments the user may read.

        An attachment's visibility follows the record it is attached to:
        after the regular search, attachments linked to records the user
        cannot read are filtered out (the superuser bypasses the filtering).
        Attachments stored in binary fields (``res_field`` set) are excluded
        unless the domain explicitly targets them.
        """
        # add res_field=False in domain if not present; the arg[0] trick below
        # works for domain items and '&'/'|'/'!' operators too
        discard_binary_fields_attachments = False
        if not any(arg[0] in ('id', 'res_field') for arg in args):
            discard_binary_fields_attachments = True
            args.insert(0, ('res_field', '=', False))
        # always ask super() for plain ids (count=False): counting is redone
        # below after the permission-based filtering
        ids = super(IrAttachment, self)._search(args, offset=offset, limit=limit, order=order,
                                                count=False, access_rights_uid=access_rights_uid)
        if self.env.is_superuser():
            # rules do not apply for the superuser
            return len(ids) if count else ids
        if not ids:
            return 0 if count else []
        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)
        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        model_attachments = defaultdict(lambda: defaultdict(set))  # {res_model: {res_id: set(ids)}}
        binary_fields_attachments = set()
        self._cr.execute("""SELECT id, res_model, res_id, public, res_field FROM ir_attachment WHERE id IN %s""", [tuple(ids)])
        for row in self._cr.dictfetchall():
            # attachments without a linked record, or public ones, are kept
            if not row['res_model'] or row['public']:
                continue
            # model_attachments = {res_model: {res_id: set(ids)}}
            model_attachments[row['res_model']][row['res_id']].add(row['id'])
            # Should not retrieve binary fields attachments if not explicitly required
            if discard_binary_fields_attachments and row['res_field']:
                binary_fields_attachments.add(row['id'])
        if binary_fields_attachments:
            ids.difference_update(binary_fields_attachments)
        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        for res_model, targets in model_attachments.items():
            if res_model not in self.env:
                continue
            if not self.env[res_model].check_access_rights('read', False):
                # remove all corresponding attachment ids
                ids.difference_update(itertools.chain(*targets.values()))
                continue
            # filter ids according to what access rules permit
            target_ids = list(targets)
            allowed = self.env[res_model].with_context(active_test=False).search([('id', 'in', target_ids)])
            for res_id in set(target_ids).difference(allowed.ids):
                ids.difference_update(targets[res_id])
        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        # If the original search reached the limit, it is important the
        # filtered record set does so too. When a JS view receive a
        # record set whose length is below the limit, it thinks it
        # reached the last page. To avoid an infinite recursion due to the
        # permission checks the sub-call need to be aware of the number of
        # expected records to retrieve
        if len(orig_ids) == limit and len(result) < self._context.get('need', limit):
            need = self._context.get('need', limit) - len(result)
            result.extend(self.with_context(need=need)._search(args, offset=offset + len(orig_ids),
                                                               limit=limit, order=order, count=count,
                                                               access_rights_uid=access_rights_uid)[:limit - len(result)])
        return len(result) if count else list(result)
def _read(self, fields):
self.check('read')
return super(IrAttachment, self)._read(fields)
def write(self, vals):
self.check('write', values=vals)
# remove computed field depending of datas
for field in ('file_size', 'checksum', 'store_fname'):
vals.pop(field, False)
if 'mimetype' in vals or 'datas' in vals or 'raw' in vals:
vals = self._check_contents(vals)
return super(IrAttachment, self).write(vals)
def copy(self, default=None):
if not (default or {}).keys() & {'datas', 'db_datas', 'raw'}:
# ensure the content is kept and recomputes checksum/store_fname
default = dict(default or {}, raw=self.raw)
return super(IrAttachment, self).copy(default)
def unlink(self):
if not self:
return True
self.check('unlink')
# First delete in the database, *then* in the filesystem if the
# database allowed it. Helps avoid errors when concurrent transactions
# are deleting the same file, and some of the transactions are
# rolled back by PostgreSQL (due to concurrent updates detection).
to_delete = set(attach.store_fname for attach in self if attach.store_fname)
res = super(IrAttachment, self).unlink()
for file_path in to_delete:
self._file_delete(file_path)
return res
@api.model_create_multi
    def create(self, vals_list):
        """Create attachments: store the binary content and verify that the
        user may write on the records they are attached to.
        """
        record_tuple_set = set()
        # remove computed field depending of datas
        vals_list = [{
            key: value
            for key, value
            in vals.items()
            if key not in ('file_size', 'checksum', 'store_fname')
        } for vals in vals_list]
        for values in vals_list:
            values = self._check_contents(values)
            # content may arrive either as raw bytes/str or as base64 'datas'
            raw, datas = values.pop('raw', None), values.pop('datas', None)
            if raw or datas:
                if isinstance(raw, str):
                    # b64decode handles str input but raw needs explicit encoding
                    raw = raw.encode()
                values.update(self._get_datas_related_values(
                    raw or base64.b64decode(datas or b''),
                    values['mimetype']
                ))
            # 'check()' only uses res_model and res_id from values, and make an exists.
            # We can group the values by model, res_id to make only one query when
            # creating multiple attachments on a single record.
            record_tuple = (values.get('res_model'), values.get('res_id'))
            record_tuple_set.add(record_tuple)
        # don't use possible contextual recordset for check, see commit for details
        Attachments = self.browse()
        for res_model, res_id in record_tuple_set:
            Attachments.check('create', values={'res_model':res_model, 'res_id':res_id})
        return super(IrAttachment, self).create(vals_list)
def _post_add_create(self):
pass
def generate_access_token(self):
tokens = []
for attachment in self:
if attachment.access_token:
tokens.append(attachment.access_token)
continue
access_token = self._generate_access_token()
attachment.write({'access_token': access_token})
tokens.append(access_token)
return tokens
def _generate_access_token(self):
return str(uuid.uuid4())
@api.model
def action_get(self):
return self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
@api.model
def get_serve_attachment(self, url, extra_domain=None, extra_fields=None, order=None):
domain = [('type', '=', 'binary'), ('url', '=', url)] + (extra_domain or [])
fieldNames = ['__last_update', 'datas', 'mimetype'] + (extra_fields or [])
return self.search_read(domain, fieldNames, order=order, limit=1)
| 46.179104 | 30,940 |
from odoo import api, fields, models
class DemoFailure(models.TransientModel):
    """ Stores modules for which we could not install demo data
    """
    _name = 'ir.demo_failure'
    _description = 'Demo failure'
    # module whose demo data failed to install
    module_id = fields.Many2one('ir.module.module', required=True, string="Module")
    # error message captured during the failed demo installation
    error = fields.Char(string="Error")
    # wizard that displays this failure to the user
    wizard_id = fields.Many2one('ir.demo_failure.wizard')
class DemoFailureWizard(models.TransientModel):
    """Wizard summarising the demo-data installation failures."""
    _name = 'ir.demo_failure.wizard'
    _description = 'Demo Failure wizard'
    # failures collected while installing demo data
    failure_ids = fields.One2many(
        'ir.demo_failure', 'wizard_id', readonly=True,
        string="Demo Installation Failures"
    )
    # number of failures, shown in the wizard view
    failures_count = fields.Integer(compute='_compute_failures_count')
    @api.depends('failure_ids')
    def _compute_failures_count(self):
        # plain counter over the one2many
        for r in self:
            r.failures_count = len(r.failure_ids)
    def done(self):
        """Close the wizard and resume the module installation flow."""
        # pylint: disable=next-method-called
        return self.env['ir.module.module'].next()
| 32.290323 | 1,001 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
from odoo.modules.loading import force_demo
from odoo.addons.base.models.ir_module import assert_log_admin_access
class IrDemo(models.TransientModel):
    """Wizard forcing the installation of demo data on an existing database."""
    _name = 'ir.demo'
    _description = 'Demo'

    @assert_log_admin_access
    def install_demo(self):
        """Install demo data for all modules, then reload the web client."""
        force_demo(self.env.cr)
        reload_action = {
            'type': 'ir.actions.act_url',
            'target': 'self',
            'url': '/web',
        }
        return reload_action
| 25.809524 | 542 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import functools
import itertools
import psycopg2
import pytz
from odoo import api, Command, fields, models, _
from odoo.tools import ustr
# keys that reference an existing record instead of carrying a field value:
# None (display name), 'id' (external id), '.id' (database id)
REFERENCING_FIELDS = {None, 'id', '.id'}

def only_ref_fields(record):
    """Keep only the referencing keys of ``record``."""
    return {k: record[k] for k in record if k in REFERENCING_FIELDS}

def exclude_ref_fields(record):
    """Drop the referencing keys of ``record``, keeping real field values."""
    return {k: record[k] for k in record if k not in REFERENCING_FIELDS}
class ImportWarning(Warning):
    """Non-fatal issue reported up the stack during the import process."""
class ConversionNotFound(ValueError):
    """Raised when no converter exists for a (source type, field type) pair."""
class IrFieldsConverter(models.AbstractModel):
_name = 'ir.fields.converter'
_description = 'Fields Converter'
@api.model
def _format_import_error(self, error_type, error_msg, error_params=(), error_args=None):
# sanitize error params for later formatting by the import system
sanitize = lambda p: p.replace('%', '%%') if isinstance(p, str) else p
if error_params:
if isinstance(error_params, str):
error_params = sanitize(error_params)
elif isinstance(error_params, dict):
error_params = {k: sanitize(v) for k, v in error_params.items()}
elif isinstance(error_params, tuple):
error_params = tuple(sanitize(v) for v in error_params)
return error_type(error_msg % error_params, error_args)
def _get_import_field_path(self, field, value):
""" Rebuild field path for import error attribution to the right field.
This method uses the 'parent_fields_hierarchy' context key built during treatment of one2many fields
(_str_to_one2many). As the field to import is the last of the chain (child_id/child_id2/field_to_import),
we need to retrieve the complete hierarchy in case of error in order to assign the error to the correct
column in the import UI.
:param (str) field: field in which the value will be imported.
:param (str or list) value:
- str: in most of the case the value we want to import into a field is a string (or a number).
- list: when importing into a one2may field, all the records to import are regrouped into a list of dict.
E.g.: creating multiple partners: [{None: 'ChildA_1', 'type': 'Private address'}, {None: 'ChildA_2', 'type': 'Private address'}]
where 'None' is the name. (because we can find a partner by his name, we don't need to specify the field.)
The field_path value is computed based on the last field in the chain.
for example,
- path_field for 'Private address' at childA_1 is ['partner_id', 'type']
- path_field for 'childA_1' is ['partner_id']
So, by retrieving the correct field_path for each value to import, if errors are raised for those fields,
we can the link the errors to the correct header-field couple in the import UI.
"""
field_path = [field]
parent_fields_hierarchy = self._context.get('parent_fields_hierarchy')
if parent_fields_hierarchy:
field_path = parent_fields_hierarchy + field_path
field_path_value = value
while isinstance(field_path_value, list):
key = list(field_path_value[0].keys())[0]
if key:
field_path.append(key)
field_path_value = field_path_value[0][key]
return field_path
@api.model
    def for_model(self, model, fromtype=str):
        """ Returns a converter object for the model. A converter is a
        callable taking a record-ish (a dictionary representing an odoo
        record with values of typetag ``fromtype``) and returning a converted
        records matching what :meth:`odoo.osv.orm.Model.write` expects.

        :param model: :class:`odoo.osv.orm.Model` for the conversion base
        :returns: a converter callable
        :rtype: (record: dict, logger: (field, error) -> None) -> dict
        """
        # make sure model is new api
        model = self.env[model._name]
        # one converter per field, built once and reused for every record
        converters = {
            name: self.to_field(model, field, fromtype)
            for name, field in model._fields.items()
        }
        def fn(record, log):
            # convert a single record dict; conversion problems are reported
            # through log(field, exception) rather than raised
            converted = {}
            import_file_context = self.env.context.get('import_file')
            for field, value in record.items():
                if field in REFERENCING_FIELDS:
                    continue
                if not value:
                    # empty cell: clear the field
                    converted[field] = False
                    continue
                try:
                    converted[field], ws = converters[field](value)
                    for w in ws:
                        if isinstance(w, str):
                            # wrap warning string in an ImportWarning for
                            # uniform handling
                            w = ImportWarning(w)
                        log(field, w)
                except (UnicodeEncodeError, UnicodeDecodeError) as e:
                    log(field, ValueError(str(e)))
                except ValueError as e:
                    if import_file_context:
                        # if the error is linked to a matching error, the error is a tuple
                        # E.g.:("Value X cannot be found for field Y at row 1", {
                        #     'more_info': {},
                        #     'value': 'X',
                        #     'field': 'Y',
                        #     'field_path': child_id/Y,
                        # })
                        # In order to link the error to the correct header-field couple in the import UI, we need to add
                        # the field path to the additional error info.
                        # As we raise the deepest child in error, we need to add the field path only for the deepest
                        # error in the import recursion. (if field_path is given, don't overwrite it)
                        error_info = len(e.args) > 1 and e.args[1]
                        if error_info and not error_info.get('field_path'):  # only raise the deepest child in error
                            error_info['field_path'] = self._get_import_field_path(field, value)
                    log(field, e)
            return converted
        return fn
@api.model
def to_field(self, model, field, fromtype=str):
""" Fetches a converter for the provided field object, from the
specified type.
A converter is simply a callable taking a value of type ``fromtype``
(or a composite of ``fromtype``, e.g. list or dict) and returning a
value acceptable for a write() on the field ``field``.
By default, tries to get a method on itself with a name matching the
pattern ``_$fromtype_to_$field.type`` and returns it.
Converter callables can either return a value and a list of warnings
to their caller or raise ``ValueError``, which will be interpreted as a
validation & conversion failure.
ValueError can have either one or two parameters. The first parameter
is mandatory, **must** be a unicode string and will be used as the
user-visible message for the error (it should be translatable and
translated). It can contain a ``field`` named format placeholder so the
caller can inject the field's translated, user-facing name (@string).
The second parameter is optional and, if provided, must be a mapping.
This mapping will be merged into the error dictionary returned to the
client.
If a converter can perform its function but has to make assumptions
about the data, it can send a warning to the user through adding an
instance of :class:`~.ImportWarning` to the second value
it returns. The handling of a warning at the upper levels is the same
as ``ValueError`` above.
:param field: field object to generate a value for
:type field: :class:`odoo.fields.Field`
:param fromtype: type to convert to something fitting for ``field``
:type fromtype: type | str
:param context: odoo request context
:return: a function (fromtype -> field.write_type), if a converter is found
:rtype: Callable | None
"""
assert isinstance(fromtype, (type, str))
# FIXME: return None
typename = fromtype.__name__ if isinstance(fromtype, type) else fromtype
converter = getattr(self, '_%s_to_%s' % (typename, field.type), None)
if not converter:
return None
return functools.partial(converter, model, field)
@api.model
def _str_to_boolean(self, model, field, value):
# all translatables used for booleans
# potentially broken casefolding? What about locales?
trues = set(word.lower() for word in itertools.chain(
[u'1', u"true", u"yes"], # don't use potentially translated values
self._get_translations(['code'], u"true"),
self._get_translations(['code'], u"yes"),
))
if value.lower() in trues:
return True, []
# potentially broken casefolding? What about locales?
falses = set(word.lower() for word in itertools.chain(
[u'', u"0", u"false", u"no"],
self._get_translations(['code'], u"false"),
self._get_translations(['code'], u"no"),
))
if value.lower() in falses:
return False, []
if field.name in self._context.get('import_skip_records', []):
return None, []
return True, [self._format_import_error(
ValueError,
_(u"Unknown value '%s' for boolean field '%%(field)s'"),
value,
{'moreinfo': _(u"Use '1' for yes and '0' for no")}
)]
@api.model
def _str_to_integer(self, model, field, value):
try:
return int(value), []
except ValueError:
raise self._format_import_error(
ValueError,
_(u"'%s' does not seem to be an integer for field '%%(field)s'"),
value
)
@api.model
def _str_to_float(self, model, field, value):
try:
return float(value), []
except ValueError:
raise self._format_import_error(
ValueError,
_(u"'%s' does not seem to be a number for field '%%(field)s'"),
value
)
_str_to_monetary = _str_to_float
@api.model
def _str_id(self, model, field, value):
return value, []
_str_to_reference = _str_to_char = _str_to_text = _str_to_binary = _str_to_html = _str_id
@api.model
def _str_to_date(self, model, field, value):
try:
parsed_value = fields.Date.from_string(value)
return fields.Date.to_string(parsed_value), []
except ValueError:
raise self._format_import_error(
ValueError,
_(u"'%s' does not seem to be a valid date for field '%%(field)s'"),
value,
{'moreinfo': _(u"Use the format '%s'", u"2012-12-31")}
)
@api.model
def _input_tz(self):
# if there's a tz in context, try to use that
if self._context.get('tz'):
try:
return pytz.timezone(self._context['tz'])
except pytz.UnknownTimeZoneError:
pass
# if the current user has a tz set, try to use that
user = self.env.user
if user.tz:
try:
return pytz.timezone(user.tz)
except pytz.UnknownTimeZoneError:
pass
# fallback if no tz in context or on user: UTC
return pytz.UTC
@api.model
def _str_to_datetime(self, model, field, value):
try:
parsed_value = fields.Datetime.from_string(value)
except ValueError:
raise self._format_import_error(
ValueError,
_(u"'%s' does not seem to be a valid datetime for field '%%(field)s'"),
value,
{'moreinfo': _(u"Use the format '%s'", u"2012-12-31 23:59:59")}
)
input_tz = self._input_tz()# Apply input tz to the parsed naive datetime
dt = input_tz.localize(parsed_value, is_dst=False)
# And convert to UTC before reformatting for writing
return fields.Datetime.to_string(dt.astimezone(pytz.UTC)), []
@api.model
    def _get_translations(self, types, src):
        """Return the translated values of ``src`` for translation ``types``.

        Results are memoized in the cursor cache so repeated rows in a single
        import file do not query ``ir.translation`` over and over.
        """
        types = tuple(types)
        # Cache translations so they don't have to be reloaded from scratch on
        # every row of the file
        tnx_cache = self._cr.cache.setdefault(self._name, {})
        if tnx_cache.setdefault(types, {}) and src in tnx_cache[types]:
            return tnx_cache[types][src]
        Translations = self.env['ir.translation']
        tnx = Translations.search([('type', 'in', types), ('src', '=', src)])
        result = tnx_cache[types][src] = [t.value for t in tnx if t.value is not False]
        return result
@api.model
def _str_to_selection(self, model, field, value):
# get untranslated values
env = self.with_context(lang=None).env
selection = field.get_description(env)['selection']
for item, label in selection:
label = ustr(label)
labels = [label] + self._get_translations(('selection', 'model', 'code'), label)
# case insensitive comparaison of string to allow to set the value even if the given 'value' param is not
# exactly (case sensitive) the same as one of the selection item.
if value.lower() == str(item).lower() or any(value.lower() == label.lower() for label in labels):
return item, []
if field.name in self._context.get('import_skip_records', []):
return None, []
elif field.name in self._context.get('import_set_empty_fields', []):
return False, []
raise self._format_import_error(
ValueError,
_(u"Value '%s' not found in selection field '%%(field)s'"),
value,
{'moreinfo': [_label or str(item) for item, _label in selection if _label or item]}
)
@api.model
    def db_id_for(self, model, field, subfield, value):
        """ Finds a database id for the reference ``value`` in the referencing
        subfield ``subfield`` of the provided field of the provided model.

        :param model: model to which the field belongs
        :param field: relational field for which references are provided
        :param subfield: a relational subfield allowing building of refs to
                         existing records: ``None`` for a name_get/name_search,
                         ``id`` for an external id and ``.id`` for a database
                         id
        :param value: value of the reference to match to an actual record
        :param context: OpenERP request context
        :return: a pair of the matched database identifier (if any), the
                 translated user-readable name for the field and the list of
                 warnings
        :rtype: (ID|None, unicode, list)
        """
        # the function 'flush' comes from BaseModel.load(), and forces the
        # creation/update of former records (batch creation)
        flush = self._context.get('import_flush', lambda **kw: None)
        id = None
        warnings = []
        error_msg = ''
        # window action proposed to the user in the error dialog so they can
        # browse the possible values
        action = {
            'name': 'Possible Values',
            'type': 'ir.actions.act_window', 'target': 'new',
            'view_mode': 'tree,form',
            'views': [(False, 'list'), (False, 'form')],
            'context': {'create': False},
            'help': _(u"See all possible values")}
        if subfield is None:
            action['res_model'] = field.comodel_name
        elif subfield in ('id', '.id'):
            action['res_model'] = 'ir.model.data'
            action['domain'] = [('model', '=', field.comodel_name)]
        RelatedModel = self.env[field.comodel_name]
        # three referencing modes: '.id' = database id, 'id' = external id,
        # None = match by display name
        if subfield == '.id':
            field_type = _(u"database id")
            if isinstance(value, str) and not self._str_to_boolean(model, field, value)[0]:
                return False, field_type, warnings
            try: tentative_id = int(value)
            except ValueError: tentative_id = value
            try:
                if RelatedModel.search([('id', '=', tentative_id)]):
                    id = tentative_id
            except psycopg2.DataError:
                # type error
                raise self._format_import_error(
                    ValueError,
                    _(u"Invalid database id '%s' for the field '%%(field)s'"),
                    value,
                    {'moreinfo': action})
        elif subfield == 'id':
            field_type = _(u"external id")
            if not self._str_to_boolean(model, field, value)[0]:
                return False, field_type, warnings
            if '.' in value:
                xmlid = value
            else:
                # unqualified xids belong to the module being imported
                xmlid = "%s.%s" % (self._context.get('_import_current_module', ''), value)
            flush(xml_id=xmlid)
            id = self._xmlid_to_record_id(xmlid, RelatedModel)
        elif subfield is None:
            field_type = _(u"name")
            if value == '':
                return False, field_type, warnings
            flush(model=field.comodel_name)
            ids = RelatedModel.name_search(name=value, operator='=')
            if ids:
                if len(ids) > 1:
                    # ambiguous name: keep the first match but warn
                    warnings.append(ImportWarning(
                        _(u"Found multiple matches for value '%s' in field '%%(field)s' (%d matches)")
                        %(str(value).replace('%', '%%'), len(ids))))
                id, _name = ids[0]
            else:
                name_create_enabled_fields = self.env.context.get('name_create_enabled_fields') or {}
                if name_create_enabled_fields.get(field.name):
                    try:
                        id, _name = RelatedModel.name_create(name=value)
                    except (Exception, psycopg2.IntegrityError):
                        error_msg = _(u"Cannot create new '%s' records from their name alone. Please create those records manually and try importing again.", RelatedModel._description)
        else:
            raise self._format_import_error(
                Exception,
                _(u"Unknown sub-field '%s'"),
                subfield
            )
        set_empty = False
        skip_record = False
        if self.env.context.get('import_file'):
            import_set_empty_fields = self.env.context.get('import_set_empty_fields') or []
            field_path = "/".join((self.env.context.get('parent_fields_hierarchy', []) + [field.name]))
            set_empty = field_path in import_set_empty_fields
            skip_record = field_path in self.env.context.get('import_skip_records', [])
        if id is None and not set_empty and not skip_record:
            if error_msg:
                message = _("No matching record found for %(field_type)s '%(value)s' in field '%%(field)s' and the following error was encountered when we attempted to create one: %(error_message)s")
            else:
                message = _("No matching record found for %(field_type)s '%(value)s' in field '%%(field)s'")
            error_info_dict = {'moreinfo': action}
            if self.env.context.get('import_file'):
                # limit to 50 char to avoid too long error messages.
                value = value[:50] if isinstance(value, str) else value
                error_info_dict.update({'value': value, 'field_type': field_type})
                if error_msg:
                    error_info_dict['error_message'] = error_msg
            raise self._format_import_error(
                ValueError,
                message,
                {'field_type': field_type, 'value': value, 'error_message': error_msg},
                error_info_dict)
        return id, field_type, warnings
    def _xmlid_to_record_id(self, xmlid, model):
        """ Return the record id corresponding to the given external id,
        provided that the record actually exists; otherwise return ``None``.
        """
        # per-import memoization of xmlid resolutions (context key set by load())
        import_cache = self.env.context.get('import_cache', {})
        result = import_cache.get(xmlid)
        if not result:
            module, name = xmlid.split('.', 1)
            # join on the target table so xids of deleted records resolve to nothing
            query = """
                SELECT d.model, d.res_id
                FROM ir_model_data d
                JOIN "{}" r ON d.res_id = r.id
                WHERE d.module = %s AND d.name = %s
            """.format(model._table)
            self.env.cr.execute(query, [module, name])
            result = self.env.cr.fetchone()
        if result:
            res_model, res_id = import_cache[xmlid] = result
            if res_model != model._name:
                MSG = "Invalid external ID %s: expected model %r, found %r"
                raise ValueError(MSG % (xmlid, model._name, res_model))
            return res_id
def _referencing_subfield(self, record):
""" Checks the record for the subfields allowing referencing (an
existing record in an other table), errors out if it finds potential
conflicts (multiple referencing subfields) or non-referencing subfields
returns the name of the correct subfield.
:param record:
:return: the record subfield to use for referencing and a list of warnings
:rtype: str, list
"""
# Can import by name_get, external id or database id
fieldset = set(record)
if fieldset - REFERENCING_FIELDS:
raise ValueError(
_(u"Can not create Many-To-One records indirectly, import the field separately"))
if len(fieldset) > 1:
raise ValueError(
_(u"Ambiguous specification for field '%(field)s', only provide one of name, external id or database id"))
# only one field left possible, unpack
[subfield] = fieldset
return subfield, []
@api.model
def _str_to_many2one(self, model, field, values):
# Should only be one record, unpack
[record] = values
subfield, w1 = self._referencing_subfield(record)
id, _, w2 = self.db_id_for(model, field, subfield, record[subfield])
return id, w1 + w2
@api.model
def _str_to_many2one_reference(self, model, field, value):
return self._str_to_integer(model, field, value)
@api.model
def _str_to_many2many(self, model, field, value):
[record] = value
subfield, warnings = self._referencing_subfield(record)
ids = []
for reference in record[subfield].split(','):
id, _, ws = self.db_id_for(model, field, subfield, reference)
ids.append(id)
warnings.extend(ws)
if field.name in self._context.get('import_set_empty_fields', []) and any([id is None for id in ids]):
ids = [id for id in ids if id]
elif field.name in self._context.get('import_skip_records', []) and any([id is None for id in ids]):
return None, warnings
if self._context.get('update_many2many'):
return [Command.link(id) for id in ids], warnings
else:
return [Command.set(ids)], warnings
@api.model
    def _str_to_one2many(self, model, field, records):
        """Convert a list of record dicts into one2many commands, recursively
        converting their sub-fields with a converter for the comodel.
        """
        name_create_enabled_fields = self._context.get('name_create_enabled_fields') or {}
        prefix = field.name + '/'
        # keep only the name_create flags that concern this o2m's sub-fields
        relative_name_create_enabled_fields = {
            k[len(prefix):]: v
            for k, v in name_create_enabled_fields.items()
            if k.startswith(prefix)
        }
        commands = []
        warnings = []
        if len(records) == 1 and exclude_ref_fields(records[0]) == {}:
            # only one row with only ref field, field=ref1,ref2,ref3 as in
            # m2o/m2m
            record = records[0]
            subfield, ws = self._referencing_subfield(record)
            warnings.extend(ws)
            # transform [{subfield:ref1,ref2,ref3}] into
            # [{subfield:ref1},{subfield:ref2},{subfield:ref3}]
            records = ({subfield:item} for item in record[subfield].split(','))
        def log(f, exception):
            # errors on sub-fields are re-raised with the parent field path
            # injected into the message; warnings are simply collected
            if not isinstance(exception, Warning):
                current_field_name = self.env[field.comodel_name]._fields[f].string
                arg0 = exception.args[0] % {'field': '%(field)s/' + current_field_name}
                exception.args = (arg0, *exception.args[1:])
                raise exception
            warnings.append(exception)
        # Complete the field hierarchy path
        # E.g. For "parent/child/subchild", field hierarchy path for "subchild" is ['parent', 'child']
        parent_fields_hierarchy = self._context.get('parent_fields_hierarchy', []) + [field.name]
        convert = self.with_context(
            name_create_enabled_fields=relative_name_create_enabled_fields,
            parent_fields_hierarchy=parent_fields_hierarchy
        ).for_model(self.env[field.comodel_name])
        for record in records:
            id = None
            refs = only_ref_fields(record)
            writable = convert(exclude_ref_fields(record), log)
            if refs:
                subfield, w1 = self._referencing_subfield(refs)
                warnings.extend(w1)
                try:
                    id, _, w2 = self.db_id_for(model, field, subfield, record[subfield])
                    warnings.extend(w2)
                except ValueError:
                    if subfield != 'id':
                        raise
                    # unresolved xid: let create() register it (see O2MIdMapper)
                    writable['id'] = record['id']
            if id:
                commands.append(Command.link(id))
                commands.append(Command.update(id, writable))
            else:
                commands.append(Command.create(writable))
        return commands, warnings
class O2MIdMapper(models.AbstractModel):
    """
    Updates the base class to support setting xids directly in create by
    providing an "id" key (otherwise stripped by create) during an import
    (which should strip 'id' from the input data anyway)
    """
    _inherit = 'base'
    # sadly _load_records_create is only called for the toplevel record so we
    # can't hook into that
    @api.model_create_multi
    @api.returns('self', lambda value: value.id)
    def create(self, vals_list):
        recs = super().create(vals_list)
        import_module = self.env.context.get('_import_current_module')
        if not import_module: # not an import -> bail
            return recs
        noupdate = self.env.context.get('noupdate', False)
        # pair each created record with the xid its values carried (if any)
        xids = (v.get('id') for v in vals_list)
        self.env['ir.model.data']._update_xmlids([
            {
                # unqualified xids get the importing module as namespace
                'xml_id': xid if '.' in xid else ('%s.%s' % (import_module, xid)),
                'record': rec,
                # note: this is not used when updating o2ms above...
                'noupdate': noupdate,
            }
            for rec, xid in zip(recs, xids)
            if xid and isinstance(xid, str)
        ])
        return recs
| 43.348101 | 27,396 |
# -*- coding: utf-8 -*-
import re
from collections.abc import Iterable
from odoo import api, fields, models, _
from odoo.osv import expression
def sanitize_account_number(acc_number):
    """Normalize a bank account number: strip every non-alphanumeric
    character and uppercase the rest.

    Returns False when no account number is given.
    """
    if not acc_number:
        return False
    return re.sub(r'\W+', '', acc_number).upper()
class Bank(models.Model):
    _description = 'Bank'
    _name = 'res.bank'
    _order = 'name'

    name = fields.Char(required=True)
    # address fields
    street = fields.Char()
    street2 = fields.Char()
    zip = fields.Char()
    city = fields.Char()
    # state choices are limited to the selected country ('=?' ignores an
    # unset country)
    state = fields.Many2one('res.country.state', 'Fed. State', domain="[('country_id', '=?', country)]")
    country = fields.Many2one('res.country')
    email = fields.Char()
    phone = fields.Char()
    active = fields.Boolean(default=True)
    bic = fields.Char('Bank Identifier Code', index=True, help="Sometimes called BIC or Swift.")

    def name_get(self):
        # display the bank as "<name> - <BIC>" when a BIC is set
        result = []
        for bank in self:
            name = bank.name + (bank.bic and (' - ' + bank.bic) or '')
            result.append((bank.id, name))
        return result

    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        # search on name as well as on BIC prefix
        args = args or []
        domain = []
        if name:
            domain = ['|', ('bic', '=ilike', name + '%'), ('name', operator, name)]
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                domain = ['&'] + domain
        return self._search(domain + args, limit=limit, access_rights_uid=name_get_uid)

    @api.onchange('country')
    def _onchange_country_id(self):
        # reset the state when it no longer belongs to the selected country
        if self.country and self.country != self.state.country_id:
            self.state = False

    @api.onchange('state')
    def _onchange_state(self):
        # align the country with the selected state
        if self.state.country_id:
            self.country = self.state.country_id
class ResPartnerBank(models.Model):
    """A bank account owned by a partner; uniqueness is enforced on the
    sanitized number per company."""
    _name = 'res.partner.bank'
    _rec_name = 'acc_number'
    _description = 'Bank Accounts'
    _order = 'sequence, id'
    @api.model
    def get_supported_account_types(self):
        # Public wrapper kept for backward compatibility; see the private hook.
        return self._get_supported_account_types()
    @api.model
    def _get_supported_account_types(self):
        """Selection values for ``acc_type``; extended by localization modules."""
        return [('bank', _('Normal'))]
    active = fields.Boolean(default=True)
    acc_type = fields.Selection(selection=lambda x: x.env['res.partner.bank'].get_supported_account_types(), compute='_compute_acc_type', string='Type', help='Bank account type: Normal or IBAN. Inferred from the bank account number.')
    acc_number = fields.Char('Account Number', required=True)
    sanitized_acc_number = fields.Char(compute='_compute_sanitized_acc_number', string='Sanitized Account Number', readonly=True, store=True)
    acc_holder_name = fields.Char(string='Account Holder Name', help="Account holder name, in case it is different than the name of the Account Holder")
    partner_id = fields.Many2one('res.partner', 'Account Holder', ondelete='cascade', index=True, domain=['|', ('is_company', '=', True), ('parent_id', '=', False)], required=True)
    bank_id = fields.Many2one('res.bank', string='Bank')
    bank_name = fields.Char(related='bank_id.name', readonly=False)
    bank_bic = fields.Char(related='bank_id.bic', readonly=False)
    sequence = fields.Integer(default=10)
    currency_id = fields.Many2one('res.currency', string='Currency')
    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company, ondelete='cascade', readonly=True)
    _sql_constraints = [
        ('unique_number', 'unique(sanitized_acc_number, company_id)', 'Account Number must be unique'),
    ]
    @api.depends('acc_number')
    def _compute_sanitized_acc_number(self):
        # Stored normalized form used for the uniqueness constraint and search.
        for bank in self:
            bank.sanitized_acc_number = sanitize_account_number(bank.acc_number)
    @api.depends('acc_number')
    def _compute_acc_type(self):
        for bank in self:
            bank.acc_type = self.retrieve_acc_type(bank.acc_number)
    @api.model
    def retrieve_acc_type(self, acc_number):
        """ To be overridden by subclasses in order to support other account_types.
        """
        return 'bank'
    def name_get(self):
        """Display as "number - bank" when a bank is set, else just the number."""
        return [(acc.id, '{} - {}'.format(acc.acc_number, acc.bank_id.name) if acc.bank_id else acc.acc_number)
                for acc in self]
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Rewrite 'acc_number' leaves to search the sanitized stored field.

        NOTE(review): this mutates the caller's ``args`` list in place, and a
        non-string iterable value combined with a 'like' operator would fail
        on the '%' concatenation below — presumably such combinations never
        occur in practice; confirm before relying on them.
        """
        pos = 0
        while pos < len(args):
            # DLE P14
            if args[pos][0] == 'acc_number':
                op = args[pos][1]
                value = args[pos][2]
                if not isinstance(value, str) and isinstance(value, Iterable):
                    value = [sanitize_account_number(i) for i in value]
                else:
                    value = sanitize_account_number(value)
                if 'like' in op:
                    value = '%' + value + '%'
                args[pos] = ('sanitized_acc_number', op, value)
            pos += 1
        return super(ResPartnerBank, self)._search(args, offset, limit, order, count=count, access_rights_uid=access_rights_uid)
| 39.828125 | 5,098 |
7,337 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
from odoo.tools.safe_eval import safe_eval, datetime
class IrFilters(models.Model):
    """Saved search filters, per model, optionally private to a user and/or
    tied to a specific action."""
    _name = 'ir.filters'
    _description = 'Filters'
    _order = 'model_id, name, id desc'
    name = fields.Char(string='Filter Name', translate=True, required=True)
    user_id = fields.Many2one('res.users', string='User', ondelete='cascade',
                              help="The user this filter is private to. When left empty the filter is public "
                                   "and available to all users.")
    domain = fields.Text(default='[]', required=True)
    context = fields.Text(default='{}', required=True)
    sort = fields.Text(default='[]', required=True)
    model_id = fields.Selection(selection='_list_all_models', string='Model', required=True)
    is_default = fields.Boolean(string='Default Filter')
    action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade',
                                help="The menu action this filter applies to. "
                                     "When left empty the filter applies to all menus "
                                     "for this model.")
    active = fields.Boolean(default=True)
    @api.model
    def _list_all_models(self):
        # Selection values for model_id: every installed model (model, name).
        self._cr.execute("SELECT model, name FROM ir_model ORDER BY name")
        return self._cr.fetchall()
    def copy(self, default=None):
        self.ensure_one()
        # Suffix the name so the copy does not hit the uniqueness constraint.
        default = dict(default or {}, name=_('%s (copy)', self.name))
        return super(IrFilters, self).copy(default)
    def _get_eval_domain(self):
        """Evaluate the stored domain string into a list, with date helpers."""
        self.ensure_one()
        return safe_eval(self.domain, {
            'datetime': datetime,
            'context_today': datetime.datetime.now,
        })
    @api.model
    def _get_action_domain(self, action_id=None):
        """Return a domain component for matching filters that are visible in the
        same context (menu/view) as the given action."""
        if action_id:
            # filters specific to this menu + global ones
            return [('action_id', 'in', [action_id, False])]
        # only global ones
        return [('action_id', '=', False)]
    @api.model
    def get_filters(self, model, action_id=None):
        """Obtain the list of filters available for the user on the given model.
        :param action_id: optional ID of action to restrict filters to this action
            plus global filters. If missing only global filters are returned.
            The action does not have to correspond to the model, it may only be
            a contextual action.
        :return: list of :meth:`~osv.read`-like dicts containing the
            ``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
            ``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
        """
        # available filters: private filters (user_id=uid) and public filters (uid=NULL),
        # and filters for the action (action_id=action_id) or global (action_id=NULL)
        action_domain = self._get_action_domain(action_id)
        filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])])
        user_context = self.env['res.users'].context_get()
        return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort'])
    @api.model
    def _check_global_default(self, vals, matching_filters):
        """ _check_global_default(dict, list(dict)) -> None
        Checks if there is a global default for the model_id requested.
        If there is, and the default is different than the record being written
        (-> we're not updating the current global default), raise an error
        to avoid users unknowingly overwriting existing global defaults (they
        have to explicitly remove the current default before setting a new one)
        This method should only be called if ``vals`` is trying to set
        ``is_default``
        :raises odoo.exceptions.UserError: if there is an existing default and
                                           we're not updating it
        """
        domain = self._get_action_domain(vals.get('action_id'))
        defaults = self.search(domain + [
            ('model_id', '=', vals['model_id']),
            ('user_id', '=', False),
            ('is_default', '=', True),
        ])
        if not defaults:
            return
        if matching_filters and (matching_filters[0]['id'] == defaults.id):
            return
        raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')})
    @api.model
    @api.returns('self', lambda value: value.id)
    def create_or_replace(self, vals):
        """Create the filter described by ``vals``, or overwrite the existing
        filter with the same (name, model, user) if one exists."""
        action_id = vals.get('action_id')
        current_filters = self.get_filters(vals['model_id'], action_id)
        matching_filters = [f for f in current_filters
                            if f['name'].lower() == vals['name'].lower()
                            # next line looks for matching user_ids (specific or global), i.e.
                            # f.user_id is False and vals.user_id is False or missing,
                            # or f.user_id.id == vals.user_id
                            if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')]
        if vals.get('is_default'):
            if vals.get('user_id'):
                # Setting new default: any other default that belongs to the user
                # should be turned off
                domain = self._get_action_domain(action_id)
                defaults = self.search(domain + [
                    ('model_id', '=', vals['model_id']),
                    ('user_id', '=', vals['user_id']),
                    ('is_default', '=', True),
                ])
                if defaults:
                    defaults.write({'is_default': False})
            else:
                self._check_global_default(vals, matching_filters)
        # When a filter exists for the same (name, model, user) triple, we simply
        # replace its definition (considering action_id irrelevant here)
        if matching_filters:
            matching_filter = self.browse(matching_filters[0]['id'])
            matching_filter.write(vals)
            return matching_filter
        return self.create(vals)
    _sql_constraints = [
        # Partial constraint, complemented by unique index (see below). Still
        # useful to keep because it provides a proper error message when a
        # violation occurs, as it shares the same prefix as the unique index.
        ('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
    ]
    def _auto_init(self):
        result = super(IrFilters, self)._auto_init()
        # Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
        tools.create_unique_index(self._cr, 'ir_filters_name_model_uid_unique_action_index',
                                  self._table, ['lower(name)', 'model_id', 'COALESCE(user_id,-1)', 'COALESCE(action_id,-1)'])
        return result
| 47.642857 | 7,337 |
19,229 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import logging
import pytz
from psycopg2 import sql, OperationalError, errorcodes
from odoo import api, fields, models, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
def _create_sequence(cr, seq_name, number_increment, number_next):
""" Create a PostreSQL sequence. """
if number_increment == 0:
raise UserError(_('Step must not be zero.'))
sql = "CREATE SEQUENCE %s INCREMENT BY %%s START WITH %%s" % seq_name
cr.execute(sql, (number_increment, number_next))
def _drop_sequences(cr, seq_names):
    """Drop the given PostgreSQL sequences, ignoring the ones that do not exist.

    :param cr: database cursor
    :param seq_names: iterable of sequence names to drop
    """
    identifiers = [sql.Identifier(name) for name in seq_names]
    joined = sql.SQL(',').join(identifiers)
    # RESTRICT is the default behaviour: the drop is refused when another
    # database object still depends on one of the sequences.
    cr.execute(sql.SQL("DROP SEQUENCE IF EXISTS {} RESTRICT").format(joined))
def _alter_sequence(cr, seq_name, number_increment=None, number_next=None):
    """Alter an existing PostgreSQL sequence.

    Only the parameters that are not ``None`` are changed. Silently does
    nothing when the sequence does not exist yet (happens during create()).

    :param cr: database cursor
    :param str seq_name: name of the sequence to alter
    :param number_increment: new step, or None to leave it unchanged; must not be 0
    :param number_next: value to RESTART the sequence with, or None
    :raises UserError: if ``number_increment`` is zero
    """
    if number_increment == 0:
        raise UserError(_("Step must not be zero."))
    # relkind 'S' = sequence in pg_class.
    cr.execute("SELECT relname FROM pg_class WHERE relkind=%s AND relname=%s", ('S', seq_name))
    if not cr.fetchone():
        # sequence is not created yet, we're inside create() so ignore it, will be set later
        return
    # Build the statement with psycopg2's sql composition; each clause is
    # appended only when requested, with a matching %s placeholder parameter.
    statement = sql.SQL("ALTER SEQUENCE") + sql.Identifier(seq_name)
    params = []
    if number_increment is not None:
        statement += sql.SQL("INCREMENT BY") + sql.Placeholder()
        params.append(number_increment)
    if number_next is not None:
        statement += sql.SQL("RESTART WITH") + sql.Placeholder()
        params.append(number_next)
    # join(' ') inserts spaces between the composed fragments above.
    cr.execute(statement.join(' '), params)
def _select_nextval(cr, seq_name):
cr.execute("SELECT nextval(%s)", [seq_name])
return cr.fetchone()
def _update_nogap(self, number_increment):
number_next = self.number_next
self._cr.execute("SELECT number_next FROM %s WHERE id=%%s FOR UPDATE NOWAIT" % self._table, [self.id])
self._cr.execute("UPDATE %s SET number_next=number_next+%%s WHERE id=%%s " % self._table, (number_increment, self.id))
self.invalidate_cache(['number_next'], [self.id])
return number_next
def _predict_nextval(self, seq_id):
    """Predict next value for PostgreSQL sequence without consuming it"""
    # Cannot use currval() as it requires prior call to nextval()
    seqname = 'ir_sequence_%s' % seq_id
    seqtable = sql.Identifier(seqname)
    # pg_sequences exposes increment_by; the view only exists on PG >= 10.
    query = sql.SQL("""SELECT last_value,
                              (SELECT increment_by
                               FROM pg_sequences
                               WHERE sequencename = %s),
                              is_called
                       FROM {}""")
    params = [seqname]
    if self.env.cr._cnx.server_version < 100000:
        # Before PG 10, increment_by is a column of the sequence relation itself.
        query = sql.SQL("SELECT last_value, increment_by, is_called FROM {}")
        params = []
    self.env.cr.execute(query.format(seqtable), params)
    (last_value, increment_by, is_called) = self.env.cr.fetchone()
    if is_called:
        return last_value + increment_by
    # sequence has just been RESTARTed to return last_value next time
    return last_value
class IrSequence(models.Model):
    """ Sequence model.
    The sequence model allows to define and use so-called sequence objects.
    Such objects are used to generate unique identifiers in a transaction-safe
    way.

    Two implementations exist: 'standard' (backed by a native PostgreSQL
    sequence named ``ir_sequence_%03d``, fast but may leave gaps) and
    'no_gap' (counter kept in the table row, locked on each draw).
    """
    _name = 'ir.sequence'
    _description = 'Sequence'
    _order = 'name'
    def _get_number_next_actual(self):
        '''Return number from ir_sequence row when no_gap implementation,
        and number from postgres sequence when standard implementation.'''
        for seq in self:
            if not seq.id:
                # New (not yet saved) record: no backing sequence exists.
                seq.number_next_actual = 0
            elif seq.implementation != 'standard':
                seq.number_next_actual = seq.number_next
            else:
                seq_id = "%03d" % seq.id
                seq.number_next_actual = _predict_nextval(self, seq_id)
    def _set_number_next_actual(self):
        # Inverse: writing number_next triggers write(), which propagates the
        # change to the PostgreSQL sequence for the 'standard' implementation.
        for seq in self:
            seq.write({'number_next': seq.number_next_actual or 1})
    @api.model
    def _get_current_sequence(self, sequence_date=None):
        '''Returns the object on which we can find the number_next to consider for the sequence.
        It could be an ir.sequence or an ir.sequence.date_range depending if use_date_range is checked
        or not. This function will also create the ir.sequence.date_range if none exists yet for today
        '''
        if not self.use_date_range:
            return self
        sequence_date = sequence_date or fields.Date.today()
        seq_date = self.env['ir.sequence.date_range'].search(
            [('sequence_id', '=', self.id), ('date_from', '<=', sequence_date), ('date_to', '>=', sequence_date)], limit=1)
        if seq_date:
            return seq_date[0]
        #no date_range sequence was found, we create a new one
        return self._create_date_range_seq(sequence_date)
    name = fields.Char(required=True)
    code = fields.Char(string='Sequence Code')
    implementation = fields.Selection([('standard', 'Standard'), ('no_gap', 'No gap')],
                                      string='Implementation', required=True, default='standard',
                                      help="While assigning a sequence number to a record, the 'no gap' sequence implementation ensures that each previous sequence number has been assigned already. "
                                           "While this sequence implementation will not skip any sequence number upon assignment, there can still be gaps in the sequence if records are deleted. "
                                           "The 'no gap' implementation is slower than the standard one.")
    active = fields.Boolean(default=True)
    prefix = fields.Char(help="Prefix value of the record for the sequence", trim=False)
    suffix = fields.Char(help="Suffix value of the record for the sequence", trim=False)
    number_next = fields.Integer(string='Next Number', required=True, default=1, help="Next number of this sequence")
    number_next_actual = fields.Integer(compute='_get_number_next_actual', inverse='_set_number_next_actual',
                                        string='Actual Next Number',
                                        help="Next number that will be used. This number can be incremented "
                                             "frequently so the displayed value might already be obsolete")
    number_increment = fields.Integer(string='Step', required=True, default=1,
                                      help="The next number of the sequence will be incremented by this number")
    padding = fields.Integer(string='Sequence Size', required=True, default=0,
                             help="Odoo will automatically adds some '0' on the left of the "
                                  "'Next Number' to get the required padding size.")
    company_id = fields.Many2one('res.company', string='Company',
                                 default=lambda s: s.env.company)
    use_date_range = fields.Boolean(string='Use subsequences per date_range')
    date_range_ids = fields.One2many('ir.sequence.date_range', 'sequence_id', string='Subsequences')
    @api.model
    def create(self, values):
        """ Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
        """
        seq = super(IrSequence, self).create(values)
        if values.get('implementation', 'standard') == 'standard':
            _create_sequence(self._cr, "ir_sequence_%03d" % seq.id, values.get('number_increment', 1), values.get('number_next', 1))
        return seq
    def unlink(self):
        # Drop the backing PostgreSQL sequences before deleting the rows.
        _drop_sequences(self._cr, ["ir_sequence_%03d" % x.id for x in self])
        return super(IrSequence, self).unlink()
    def write(self, values):
        """Propagate step/next-number changes (and implementation switches)
        to the underlying PostgreSQL sequences."""
        new_implementation = values.get('implementation')
        for seq in self:
            # 4 cases: we test the previous impl. against the new one.
            i = values.get('number_increment', seq.number_increment)
            n = values.get('number_next', seq.number_next)
            if seq.implementation == 'standard':
                if new_implementation in ('standard', None):
                    # Implementation has NOT changed.
                    # Only change sequence if really requested.
                    if values.get('number_next'):
                        _alter_sequence(self._cr, "ir_sequence_%03d" % seq.id, number_next=n)
                    if seq.number_increment != i:
                        _alter_sequence(self._cr, "ir_sequence_%03d" % seq.id, number_increment=i)
                        seq.date_range_ids._alter_sequence(number_increment=i)
                else:
                    # standard -> no_gap: the PG sequences are no longer needed.
                    _drop_sequences(self._cr, ["ir_sequence_%03d" % seq.id])
                    for sub_seq in seq.date_range_ids:
                        _drop_sequences(self._cr, ["ir_sequence_%03d_%03d" % (seq.id, sub_seq.id)])
            else:
                if new_implementation in ('no_gap', None):
                    pass
                else:
                    # no_gap -> standard: create the backing PG sequences.
                    _create_sequence(self._cr, "ir_sequence_%03d" % seq.id, i, n)
                    for sub_seq in seq.date_range_ids:
                        _create_sequence(self._cr, "ir_sequence_%03d_%03d" % (seq.id, sub_seq.id), i, n)
        res = super(IrSequence, self).write(values)
        # DLE P179
        self.flush(values.keys())
        return res
    def _next_do(self):
        # Draw the next raw number from the appropriate backend and format it.
        if self.implementation == 'standard':
            number_next = _select_nextval(self._cr, 'ir_sequence_%03d' % self.id)
        else:
            number_next = _update_nogap(self, self.number_increment)
        return self.get_next_char(number_next)
    def _get_prefix_suffix(self, date=None, date_range=None):
        """Interpolate the %(...)s date placeholders of prefix and suffix.

        :raises UserError: when the prefix/suffix contains invalid placeholders
        """
        def _interpolate(s, d):
            return (s % d) if s else ''
        def _interpolation_dict():
            # 'now' is the real clock; 'effective_date'/'range_date' may be
            # overridden by arguments or context keys.
            now = range_date = effective_date = datetime.now(pytz.timezone(self._context.get('tz') or 'UTC'))
            if date or self._context.get('ir_sequence_date'):
                effective_date = fields.Datetime.from_string(date or self._context.get('ir_sequence_date'))
            if date_range or self._context.get('ir_sequence_date_range'):
                range_date = fields.Datetime.from_string(date_range or self._context.get('ir_sequence_date_range'))
            sequences = {
                'year': '%Y', 'month': '%m', 'day': '%d', 'y': '%y', 'doy': '%j', 'woy': '%W',
                'weekday': '%w', 'h24': '%H', 'h12': '%I', 'min': '%M', 'sec': '%S'
            }
            res = {}
            for key, format in sequences.items():
                res[key] = effective_date.strftime(format)
                res['range_' + key] = range_date.strftime(format)
                res['current_' + key] = now.strftime(format)
            return res
        self.ensure_one()
        d = _interpolation_dict()
        try:
            interpolated_prefix = _interpolate(self.prefix, d)
            interpolated_suffix = _interpolate(self.suffix, d)
        except ValueError:
            raise UserError(_('Invalid prefix or suffix for sequence \'%s\'') % self.name)
        return interpolated_prefix, interpolated_suffix
    def get_next_char(self, number_next):
        # Zero-pad the number to self.padding and wrap it with prefix/suffix.
        interpolated_prefix, interpolated_suffix = self._get_prefix_suffix()
        return interpolated_prefix + '%%0%sd' % self.padding % number_next + interpolated_suffix
    def _create_date_range_seq(self, date):
        """Create the date-range subsequence covering ``date``, shrinking the
        default calendar-year span so it never overlaps existing ranges."""
        year = fields.Date.from_string(date).strftime('%Y')
        date_from = '{}-01-01'.format(year)
        date_to = '{}-12-31'.format(year)
        date_range = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_from', '>=', date), ('date_from', '<=', date_to)], order='date_from desc', limit=1)
        if date_range:
            # A later range starts within the year: stop just before it.
            date_to = date_range.date_from + timedelta(days=-1)
        date_range = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_to', '>=', date_from), ('date_to', '<=', date)], order='date_to desc', limit=1)
        if date_range:
            # An earlier range ends within the year: start just after it.
            date_from = date_range.date_to + timedelta(days=1)
        seq_date_range = self.env['ir.sequence.date_range'].sudo().create({
            'date_from': date_from,
            'date_to': date_to,
            'sequence_id': self.id,
        })
        return seq_date_range
    def _next(self, sequence_date=None):
        """ Returns the next number in the preferred sequence in all the ones given in self."""
        if not self.use_date_range:
            return self._next_do()
        # date mode
        dt = sequence_date or self._context.get('ir_sequence_date', fields.Date.today())
        seq_date = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_from', '<=', dt), ('date_to', '>=', dt)], limit=1)
        if not seq_date:
            seq_date = self._create_date_range_seq(dt)
        return seq_date.with_context(ir_sequence_date_range=seq_date.date_from)._next()
    def next_by_id(self, sequence_date=None):
        """ Draw an interpolated string using the specified sequence."""
        self.check_access_rights('read')
        return self._next(sequence_date=sequence_date)
    @api.model
    def next_by_code(self, sequence_code, sequence_date=None):
        """ Draw an interpolated string using a sequence with the requested code.
        If several sequences with the correct code are available to the user
        (multi-company cases), the one from the user's current company will
        be used.
        """
        self.check_access_rights('read')
        company_id = self.env.company.id
        # order='company_id' puts company-specific sequences before global ones.
        seq_ids = self.search([('code', '=', sequence_code), ('company_id', 'in', [company_id, False])], order='company_id')
        if not seq_ids:
            _logger.debug("No ir.sequence has been found for code '%s'. Please make sure a sequence is set for current company." % sequence_code)
            return False
        seq_id = seq_ids[0]
        return seq_id._next(sequence_date=sequence_date)
    @api.model
    def get_id(self, sequence_code_or_id, code_or_id='id'):
        """ Draw an interpolated string using the specified sequence.
        The sequence to use is specified by the ``sequence_code_or_id``
        argument, which can be a code or an id (as controlled by the
        ``code_or_id`` argument. This method is deprecated.
        """
        _logger.warning("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
                        "Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
        if code_or_id == 'id':
            return self.browse(sequence_code_or_id).next_by_id()
        else:
            return self.next_by_code(sequence_code_or_id)
    @api.model
    def get(self, code):
        """ Draw an interpolated string using the specified sequence.
        The sequence to use is specified by its code. This method is
        deprecated.
        """
        return self.get_id(code, 'code')
class IrSequenceDateRange(models.Model):
    """A per-date-range subsequence of an ir.sequence (used when the parent
    sequence has ``use_date_range`` enabled). Standard subsequences are
    backed by PostgreSQL sequences named ``ir_sequence_%03d_%03d``."""
    _name = 'ir.sequence.date_range'
    _description = 'Sequence Date Range'
    _rec_name = "sequence_id"
    def _get_number_next_actual(self):
        '''Return number from ir_sequence row when no_gap implementation,
        and number from postgres sequence when standard implementation.'''
        for seq in self:
            if seq.sequence_id.implementation != 'standard':
                seq.number_next_actual = seq.number_next
            else:
                seq_id = "%03d_%03d" % (seq.sequence_id.id, seq.id)
                seq.number_next_actual = _predict_nextval(self, seq_id)
    def _set_number_next_actual(self):
        # Inverse: delegate to write(), which syncs the PG sequence if needed.
        for seq in self:
            seq.write({'number_next': seq.number_next_actual or 1})
    @api.model
    def default_get(self, fields):
        result = super(IrSequenceDateRange, self).default_get(fields)
        result['number_next_actual'] = 1
        return result
    date_from = fields.Date(string='From', required=True)
    date_to = fields.Date(string='To', required=True)
    sequence_id = fields.Many2one("ir.sequence", string='Main Sequence', required=True, ondelete='cascade')
    number_next = fields.Integer(string='Next Number', required=True, default=1, help="Next number of this sequence")
    number_next_actual = fields.Integer(compute='_get_number_next_actual', inverse='_set_number_next_actual',
                                        string='Actual Next Number',
                                        help="Next number that will be used. This number can be incremented "
                                             "frequently so the displayed value might already be obsolete")
    def _next(self):
        # Draw the next raw number, then let the parent sequence format it.
        if self.sequence_id.implementation == 'standard':
            number_next = _select_nextval(self._cr, 'ir_sequence_%03d_%03d' % (self.sequence_id.id, self.id))
        else:
            number_next = _update_nogap(self, self.sequence_id.number_increment)
        return self.sequence_id.get_next_char(number_next)
    def _alter_sequence(self, number_increment=None, number_next=None):
        # Apply step/restart changes to each record's backing PG sequence.
        for seq in self:
            _alter_sequence(self._cr, "ir_sequence_%03d_%03d" % (seq.sequence_id.id, seq.id), number_increment=number_increment, number_next=number_next)
    @api.model
    def create(self, values):
        """ Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
        """
        seq = super(IrSequenceDateRange, self).create(values)
        main_seq = seq.sequence_id
        if main_seq.implementation == 'standard':
            _create_sequence(self._cr, "ir_sequence_%03d_%03d" % (main_seq.id, seq.id), main_seq.number_increment, values.get('number_next_actual', 1))
        return seq
    def unlink(self):
        # Drop the backing PostgreSQL sequences before deleting the rows.
        _drop_sequences(self._cr, ["ir_sequence_%03d_%03d" % (x.sequence_id.id, x.id) for x in self])
        return super(IrSequenceDateRange, self).unlink()
    def write(self, values):
        if values.get('number_next'):
            seq_to_alter = self.filtered(lambda seq: seq.sequence_id.implementation == 'standard')
            seq_to_alter._alter_sequence(number_next=values.get('number_next'))
        # DLE P179: `test_in_invoice_line_onchange_sequence_number_1`
        # _update_nogap do a select to get the next sequence number_next
        # When changing (writing) the number next of a sequence, the number next must be flushed before doing the select.
        # Normally in such a case, we flush just above the execute, but for the sake of performance
        # I believe this is better to flush directly in the write:
        # - Changing the number next of a sequence is really really rare,
        # - But selecting the number next happens a lot,
        # Therefore, if I chose to put the flush just above the select, it would check the flush most of the time for no reason.
        res = super(IrSequenceDateRange, self).write(values)
        self.flush(values.keys())
        return res
| 49.687339 | 19,229 |
682 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class IrExports(models.Model):
    """A saved export template: a named list of fields for a given model."""
    _name = "ir.exports"
    _description = 'Exports'
    _order = 'name'
    name = fields.Char(string='Export Name')
    # Technical name of the model the export applies to (e.g. 'res.partner').
    resource = fields.Char(index=True)
    export_fields = fields.One2many('ir.exports.line', 'export_id', string='Export ID', copy=True)
class IrExportsLine(models.Model):
    """One exported field (possibly a '/'-separated sub-field path) of an
    ir.exports template."""
    _name = 'ir.exports.line'
    _description = 'Exports Line'
    _order = 'id'
    name = fields.Char(string='Field Name')
    export_id = fields.Many2one('ir.exports', string='Export', index=True, ondelete='cascade')
| 29.652174 | 682 |
2,377 | py | PYTHON | 15.0 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class IrLogging(models.Model):
    """Log entries collected via the --log-db option; rows are inserted with
    raw SQL, bypassing the ORM (hence the plain-integer audit fields)."""
    _name = 'ir.logging'
    _description = 'Logging'
    _order = 'id DESC'
    # The _log_access fields are defined manually for the following reasons:
    #
    # - The entries in ir_logging are filled in with sql queries bypassing the orm. As the --log-db
    #   cli option allows to insert ir_logging entries into a remote database, the one2many *_uid
    #   fields make no sense in the first place but we will keep it for backward compatibility.
    #
    # - Also, when an ir_logging entry is triggered by the orm (when using --log-db) at the moment
    #   it is making changes to the res.users model, the ALTER TABLE will aquire an exclusive lock
    #   on res_users, preventing the ir_logging INSERT to be processed, hence the ongoing module
    #   install/update will hang forever as the orm is blocked by the ir_logging query that will
    #   never occur.
    create_uid = fields.Integer(string='Created by', readonly=True)
    create_date = fields.Datetime(string='Created on', readonly=True)
    write_uid = fields.Integer(string='Last Updated by', readonly=True)
    write_date = fields.Datetime(string='Last Updated on', readonly=True)
    name = fields.Char(required=True)
    type = fields.Selection([('client', 'Client'), ('server', 'Server')], required=True, index=True)
    dbname = fields.Char(string='Database Name', index=True)
    level = fields.Char(index=True)
    message = fields.Text(required=True)
    path = fields.Char(required=True)
    func = fields.Char(string='Function', required=True)
    line = fields.Char(required=True)
    def init(self):
        """Drop the leftover FK from write_uid to res_users, if present, so
        log inserts cannot be blocked by locks on res_users."""
        super(IrLogging, self).init()
        self._cr.execute("select 1 from information_schema.constraint_column_usage where table_name = 'ir_logging' and constraint_name = 'ir_logging_write_uid_fkey'")
        if self._cr.rowcount:
            # DROP CONSTRAINT unconditionally takes an ACCESS EXCLUSIVE lock
            # on the table, even "IF EXISTS" is set and not matching; disabling
            # the relevant trigger instead acquires SHARE ROW EXCLUSIVE, which
            # still conflicts with the ROW EXCLUSIVE needed for an insert
            # NOTE(review): the comment above discusses disabling a trigger,
            # but the statement below does drop the constraint — confirm intent.
            self._cr.execute("ALTER TABLE ir_logging DROP CONSTRAINT ir_logging_write_uid_fkey")
| 55.27907 | 2,377 |
12,434 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import operator
import re
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
from odoo.http import request
from odoo.modules import get_module_resource
from odoo.osv import expression
MENU_ITEM_SEPARATOR = "/"
NUMBER_PARENS = re.compile(r"\(([0-9]+)\)")
class IrUiMenu(models.Model):
_name = 'ir.ui.menu'
_description = 'Menu'
_order = "sequence,id"
_parent_store = True
    def __init__(self, *args, **kwargs):
        super(IrUiMenu, self).__init__(*args, **kwargs)
        # Flush the menu-visibility caches whenever model ACLs change.
        self.pool['ir.model.access'].register_cache_clearing_method(self._name, 'clear_caches')
name = fields.Char(string='Menu', required=True, translate=True)
active = fields.Boolean(default=True)
sequence = fields.Integer(default=10)
child_id = fields.One2many('ir.ui.menu', 'parent_id', string='Child IDs')
parent_id = fields.Many2one('ir.ui.menu', string='Parent Menu', index=True, ondelete="restrict")
parent_path = fields.Char(index=True)
groups_id = fields.Many2many('res.groups', 'ir_ui_menu_group_rel',
'menu_id', 'gid', string='Groups',
help="If you have groups, the visibility of this menu will be based on these groups. "\
"If this field is empty, Odoo will compute visibility based on the related object's read access.")
complete_name = fields.Char(string='Full Path', compute='_compute_complete_name', recursive=True)
web_icon = fields.Char(string='Web Icon File')
action = fields.Reference(selection=[('ir.actions.report', 'ir.actions.report'),
('ir.actions.act_window', 'ir.actions.act_window'),
('ir.actions.act_url', 'ir.actions.act_url'),
('ir.actions.server', 'ir.actions.server'),
('ir.actions.client', 'ir.actions.client')])
web_icon_data = fields.Binary(string='Web Icon Image', attachment=True)
    @api.depends('name', 'parent_id.complete_name')
    def _compute_complete_name(self):
        # "Parent/Child/..." path, truncated by _get_full_name's depth limit.
        for menu in self:
            menu.complete_name = menu._get_full_name()
    def _get_full_name(self, level=6):
        """ Return the full name of ``self`` (up to a certain level).

        Ancestors deeper than ``level`` are collapsed into '...'.
        """
        if level <= 0:
            return '...'
        if self.parent_id:
            return self.parent_id._get_full_name(level - 1) + MENU_ITEM_SEPARATOR + (self.name or "")
        else:
            return self.name
    def read_image(self, path):
        """Load a module icon given as 'module,relative/path' and return it
        base64-encoded, or False when the path is empty or not found."""
        if not path:
            return False
        path_info = path.split(',')
        icon_path = get_module_resource(path_info[0], path_info[1])
        icon_image = False
        if icon_path:
            with tools.file_open(icon_path, 'rb') as icon_file:
                icon_image = base64.encodebytes(icon_file.read())
        return icon_image
    @api.constrains('parent_id')
    def _check_parent_id(self):
        # Reject cycles in the menu hierarchy.
        if not self._check_recursion():
            raise ValidationError(_('Error! You cannot create recursive menus.'))
    @api.model
    @tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'debug')
    def _visible_menu_ids(self, debug=False):
        """ Return the ids of the menu items visible to the user.

        Cached per (user groups, debug); the cache is cleared on menu
        create/write/unlink and on ACL changes (see __init__).
        """
        # retrieve all menus, and determine which ones are visible
        context = {'ir.ui.menu.full_list': True}
        menus = self.with_context(context).search([]).sudo()
        groups = self.env.user.groups_id
        if not debug:
            # Outside debug mode, 'technical features' menus are hidden.
            groups = groups - self.env.ref('base.group_no_one')
        # first discard all menus with groups the user does not have
        menus = menus.filtered(
            lambda menu: not menu.groups_id or menu.groups_id & groups)
        # take apart menus that have an action
        action_menus = menus.filtered(lambda m: m.action and m.action.exists())
        folder_menus = menus - action_menus
        visible = self.browse()
        # process action menus, check whether their action is allowed
        access = self.env['ir.model.access']
        MODEL_GETTER = {
            'ir.actions.act_window': lambda action: action.res_model,
            'ir.actions.report': lambda action: action.model,
            'ir.actions.server': lambda action: action.model_id.model,
        }
        for menu in action_menus:
            get_model = MODEL_GETTER.get(menu.action._name)
            if not get_model or not get_model(menu.action) or \
                    access.check(get_model(menu.action), 'read', False):
                # make menu visible, and its folder ancestors, too
                visible += menu
                menu = menu.parent_id
                while menu and menu in folder_menus and menu not in visible:
                    visible += menu
                    menu = menu.parent_id
        return set(visible.ids)
    @api.returns('self')
    def _filter_visible_menus(self):
        """ Filter `self` to only keep the menu items that should be visible in
        the menu hierarchy of the current user.
        Uses a cache for speeding up the computation.
        """
        visible_ids = self._visible_menu_ids(request.session.debug if request else False)
        return self.filtered(lambda menu: menu.id in visible_ids)
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        # Fetch the complete result set first (offset=0, limit=None,
        # count=False): visibility filtering must happen before slicing,
        # otherwise offset/limit would be applied to unfiltered rows.
        menu_ids = super(IrUiMenu, self)._search(args, offset=0, limit=None, order=order, count=False, access_rights_uid=access_rights_uid)
        menus = self.browse(menu_ids)
        if menus:
            # menu filtering is done only on main menu tree, not other menu lists
            if not self._context.get('ir.ui.menu.full_list'):
                menus = menus._filter_visible_menus()
            # apply the caller's offset/limit on the filtered result
            if offset:
                menus = menus[offset:]
            if limit:
                menus = menus[:limit]
        return len(menus) if count else menus.ids
def name_get(self):
return [(menu.id, menu._get_full_name()) for menu in self]
@api.model_create_multi
def create(self, vals_list):
self.clear_caches()
for values in vals_list:
if 'web_icon' in values:
values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
return super(IrUiMenu, self).create(vals_list)
def write(self, values):
self.clear_caches()
if 'web_icon' in values:
values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
return super(IrUiMenu, self).write(values)
def _compute_web_icon_data(self, web_icon):
""" Returns the image associated to `web_icon`.
`web_icon` can either be:
- an image icon [module, path]
- a built icon [icon_class, icon_color, background_color]
and it only has to call `read_image` if it's an image.
"""
if web_icon and len(web_icon.split(',')) == 2:
return self.read_image(web_icon)
def unlink(self):
# Detach children and promote them to top-level, because it would be unwise to
# cascade-delete submenus blindly. We also can't use ondelete=set null because
# that is not supported when _parent_store is used (would silently corrupt it).
# TODO: ideally we should move them under a generic "Orphans" menu somewhere?
extra = {'ir.ui.menu.full_list': True,
'active_test': False}
direct_children = self.with_context(**extra).search([('parent_id', 'in', self.ids)])
direct_children.write({'parent_id': False})
self.clear_caches()
return super(IrUiMenu, self).unlink()
def copy(self, default=None):
record = super(IrUiMenu, self).copy(default=default)
match = NUMBER_PARENS.search(record.name)
if match:
next_num = int(match.group(1)) + 1
record.name = NUMBER_PARENS.sub('(%d)' % next_num, record.name)
else:
record.name = record.name + '(1)'
return record
@api.model
@api.returns('self')
def get_user_roots(self):
""" Return all root menu ids visible for the user.
:return: the root menu ids
:rtype: list(int)
"""
return self.search([('parent_id', '=', False)])
    def _load_menus_blacklist(self):
        """Hook: return a list of menu ids to exclude from ``load_menus``.

        Empty by default; meant to be overridden by other modules.
        """
        return []
@api.model
@tools.ormcache_context('self._uid', keys=('lang',))
def load_menus_root(self):
fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon_data']
menu_roots = self.get_user_roots()
menu_roots_data = menu_roots.read(fields) if menu_roots else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots_data,
'all_menu_ids': menu_roots.ids,
}
xmlids = menu_roots._get_menuitems_xmlids()
for menu in menu_roots_data:
menu['xmlid'] = xmlids.get(menu['id'], '')
return menu_root
    @api.model
    @tools.ormcache_context('self._uid', 'debug', keys=('lang',))
    def load_menus(self, debug):
        """ Loads all menu items (all applications and their sub-menus).
        :return: the menu root
        :rtype: dict('children': menu_nodes)
        """
        fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon', 'web_icon_data']
        menu_roots = self.get_user_roots()
        menu_roots_data = menu_roots.read(fields) if menu_roots else []
        # virtual root node: its children are the app (top-level) menu ids
        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': [menu['id'] for menu in menu_roots_data],
        }
        all_menus = {'root': menu_root}
        if not menu_roots_data:
            return all_menus
        # menus are loaded fully unlike a regular tree view, cause there are a
        # limited number of items (752 when all 6.1 addons are installed)
        menus_domain = [('id', 'child_of', menu_roots.ids)]
        blacklisted_menu_ids = self._load_menus_blacklist()
        if blacklisted_menu_ids:
            menus_domain = expression.AND([menus_domain, [('id', 'not in', blacklisted_menu_ids)]])
        menus = self.search(menus_domain)
        menu_items = menus.read(fields)
        xmlids = (menu_roots + menus)._get_menuitems_xmlids()
        # add roots at the end of the sequence, so that they will overwrite
        # equivalent menu items from full menu read when put into id:item
        # mapping, resulting in children being correctly set on the roots.
        menu_items.extend(menu_roots_data)
        # set children ids and xmlids
        menu_items_map = {menu_item["id"]: menu_item for menu_item in menu_items}
        for menu_item in menu_items:
            menu_item.setdefault('children', [])
            parent = menu_item['parent_id'] and menu_item['parent_id'][0]
            menu_item['xmlid'] = xmlids.get(menu_item['id'], "")
            if parent in menu_items_map:
                menu_items_map[parent].setdefault(
                    'children', []).append(menu_item['id'])
        all_menus.update(menu_items_map)
        # sort each node's children list by menu sequence
        for menu_id in all_menus:
            all_menus[menu_id]['children'].sort(key=lambda id: all_menus[id]['sequence'])
        # recursively set app ids to related children
        def _set_app_id(app_id, menu):
            menu['app_id'] = app_id
            for child_id in menu['children']:
                _set_app_id(app_id, all_menus[child_id])
        for app in menu_roots_data:
            app_id = app['id']
            _set_app_id(app_id, all_menus[app_id])
        # filter out menus not related to an app (+ keep root menu)
        all_menus = {menu['id']: menu for menu in all_menus.values() if menu.get('app_id')}
        all_menus['root'] = menu_root
        return all_menus
def _get_menuitems_xmlids(self):
menuitems = self.env['ir.model.data'].sudo().search([
('res_id', 'in', self.ids),
('model', '=', 'ir.ui.menu')
])
return {
menu.res_id: menu.complete_name
for menu in menuitems
}
| 41.036304 | 12,434 |
3,526 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from base64 import b64encode
from hashlib import sha512
from odoo import models, fields, api
from odoo.tools import html_escape, file_open
def get_hsl_from_seed(seed):
    """Derive a deterministic, readable CSS ``hsl(...)`` color from *seed*.

    The hue covers the full color wheel, the saturation stays between 40%
    and 70% (colorful but not flashy) and the lightness is fixed at 45%
    (neither too bright nor too dark).
    """
    digest = sha512(seed.encode()).hexdigest()
    hue = int(digest[:2], 16) * 360 / 255
    saturation = 40 + int(digest[2:4], 16) * (70 - 40) / 255
    lightness = 45
    return f'hsl({hue:.0f}, {saturation:.0f}%, {lightness:.0f}%)'
class AvatarMixin(models.AbstractModel):
    """Extend ``image.mixin`` with automatically generated avatars.

    When a record has no image, its avatar falls back to a generated SVG
    (the record name's initial over a deterministic background color) for
    saved, named records, or to a static placeholder image otherwise.
    """
    _name = 'avatar.mixin'
    _inherit = ['image.mixin']
    _description = "Avatar Mixin"
    # field providing the avatar initial and the color seed
    _avatar_name_field = "name"

    # all image fields are base64 encoded and PIL-supported
    avatar_1920 = fields.Image("Avatar", max_width=1920, max_height=1920, compute="_compute_avatar_1920")
    avatar_1024 = fields.Image("Avatar 1024", max_width=1024, max_height=1024, compute="_compute_avatar_1024")
    avatar_512 = fields.Image("Avatar 512", max_width=512, max_height=512, compute="_compute_avatar_512")
    avatar_256 = fields.Image("Avatar 256", max_width=256, max_height=256, compute="_compute_avatar_256")
    avatar_128 = fields.Image("Avatar 128", max_width=128, max_height=128, compute="_compute_avatar_128")

    def _compute_avatar(self, avatar_field, image_field):
        """Fill *avatar_field* from *image_field*, falling back to a generated
        SVG (saved records with a name) or the static placeholder."""
        for record in self:
            avatar = record[image_field]
            if not avatar:
                if record.id and record[record._avatar_name_field]:
                    avatar = record._avatar_generate_svg()
                else:
                    avatar = record._avatar_get_placeholder()
            record[avatar_field] = avatar

    @api.depends(lambda self: [self._avatar_name_field, 'image_1920'])
    def _compute_avatar_1920(self):
        self._compute_avatar('avatar_1920', 'image_1920')

    @api.depends(lambda self: [self._avatar_name_field, 'image_1024'])
    def _compute_avatar_1024(self):
        self._compute_avatar('avatar_1024', 'image_1024')

    @api.depends(lambda self: [self._avatar_name_field, 'image_512'])
    def _compute_avatar_512(self):
        self._compute_avatar('avatar_512', 'image_512')

    @api.depends(lambda self: [self._avatar_name_field, 'image_256'])
    def _compute_avatar_256(self):
        self._compute_avatar('avatar_256', 'image_256')

    @api.depends(lambda self: [self._avatar_name_field, 'image_128'])
    def _compute_avatar_128(self):
        self._compute_avatar('avatar_128', 'image_128')

    def _avatar_generate_svg(self):
        """Return a base64-encoded SVG avatar: the (escaped) uppercased
        initial of the name field over a color seeded by name + create date."""
        initial = html_escape(self[self._avatar_name_field][0].upper())
        bgcolor = get_hsl_from_seed(self[self._avatar_name_field] + str(self.create_date.timestamp() if self.create_date else ""))
        return b64encode((
            "<?xml version='1.0' encoding='UTF-8' ?>"
            "<svg height='180' width='180' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>"
            f"<rect fill='{bgcolor}' height='180' width='180'/>"
            f"<text fill='#ffffff' font-size='96' text-anchor='middle' x='90' y='125' font-family='sans-serif'>{initial}</text>"
            "</svg>"
        ).encode())

    def _avatar_get_placeholder_path(self):
        """Hook: module-relative path of the placeholder image."""
        return "base/static/img/avatar_grey.png"

    def _avatar_get_placeholder(self):
        """Return the base64-encoded placeholder image."""
        # use a context manager: the previous code leaked the file handle
        # returned by file_open (never closed until garbage collection)
        with file_open(self._avatar_get_placeholder_path(), 'rb') as image_file:
            return b64encode(image_file.read())
| 44.632911 | 3,526 |
30,326 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
import base64
import logging
import re
from io import BytesIO
import babel
import babel.dates
from markupsafe import Markup, escape
from PIL import Image
from lxml import etree, html
from odoo import api, fields, models, _, _lt
from odoo.tools import posix_to_ldml, float_utils, format_date, format_duration, pycompat
from odoo.tools.mail import safe_attrs
from odoo.tools.misc import get_lang, babel_locale_parse
_logger = logging.getLogger(__name__)
def nl2br(string):
    """Return *string* as text with every newline turned into an HTML ``<br>``.

    :param str string: text to convert
    :rtype: unicode
    """
    text = pycompat.to_text(string)
    return text.replace('\n', Markup('<br>\n'))
#--------------------------------------------------------------------
# QWeb Fields converters
#--------------------------------------------------------------------
class FieldConverter(models.AbstractModel):
    """ Used to convert a t-field specification into an output HTML field.
    :meth:`~.to_html` is the entry point of this conversion from QWeb, it:
    * converts the record value to html using :meth:`~.record_to_html`
    * generates the metadata attributes (``data-oe-``) to set on the root
    result node
    * generates the root result node itself through :meth:`~.render_element`
    """
    _name = 'ir.qweb.field'
    _description = 'Qweb Field'

    @api.model
    def get_available_options(self):
        """
        Get the available option informations.
        Returns a dict of dict with:
        * key equal to the option key.
        * dict: type, params, name, description, default_value
        * type:
            'string'
            'integer'
            'float'
            'model' (e.g. 'res.partner')
            'array'
            'selection' (e.g. [key1, key2...])
        """
        # base converter exposes no option; subclasses extend this
        return {}

    @api.model
    def attributes(self, record, field_name, options, values=None):
        """ attributes(record, field_name, field, options, values)
        Generates the metadata attributes (prefixed by ``data-oe-``) for the
        root node of the field conversion.
        The default attributes are:
        * ``model``, the name of the record's model
        * ``id`` the id of the record to which the field belongs
        * ``type`` the logical field type (widget, may not match the field's
        ``type``, may not be any Field subclass name)
        * ``translate``, a boolean flag (``0`` or ``1``) denoting whether the
        field is translatable
        * ``readonly``, has this attribute if the field is readonly
        * ``expression``, the original expression
        :returns: dict (attribute name, attribute value).
        """
        data = {}
        field = record._fields[field_name]
        # branding metadata is only useful when the rendering is editable
        # (inherit_branding) or translatable; otherwise emit nothing
        if not options['inherit_branding'] and not options['translate']:
            return data
        data['data-oe-model'] = record._name
        data['data-oe-id'] = record.id
        data['data-oe-field'] = field.name
        data['data-oe-type'] = options.get('type')
        data['data-oe-expression'] = options.get('expression')
        if field.readonly:
            data['data-oe-readonly'] = 1
        return data

    @api.model
    def value_to_html(self, value, options):
        """ value_to_html(value, field, options=None)
        Converts a single value to its HTML version/output
        :rtype: unicode
        """
        return escape(pycompat.to_text(value))

    @api.model
    def record_to_html(self, record, field_name, options):
        """ record_to_html(record, field_name, options)
        Converts the specified field of the ``record`` to HTML
        :rtype: unicode
        """
        if not record:
            return False
        value = record[field_name]
        # dispatch through env so overridden converter models are honored
        return False if value is False else record.env[self._name].value_to_html(value, options=options)

    @api.model
    def user_lang(self):
        """ user_lang()
        Fetches the res.lang record corresponding to the language code stored
        in the user's context.
        :returns: Model[res.lang]
        """
        return get_lang(self.env)
class IntegerConverter(models.AbstractModel):
    _name = 'ir.qweb.field.integer'
    _description = 'Qweb Field Integer'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Format *value* with the user language's digit grouping."""
        formatted = self.user_lang().format('%d', value, grouping=True)
        # keep the minus sign attached to the number when the line wraps
        formatted = formatted.replace(r'-', '-\N{ZERO WIDTH NO-BREAK SPACE}')
        return pycompat.to_text(formatted)
class FloatConverter(models.AbstractModel):
    _name = 'ir.qweb.field.float'
    _description = 'Qweb Field Float'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        """Add the rounding ``precision`` option."""
        options = super(FloatConverter, self).get_available_options()
        options.update(
            precision=dict(type='integer', string=_('Rounding precision')),
        )
        return options

    @api.model
    def value_to_html(self, value, options):
        """Format *value* in the user language, rounded to the requested
        precision.

        The number of digits comes from ``decimal_precision`` (a
        decimal.precision name) or ``precision`` (an integer); when neither
        is provided, format with '%f' and strip trailing zeroes.
        """
        if 'decimal_precision' in options:
            precision = self.env['decimal.precision'].precision_get(options['decimal_precision'])
        else:
            # use .get(): value_to_html may be called directly (without
            # record_to_html injecting a 'precision' default), in which
            # case options['precision'] would raise KeyError
            precision = options.get('precision')
        if precision is None:
            fmt = '%f'
        else:
            value = float_utils.float_round(value, precision_digits=precision)
            fmt = '%.{precision}f'.format(precision=precision)
        formatted = self.user_lang().format(fmt, value, grouping=True).replace(r'-', '-\N{ZERO WIDTH NO-BREAK SPACE}')
        # %f does not strip trailing zeroes. %g does but its precision causes
        # it to switch to scientific notation starting at a million *and* to
        # strip decimals. So use %f and if no precision was specified manually
        # strip trailing 0.
        if precision is None:
            formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted)
        return pycompat.to_text(formatted)

    @api.model
    def record_to_html(self, record, field_name, options):
        """Default ``precision`` from the field's digits definition."""
        if 'precision' not in options and 'decimal_precision' not in options:
            _, precision = record._fields[field_name].get_digits(record.env) or (None, None)
            options = dict(options, precision=precision)
        return super(FloatConverter, self).record_to_html(record, field_name, options)
class DateConverter(models.AbstractModel):
    _name = 'ir.qweb.field.date'
    _description = 'Qweb Field Date'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        """Expose the ``format`` option on top of the inherited ones."""
        opts = super(DateConverter, self).get_available_options()
        opts['format'] = dict(type='string', string=_('Date format'))
        return opts

    @api.model
    def value_to_html(self, value, options):
        """Render *value* through :func:`format_date`, honoring ``format``."""
        date_format = options.get('format')
        return format_date(self.env, value, date_format=date_format)
class DateTimeConverter(models.AbstractModel):
    _name = 'ir.qweb.field.datetime'
    _description = 'Qweb Field Datetime'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        """Expose the datetime-specific rendering options."""
        options = super(DateTimeConverter, self).get_available_options()
        options.update(
            format=dict(type='string', string=_('Pattern to format')),
            tz_name=dict(type='char', string=_('Optional timezone name')),
            time_only=dict(type='boolean', string=_('Display only the time')),
            hide_seconds=dict(type='boolean', string=_('Hide seconds')),
            date_only=dict(type='boolean', string=_('Display only the date')),
        )
        return options

    @api.model
    def value_to_html(self, value, options):
        """Render *value* (str or datetime) in the user's language, timezone
        and date/time format, subject to the options above."""
        if not value:
            return ''
        options = options or {}
        lang = self.user_lang()
        locale = babel_locale_parse(lang.code)
        format_func = babel.dates.format_datetime
        if isinstance(value, str):
            value = fields.Datetime.from_string(value)
        # shift the value into the user's timezone (from context)
        value = fields.Datetime.context_timestamp(self, value)
        if options.get('tz_name'):
            tzinfo = babel.dates.get_timezone(options['tz_name'])
        else:
            tzinfo = None
        if 'format' in options:
            pattern = options['format']
        else:
            # build an LDML pattern from the language's posix date/time formats
            if options.get('time_only'):
                strftime_pattern = ("%s" % (lang.time_format))
            elif options.get('date_only'):
                strftime_pattern = ("%s" % (lang.date_format))
            else:
                strftime_pattern = ("%s %s" % (lang.date_format, lang.time_format))
            pattern = posix_to_ldml(strftime_pattern, locale=locale)
        if options.get('hide_seconds'):
            pattern = pattern.replace(":ss", "").replace(":s", "")
        # time_only takes precedence over date_only when both are set
        if options.get('time_only'):
            format_func = babel.dates.format_time
            return pycompat.to_text(format_func(value, format=pattern, tzinfo=tzinfo, locale=locale))
        if options.get('date_only'):
            format_func = babel.dates.format_date
            return pycompat.to_text(format_func(value, format=pattern, locale=locale))
        return pycompat.to_text(format_func(value, format=pattern, tzinfo=tzinfo, locale=locale))
class TextConverter(models.AbstractModel):
    _name = 'ir.qweb.field.text'
    _description = 'Qweb Field Text'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """HTML-escape *value* and render its newlines as ``<br>`` tags."""
        if not value:
            return ''
        return nl2br(escape(value))
class SelectionConverter(models.AbstractModel):
    _name = 'ir.qweb.field.selection'
    _description = 'Qweb Field Selection'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        """Add the mandatory ``selection`` (value -> label) mapping option."""
        opts = super(SelectionConverter, self).get_available_options()
        opts['selection'] = dict(
            type='selection',
            string=_('Selection'),
            description=_('By default the widget uses the field information'),
            required=True,
        )
        return opts

    @api.model
    def value_to_html(self, value, options):
        """Render the (escaped) label matching *value* in the selection."""
        if not value:
            return ''
        label = pycompat.to_text(options['selection'][value])
        return escape(label or '')

    @api.model
    def record_to_html(self, record, field_name, options):
        """Default the ``selection`` option from the field's own definition."""
        if 'selection' not in options:
            field = record._fields[field_name]
            selection = dict(field.get_description(self.env)['selection'])
            options = dict(options, selection=selection)
        return super(SelectionConverter, self).record_to_html(record, field_name, options)
class ManyToOneConverter(models.AbstractModel):
    _name = 'ir.qweb.field.many2one'
    _description = 'Qweb Field Many to One'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Render the linked record's display name; False when unset/empty."""
        if not value:
            return False
        display = value.sudo().display_name
        if not display:
            return False
        return nl2br(escape(display))
class ManyToManyConverter(models.AbstractModel):
    _name = 'ir.qweb.field.many2many'
    _description = 'Qweb field many2many'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Render the records' display names, comma-separated."""
        if not value:
            return False
        names = value.sudo().mapped('display_name')
        return nl2br(escape(', '.join(names)))
class HTMLConverter(models.AbstractModel):
    _name = 'ir.qweb.field.html'
    _description = 'Qweb Field HTML'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Re-emit stored HTML after running qweb attribute post-processing
        on every element that carries attributes."""
        irQweb = self.env['ir.qweb']
        # wrap value inside a body and parse it as HTML
        body = etree.fromstring("<body>%s</body>" % value, etree.HTMLParser(encoding='utf-8'))[0]
        # run qweb post-processing on all nodes with attributes
        for element in body.iter():
            if element.attrib:
                attrib = dict(element.attrib)
                attrib = irQweb._post_processing_att(element.tag, attrib, options.get('template_options'))
                element.attrib.clear()
                element.attrib.update(attrib)
        # serialize and strip the wrapping '<body>' (6 chars) / '</body>' (7 chars)
        return Markup(etree.tostring(body, encoding='unicode', method='html')[6:-7])
class ImageConverter(models.AbstractModel):
    """ ``image`` widget rendering, inserts a data:uri-using image tag in the
    document. May be overridden by e.g. the website module to generate links
    instead.
    .. todo:: what happens if different output need different converters? e.g.
    reports may need embedded images or FS links whereas website
    needs website-aware
    """
    _name = 'ir.qweb.field.image'
    _description = 'Qweb Field Image'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Embed the base64-encoded image *value* as a data-uri ``<img>`` tag.

        :raises ValueError: if *value* is not valid image content
        """
        # validate the payload with PIL before embedding it verbatim
        try: # FIXME: maaaaaybe it could also take raw bytes?
            image = Image.open(BytesIO(base64.b64decode(value)))
            image.verify()
        except IOError:
            raise ValueError("Non-image binary fields can not be converted to HTML")
        except: # image.verify() throws "suitable exceptions", I have no idea what they are
            raise ValueError("Invalid image content")
        # the original base64 payload is reused; only the MIME type is derived
        return Markup('<img src="data:%s;base64,%s">' % (Image.MIME[image.format], value.decode('ascii')))
class ImageUrlConverter(models.AbstractModel):
    """``image_url`` widget rendering: inserts an ``<img>`` tag pointing at
    the given URL instead of embedding the image data."""
    _name = 'ir.qweb.field.image_url'
    _description = 'Qweb Field Image'
    _inherit = 'ir.qweb.field.image'

    @api.model
    def value_to_html(self, value, options):
        """Render an image tag whose source is the URL in *value*."""
        return Markup('<img src="%s">' % (value))
class MonetaryConverter(models.AbstractModel):
    """ ``monetary`` converter, has a mandatory option
    ``display_currency`` only if field is not of type Monetary.
    Otherwise, if we are in presence of a monetary field, the field definition must
    have a currency_field attribute set.
    The currency is used for formatting *and rounding* of the float value. It
    is assumed that the linked res_currency has a non-empty rounding value and
    res.currency's ``round`` method is used to perform rounding.
    .. note:: the monetary converter internally adds the qweb context to its
    options mapping, so that the context is available to callees.
    It's set under the ``_values`` key.
    """
    _name = 'ir.qweb.field.monetary'
    _description = 'Qweb Field Monetary'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        options = super(MonetaryConverter, self).get_available_options()
        options.update(
            from_currency=dict(type='model', params='res.currency', string=_('Original currency')),
            display_currency=dict(type='model', params='res.currency', string=_('Display currency'), required="value_to_html"),
            date=dict(type='date', string=_('Date'), description=_('Date used for the original currency (only used for t-esc). by default use the current date.')),
            company_id=dict(type='model', params='res.company', string=_('Company'), description=_('Company used for the original currency (only used for t-esc). By default use the user company')),
        )
        return options

    @api.model
    def value_to_html(self, value, options):
        """Render *value* as an amount in ``options['display_currency']``,
        optionally converting it from ``options['from_currency']`` first.

        :raises ValueError: if *value* is not a number
        """
        display_currency = options['display_currency']
        if not isinstance(value, (int, float)):
            raise ValueError(_("The value send to monetary field is not a number."))
        # lang.format mandates a sprintf-style format. These formats are non-
        # minimal (they have a default fixed precision instead), and
        # lang.format will not set one by default. currency.round will not
        # provide one either. So we need to generate a precision value
        # (integer > 0) from the currency's rounding (a float generally < 1.0).
        fmt = "%.{0}f".format(display_currency.decimal_places)
        if options.get('from_currency'):
            # convert at the requested (or current) date, for the requested
            # (or current) company
            date = options.get('date') or fields.Date.today()
            company_id = options.get('company_id')
            if company_id:
                company = self.env['res.company'].browse(company_id)
            else:
                company = self.env.company
            value = options['from_currency']._convert(value, display_currency, company, date)
        lang = self.user_lang()
        # no-break space keeps digit groups together; zero-width no-break
        # space after '-' keeps the sign attached to the number
        formatted_amount = lang.format(fmt, display_currency.round(value),
                                grouping=True, monetary=True).replace(r' ', '\N{NO-BREAK SPACE}').replace(r'-', '-\N{ZERO WIDTH NO-BREAK SPACE}')
        pre = post = ''
        if display_currency.position == 'before':
            pre = '{symbol}\N{NO-BREAK SPACE}'.format(symbol=display_currency.symbol or '')
        else:
            post = '\N{NO-BREAK SPACE}{symbol}'.format(symbol=display_currency.symbol or '')
        if options.get('label_price') and lang.decimal_point in formatted_amount:
            # "price label" style: render the decimal part at half size
            sep = lang.decimal_point
            integer_part, decimal_part = formatted_amount.split(sep)
            integer_part += sep
            return Markup('{pre}<span class="oe_currency_value">{0}</span><span class="oe_currency_value" style="font-size:0.5em">{1}</span>{post}').format(integer_part, decimal_part, pre=pre, post=post)
        return Markup('{pre}<span class="oe_currency_value">{0}</span>{post}').format(formatted_amount, pre=pre, post=post)

    @api.model
    def record_to_html(self, record, field_name, options):
        options = dict(options)
        #currency should be specified by monetary field
        field = record._fields[field_name]
        if not options.get('display_currency') and field.type == 'monetary' and field.get_currency_field(record):
            options['display_currency'] = record[field.get_currency_field(record)]
        if not options.get('display_currency'):
            # search on the model if they are a res.currency field to set as default
            fields = record._fields.items()
            currency_fields = [k for k, v in fields if v.type == 'many2one' and v.comodel_name == 'res.currency']
            if currency_fields:
                options['display_currency'] = record[currency_fields[0]]
        if 'date' not in options:
            options['date'] = record._context.get('date')
        if 'company_id' not in options:
            options['company_id'] = record._context.get('company_id')
        return super(MonetaryConverter, self).record_to_html(record, field_name, options)
# (key, lazily-translated label, duration in seconds), ordered from the
# largest unit to the smallest; consumed by the duration converter below
TIMEDELTA_UNITS = (
    ('year', _lt('year'), 3600 * 24 * 365),
    ('month', _lt('month'), 3600 * 24 * 30),
    ('week', _lt('week'), 3600 * 24 * 7),
    ('day', _lt('day'), 3600 * 24),
    ('hour', _lt('hour'), 3600),
    ('minute', _lt('minute'), 60),
    ('second', _lt('second'), 1)
)
class FloatTimeConverter(models.AbstractModel):
    """``float_time`` converter: renders integral or fractional values as
    human-readable time spans (e.g. ``1.5`` as ``"01:30"``).

    Can be used on any numerical field.
    """
    _name = 'ir.qweb.field.float_time'
    _description = 'Qweb Field Float Time'
    _inherit = 'ir.qweb.field'

    @api.model
    def value_to_html(self, value, options):
        """Delegate the HH:MM formatting to :func:`format_duration`."""
        return format_duration(value)
class DurationConverter(models.AbstractModel):
    """ ``duration`` converter, to display integral or fractional values as
    human-readable time spans (e.g. 1.5 as "1 hour 30 minutes").
    Can be used on any numerical field.
    Has an option ``unit`` which can be one of ``second``, ``minute``,
    ``hour``, ``day``, ``week`` or ``year``, used to interpret the numerical
    field value before converting it. By default use ``second``.
    Has an option ``round``. By default use ``second``.
    Has an option ``digital`` to display 01:00 instead of 1 hour
    Sub-second values will be ignored.
    """
    _name = 'ir.qweb.field.duration'
    _description = 'Qweb Field Duration'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        options = super(DurationConverter, self).get_available_options()
        unit = [(value, str(label)) for value, label, ratio in TIMEDELTA_UNITS]
        options.update(
            digital=dict(type="boolean", string=_('Digital formatting')),
            unit=dict(type="selection", params=unit, string=_('Date unit'), description=_('Date unit used for comparison and formatting'), default_value='second', required=True),
            round=dict(type="selection", params=unit, string=_('Rounding unit'), description=_("Date unit used for the rounding. The value must be smaller than 'hour' if you use the digital formatting."), default_value='second'),
            format=dict(
                type="selection",
                params=[
                    ('long', _('Long')),
                    ('short', _('Short')),
                    ('narrow', _('Narrow'))],
                string=_('Format'),
                description=_("Formatting: long, short, narrow (not used for digital)"),
                default_value='long'
            ),
            add_direction=dict(
                type="boolean",
                string=_("Add direction"),
                description=_("Add directional information (not used for digital)")
            ),
        )
        return options

    @api.model
    def value_to_html(self, value, options):
        """Render *value* (expressed in ``options['unit']``) as a duration,
        either digital (HH:MM[:SS]) or in words via babel."""
        units = {unit: duration for unit, label, duration in TIMEDELTA_UNITS}
        locale = babel_locale_parse(self.user_lang().code)
        factor = units[options.get('unit', 'second')]
        round_to = units[options.get('round', 'second')]
        if options.get('digital') and round_to > 3600:
            # digital display cannot round coarser than the hour
            round_to = 3600
        # normalize to seconds, rounded to the requested granularity
        r = round((value * factor) / round_to) * round_to
        sections = []
        sign = ''
        if value < 0:
            r = -r
            sign = '-'
        if options.get('digital'):
            for unit, label, secs_per_unit in TIMEDELTA_UNITS:
                if secs_per_unit > 3600:
                    # hours are the largest digital section
                    continue
                v, r = divmod(r, secs_per_unit)
                if not v and (secs_per_unit > factor or secs_per_unit < round_to):
                    # skip empty sections outside the [round, unit] window
                    continue
                sections.append(u"%02.0f" % int(round(v)))
            return sign + u':'.join(sections)
        # textual rendering: emit one babel-formatted section per non-zero unit
        for unit, label, secs_per_unit in TIMEDELTA_UNITS:
            v, r = divmod(r, secs_per_unit)
            if not v:
                continue
            section = babel.dates.format_timedelta(
                v*secs_per_unit,
                granularity=round_to,
                add_direction=options.get('add_direction'),
                format=options.get('format', 'long'),
                threshold=1,
                locale=locale)
            if section:
                sections.append(section)
        if sign:
            sections.insert(0, sign)
        return u' '.join(sections)
class RelativeDatetimeConverter(models.AbstractModel):
    _name = 'ir.qweb.field.relative'
    _description = 'Qweb Field Relative'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        """Expose the ``now`` reference-date option."""
        opts = super(RelativeDatetimeConverter, self).get_available_options()
        opts['now'] = dict(type='datetime', string=_('Reference date'), description=_('Date to compare with the field value, by default use the current date.'))
        return opts

    @api.model
    def value_to_html(self, value, options):
        """Render the delta between *value* and ``options['now']`` in words."""
        locale = babel_locale_parse(self.user_lang().code)
        if isinstance(value, str):
            value = fields.Datetime.from_string(value)
        # value should be a naive datetime in UTC. So is fields.Datetime.now()
        reference = fields.Datetime.from_string(options['now'])
        delta = value - reference
        rendered = babel.dates.format_timedelta(delta, add_direction=True, locale=locale)
        return pycompat.to_text(rendered)

    @api.model
    def record_to_html(self, record, field_name, options):
        """Default the reference instant to the field's own ``now()``."""
        if 'now' not in options:
            options = dict(options, now=record._fields[field_name].now())
        return super(RelativeDatetimeConverter, self).record_to_html(record, field_name, options)
class BarcodeConverter(models.AbstractModel):
    """ ``barcode`` widget rendering, inserts a data:uri-using image tag in the
    document. May be overridden by e.g. the website module to generate links
    instead.
    """
    _name = 'ir.qweb.field.barcode'
    _description = 'Qweb Field Barcode'
    _inherit = 'ir.qweb.field'

    @api.model
    def get_available_options(self):
        options = super(BarcodeConverter, self).get_available_options()
        options.update(
            symbology=dict(type='string', string=_('Barcode symbology'), description=_('Barcode type, eg: UPCA, EAN13, Code128'), default_value='Code128'),
            width=dict(type='integer', string=_('Width'), default_value=600),
            height=dict(type='integer', string=_('Height'), default_value=100),
            humanreadable=dict(type='integer', string=_('Human Readable'), default_value=0),
            quiet=dict(type='integer', string='Quiet', default_value=1),
            mask=dict(type='string', string='Mask', default_value='')
        )
        return options

    @api.model
    def value_to_html(self, value, options=None):
        """Render *value* as an ``<img>`` tag embedding a generated PNG barcode."""
        if not value:
            return ''
        barcode_symbology = options.get('symbology', 'Code128')
        # the PNG generation itself is delegated to the report engine
        barcode = self.env['ir.actions.report'].barcode(
            barcode_symbology,
            value,
            **{key: value for key, value in options.items() if key in ['width', 'height', 'humanreadable', 'quiet', 'mask']})
        img_element = html.Element('img')
        # forward whitelisted 'img_*' options as attributes of the <img> tag
        for k, v in options.items():
            if k.startswith('img_') and k[4:] in safe_attrs:
                img_element.set(k[4:], v)
        if not img_element.get('alt'):
            img_element.set('alt', _('Barcode %s') % value)
        img_element.set('src', 'data:image/png;base64,%s' % base64.b64encode(barcode).decode())
        return Markup(html.tostring(img_element, encoding='unicode'))
class Contact(models.AbstractModel):
    _name = 'ir.qweb.field.contact'
    _description = 'Qweb Field Contact'
    _inherit = 'ir.qweb.field.many2one'

    @api.model
    def get_available_options(self):
        options = super(Contact, self).get_available_options()
        # sub-fields the widget can display; 'default' marks the preselected ones
        contact_fields = [
            {'field_name': 'name', 'label': _('Name'), 'default': True},
            {'field_name': 'address', 'label': _('Address'), 'default': True},
            {'field_name': 'phone', 'label': _('Phone'), 'default': True},
            {'field_name': 'mobile', 'label': _('Mobile'), 'default': True},
            {'field_name': 'email', 'label': _('Email'), 'default': True},
            {'field_name': 'vat', 'label': _('VAT')},
        ]
        separator_params = dict(
            type='selection',
            selection=[[" ", _("Space")], [",", _("Comma")], ["-", _("Dash")], ["|", _("Vertical bar")], ["/", _("Slash")]],
            placeholder=_('Linebreak'),
        )
        options.update(
            fields=dict(type='array', params=dict(type='selection', params=contact_fields), string=_('Displayed fields'), description=_('List of contact fields to display in the widget'), default_value=[param.get('field_name') for param in contact_fields if param.get('default')]),
            separator=dict(type='selection', params=separator_params, string=_('Address separator'), description=_('Separator use to split the address from the display_name.'), default_value=False),
            no_marker=dict(type='boolean', string=_('Hide badges'), description=_("Don't display the font awesome marker")),
            no_tag_br=dict(type='boolean', string=_('Use comma'), description=_("Use comma instead of the <br> tag to display the address")),
            phone_icons=dict(type='boolean', string=_('Display phone icons'), description=_("Display the phone icons even if no_marker is True")),
            country_image=dict(type='boolean', string=_('Display country image'), description=_("Display the country image if the field is present on the record")),
        )
        return options

    @api.model
    def value_to_html(self, value, options):
        """Render the partner *value* through the 'base.contact' qweb template."""
        if not value:
            return ''
        # sub-fields to display, in order
        opf = options.get('fields') or ["name", "address", "phone", "mobile", "email"]
        sep = options.get('separator')
        template_options = options.get('template_options', {})
        # joiner between the address lines extracted from name_get()
        if sep:
            opsep = escape(sep)
        elif template_options.get('no_tag_br'):
            # escaped joiners will auto-escape joined params
            opsep = escape(', ')
        else:
            opsep = Markup('<br/>')
        # show_address makes name_get() append the address under the name
        value = value.sudo().with_context(show_address=True)
        name_get = value.name_get()[0][1]
        # Avoid having something like:
        # name_get = 'Foo\n \n' -> This is a res.partner with a name and no address
        # That would return markup('<br/>') as address. But there is no address set.
        if any(elem.strip() for elem in name_get.split("\n")[1:]):
            address = opsep.join(name_get.split("\n")[1:]).strip()
        else:
            address = ''
        val = {
            'name': name_get.split("\n")[0],
            'address': address,
            'phone': value.phone,
            'mobile': value.mobile,
            'city': value.city,
            'country_id': value.country_id.display_name,
            'website': value.website,
            'email': value.email,
            'vat': value.vat,
            'vat_label': value.country_id.vat_label or _('VAT'),
            'fields': opf,
            'object': value,
            'options': options
        }
        return self.env['ir.qweb']._render('base.contact', val, **template_options)
class QwebView(models.AbstractModel):
    _name = 'ir.qweb.field.qweb'
    _description = 'Qweb Field qweb'
    _inherit = 'ir.qweb.field.many2one'

    @api.model
    def record_to_html(self, record, field_name, options):
        """Render the ir.ui.view referenced by ``record[field_name]``; empty
        string when the field is empty or does not point to a view."""
        target_view = getattr(record, field_name)
        if not target_view:
            return ''
        if target_view._name != "ir.ui.view":
            _logger.warning("%s.%s must be a 'ir.ui.view', got %r.", record, field_name, target_view._name)
            return ''
        return target_view._render(options.get('values', {}), engine='ir.qweb')
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import collections
import datetime
import fnmatch
import inspect
import json
import logging
import math
import pprint
import re
import time
import uuid
import warnings
from dateutil.relativedelta import relativedelta
import werkzeug, werkzeug.urls
from lxml import etree
from lxml.etree import LxmlError
from lxml.builder import E
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError, AccessError
from odoo.http import request
from odoo.modules.module import get_resource_from_path, get_resource_path
from odoo.tools import config, ConstantMapping, get_diff, pycompat, apply_inheritance_specs, locate_node
from odoo.tools.convert import _fix_multiple_roots
from odoo.tools.json import scriptsafe as json_scriptsafe
from odoo.tools import safe_eval, lazy_property, frozendict
from odoo.tools.view_validation import valid_view, get_variable_names, get_domain_identifiers, get_dict_asts
from odoo.tools.translate import xml_translate, TRANSLATED_ATTRS
from odoo.tools.image import image_data_uri
from odoo.models import check_method_name
from odoo.osv.expression import expression
_logger = logging.getLogger(__name__)
# Branding attributes that may be moved from a parent node to its children
# when distributing branding over a view's architecture.
MOVABLE_BRANDING = ['data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-xpath', 'data-oe-source-id']
def quick_eval(expr, globals_dict):
    """ Functionally identical to safe_eval(), but optimized with special-casing. """
    # most (~95%) elements are 1/True/0/False: answer those without eval'ing
    fast_values = {'1': 1, 'True': True, '0': 0, 'False': False}
    if isinstance(expr, str) and expr in fast_values:
        return fast_values[expr]
    return safe_eval.safe_eval(expr, globals_dict)
def att_names(name):
    """Yield the static, dynamic (t-att-) and format-string (t-attf-)
    variants of a qweb attribute name."""
    for prefix in ("", "t-att-", "t-attf-"):
        yield f"{prefix}{name}"
def transfer_field_to_modifiers(field, modifiers):
    """Fill ``modifiers`` (in place) with the invisible/readonly/required
    values of a field description, folding per-state overrides from
    ``field['states']`` into `[('state', 'in'/'not in', ...)]` domains."""
    attrs = ('invisible', 'readonly', 'required')
    defaults = {attr: bool(field.get(attr)) for attr in attrs}
    # states where the attribute differs from its default value
    exceptions = {attr: [] for attr in attrs}

    for state, state_modifiers in field.get("states", {}).items():
        for attr, attr_value in state_modifiers:
            if defaults[attr] != attr_value:
                exceptions[attr].append(state)

    for attr in attrs:
        states = exceptions[attr]
        if states:
            operator = "not in" if defaults[attr] else "in"
            modifiers[attr] = [("state", operator, states)]
        else:
            modifiers[attr] = defaults[attr]
def transfer_node_to_modifiers(node, modifiers, context=None):
    """Fill ``modifiers`` (in place) from a view node's ``attrs``, ``states``
    and invisible/readonly/required attributes."""
    # Don't deal with groups, it is done by check_group().
    # Need the context to evaluate the invisible attribute on tree views.
    # For non-tree views, the context shouldn't be given.
    attrs = node.get('attrs')
    if attrs:
        modifiers.update(ast.literal_eval(attrs.strip()))

    states = node.get('states')
    if states:
        state_domain = ('state', 'not in', states.split(','))
        if isinstance(modifiers.get('invisible'), list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(state_domain)
        else:
            modifiers['invisible'] = [state_domain]

    for attr in ('invisible', 'readonly', 'required'):
        value_str = node.get(attr)
        if not value_str:
            continue
        value = bool(quick_eval(value_str, {'context': context or {}}))
        ancestor_tags = [parent.tag for parent in node.iterancestors()]
        if attr == 'invisible' and 'tree' in ancestor_tags and 'header' not in ancestor_tags:
            # Invisible in a tree view has a specific meaning, make it a
            # new key in the modifiers attribute.
            modifiers['column_invisible'] = value
        elif value or not isinstance(modifiers.get(attr), list):
            # Don't set the attribute to False if a dynamic value was
            # provided (i.e. a domain from attrs or states).
            modifiers[attr] = value
def simplify_modifiers(modifiers):
    """Drop falsy invisible/readonly/required entries from ``modifiers``;
    absence of the key already means False client-side."""
    for attr in ('invisible', 'readonly', 'required'):
        if not modifiers.get(attr, True):
            del modifiers[attr]
def transfer_modifiers_to_node(modifiers, node):
    """Serialize non-empty ``modifiers`` into the node's ``modifiers``
    attribute as JSON, after dropping redundant falsy entries."""
    if not modifiers:
        return
    simplify_modifiers(modifiers)
    node.set('modifiers', json.dumps(modifiers))
def keep_query(*keep_params, **additional_params):
    """
    Generate a query string keeping the current request querystring's parameters specified
    in ``keep_params`` and also adds the parameters specified in ``additional_params``.

    Multiple values query string params will be merged into a single one with comma separated
    values.

    The ``keep_params`` arguments can use wildcards too, eg:

        keep_query('search', 'shop_*', page=4)
    """
    # with no argument at all, keep everything from the current querystring
    if not keep_params and not additional_params:
        keep_params = ('*',)
    params = dict(additional_params)
    qs_keys = list(request.httprequest.args) if request else []
    for pattern in keep_params:
        for param in fnmatch.filter(qs_keys, pattern):
            # explicit additional_params win over querystring values
            if param not in additional_params and param in qs_keys:
                params[param] = request.httprequest.args.getlist(param)
    return werkzeug.urls.url_encode(params)
class ViewCustom(models.Model):
    _name = 'ir.ui.view.custom'
    _description = 'Custom View'
    _order = 'create_date desc'  # search(limit=1) should return the last customization

    ref_id = fields.Many2one('ir.ui.view', string='Original View', index=True, required=True, ondelete='cascade')
    user_id = fields.Many2one('res.users', string='User', index=True, required=True, ondelete='cascade')
    arch = fields.Text(string='View Architecture', required=True)

    def name_get(self):
        """A customization is displayed as the name of its owner."""
        return [(custom.id, custom.user_id.name) for custom in self]

    @api.model
    def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
        """Search customizations through their owner's name."""
        if not name:
            return super(ViewCustom, self)._name_search(name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
        domain = [('user_id', operator, name)] + (args or [])
        return self._search(domain, limit=limit, access_rights_uid=name_get_uid)

    def _auto_init(self):
        res = super(ViewCustom, self)._auto_init()
        # composite index speeding up the lookup of a user's view customization
        tools.create_index(self._cr, 'ir_ui_view_custom_user_id_ref_id',
                           self._table, ['user_id', 'ref_id'])
        return res
def _hasclass(context, *cls):
""" Checks if the context node has all the classes passed as arguments
"""
node_classes = set(context.context_node.attrib.get('class', '').split())
return node_classes.issuperset(cls)
def get_view_arch_from_file(filepath, xmlid):
    """ Return the view architecture defined for ``xmlid`` in the XML data
    file ``filepath``, or ``None`` when no matching definition is found.

    Handles both ``<record>`` definitions (reading the inline ``arch`` field,
    or recursively following a ``view_id`` reference) and ``<template>``
    definitions (converted the same way convert.py's _tag_template does).

    :param str filepath: path of the XML data file to parse
    :param str xmlid: fully-qualified external id (``module.name``) of the view
    :rtype: str | None
    """
    module, view_id = xmlid.split('.')

    xpath = f"//*[@id='{xmlid}' or @id='{view_id}']"
    # when view is created from model with inheritS of ir_ui_view, the
    # xmlid has been suffixed by '_ir_ui_view'. We need to also search
    # for views without this suffix.
    if view_id.endswith('_ir_ui_view'):
        # len('_ir_ui_view') == 11
        xpath = xpath[:-1] + f" or @id='{xmlid[:-11]}' or @id='{view_id[:-11]}']"
    document = etree.parse(filepath)
    for node in document.xpath(xpath):
        if node.tag == 'record':
            field_arch = node.find('field[@name="arch"]')
            if field_arch is not None:
                _fix_multiple_roots(field_arch)
                inner = ''.join(
                    etree.tostring(child, encoding='unicode')
                    for child in field_arch.iterchildren()
                )
                return field_arch.text + inner
            # no inline arch: follow the referenced view, qualifying an
            # unprefixed ref with the current module
            field_view = node.find('field[@name="view_id"]')
            if field_view is not None:
                ref_module, _, ref_view_id = field_view.attrib.get('ref').rpartition('.')
                ref_xmlid = f'{ref_module or module}.{ref_view_id}'
                return get_view_arch_from_file(filepath, ref_xmlid)
            return None
        elif node.tag == 'template':
            # The following dom operations have been copied from convert.py's _tag_template()
            if not node.get('inherit_id'):
                node.set('t-name', xmlid)
                node.tag = 't'
            else:
                node.tag = 'data'
            node.attrib.pop('id', None)
            return etree.tostring(node, encoding='unicode')
    _logger.warning("Could not find view arch definition in file '%s' for xmlid '%s'", filepath, xmlid)
    return None
# Register hasclass() as a global lxml XPath extension function so view
# inheritance expressions can use it as a selector.
xpath_utils = etree.FunctionNamespace(None)
xpath_utils['hasclass'] = _hasclass
# Matches XPath expressions that select on a translated attribute.
TRANSLATED_ATTRS_RE = re.compile(r"@(%s)\b" % "|".join(TRANSLATED_ATTRS))
# Matches error-prone raw @class comparisons; hasclass() should be used instead.
WRONGCLASS = re.compile(r"(@class\s*=|=\s*@class|contains\(@class)")
class View(models.Model):
    """ Architecture of every UI view (form, tree, kanban, qweb, ...), with
    support for extending a base view's arch through view inheritance. """
    _name = 'ir.ui.view'
    _description = 'View'
    _order = "priority,name,id"
    name = fields.Char(string='View Name', required=True)
    model = fields.Char(index=True)
    key = fields.Char()
    priority = fields.Integer(string='Sequence', default=16, required=True)
    type = fields.Selection([('tree', 'Tree'),
                             ('form', 'Form'),
                             ('graph', 'Graph'),
                             ('pivot', 'Pivot'),
                             ('calendar', 'Calendar'),
                             ('gantt', 'Gantt'),
                             ('kanban', 'Kanban'),
                             ('search', 'Search'),
                             ('qweb', 'QWeb')], string='View Type')
    arch = fields.Text(compute='_compute_arch', inverse='_inverse_arch', string='View Architecture',
                       help="""This field should be used when accessing view arch. It will use translation.
                               Note that it will read `arch_db` or `arch_fs` if in dev-xml mode.""")
    arch_base = fields.Text(compute='_compute_arch_base', inverse='_inverse_arch_base', string='Base View Architecture',
                            help="This field is the same as `arch` field without translations")
    arch_db = fields.Text(string='Arch Blob', translate=xml_translate,
                          help="This field stores the view arch.")
    arch_fs = fields.Char(string='Arch Filename', help="""File from where the view originates.
                                                          Useful to (hard) reset broken views or to read arch from file in dev-xml mode.""")
    arch_updated = fields.Boolean(string='Modified Architecture')
    arch_prev = fields.Text(string='Previous View Architecture', help="""This field will save the current `arch_db` before writing on it.
                                                                         Useful to (soft) reset a broken view.""")
    inherit_id = fields.Many2one('ir.ui.view', string='Inherited View', ondelete='restrict', index=True)
    inherit_children_ids = fields.One2many('ir.ui.view', 'inherit_id', string='Views which inherit from this one')
    field_parent = fields.Char(string='Child Field')
    model_data_id = fields.Many2one('ir.model.data', string="Model Data",
                                    compute='_compute_model_data_id', search='_search_model_data_id')
    xml_id = fields.Char(string="External ID", compute='_compute_xml_id',
                         help="ID of the view defined in xml file")
    groups_id = fields.Many2many('res.groups', 'ir_ui_view_group_rel', 'view_id', 'group_id',
                                 string='Groups', help="If this field is empty, the view applies to all users. Otherwise, the view applies to the users of those groups only.")
    mode = fields.Selection([('primary', "Base view"), ('extension', "Extension View")],
                            string="View inheritance mode", default='primary', required=True,
                            help="""Only applies if this view inherits from an other one (inherit_id is not False/Null).

* if extension (default), if this view is requested the closest primary view
is looked up (via inherit_id), then all views inheriting from it with this
view's model are applied
* if primary, the closest primary view is fully resolved (even if it uses a
different model than this one), then this view's inheritance specs
(<xpath/>) are applied, and the result is used as if it were this view's
actual arch.
""")
    # The "active" field is not updated during updates if <template> is used
    # instead of <record> to define the view in XML, see _tag_template. For
    # qweb views, you should not rely on the active field being updated anyway
    # as those views, if used in frontend layouts, can be duplicated (see COW)
    # and will thus always require upgrade scripts if you really want to change
    # the default value of their "active" field.
    active = fields.Boolean(default=True,
                            help="""If this view is inherited,
* if True, the view always extends its parent
* if False, the view currently does not extend its parent but can be enabled
""")
    @api.depends('arch_db', 'arch_fs', 'arch_updated')
    @api.depends_context('read_arch_from_file', 'lang')
    def _compute_arch(self):
        """Compute ``arch`` from ``arch_db``, or re-read it from the source
        XML file when dev-xml mode is on (and the view was not modified in
        the database) or when ``read_arch_from_file`` is in the context."""
        def resolve_external_ids(arch_fs, view_xml_id):
            # replace %(xml_id)s / %(xml_id)d placeholders by database ids;
            # an unqualified xml id is resolved in the view's own module
            def replacer(m):
                xmlid = m.group('xmlid')
                if '.' not in xmlid:
                    xmlid = '%s.%s' % (view_xml_id.split('.')[0], xmlid)
                return m.group('prefix') + str(self.env['ir.model.data']._xmlid_to_res_id(xmlid))
            return re.sub(r'(?P<prefix>[^%])%\((?P<xmlid>.*?)\)[ds]', replacer, arch_fs)
        for view in self:
            arch_fs = None
            read_file = self._context.get('read_arch_from_file') or \
                ('xml' in config['dev_mode'] and not view.arch_updated)
            if read_file and view.arch_fs and (view.xml_id or view.key):
                xml_id = view.xml_id or view.key
                # It is safe to split on / below because arch_fs is explicitly stored with '/'
                fullpath = get_resource_path(*view.arch_fs.split('/'))
                if fullpath:
                    arch_fs = get_view_arch_from_file(fullpath, xml_id)
                    # replace %(xml_id)s, %(xml_id)d, %%(xml_id)s, %%(xml_id)d by the res_id
                    if arch_fs:
                        arch_fs = resolve_external_ids(arch_fs, xml_id).replace('%%', '%')
                        if self.env.context.get('lang'):
                            # apply the stored arch_db translations on the file arch
                            tr = self._fields['arch_db'].get_trans_func(view)
                            arch_fs = tr(view.id, arch_fs)
                else:
                    _logger.warning("View %s: Full path [%s] cannot be found.", xml_id, view.arch_fs)
                    arch_fs = False
            view.arch = pycompat.to_text(arch_fs or view.arch_db)
def _inverse_arch(self):
for view in self:
data = dict(arch_db=view.arch)
if 'install_filename' in self._context:
# we store the relative path to the resource instead of the absolute path, if found
# (it will be missing e.g. when importing data-only modules using base_import_module)
path_info = get_resource_from_path(self._context['install_filename'])
if path_info:
data['arch_fs'] = '/'.join(path_info[0:2])
data['arch_updated'] = False
view.write(data)
# the field 'arch' depends on the context and has been implicitly
# modified in all languages; the invalidation below ensures that the
# field does not keep an old value in another environment
self.invalidate_cache(['arch'], self._ids)
@api.depends('arch')
@api.depends_context('read_arch_from_file')
def _compute_arch_base(self):
# 'arch_base' is the same as 'arch' without translation
for view, view_wo_lang in zip(self, self.with_context(lang=None)):
view.arch_base = view_wo_lang.arch
def _inverse_arch_base(self):
for view, view_wo_lang in zip(self, self.with_context(lang=None)):
view_wo_lang.arch = view.arch_base
def reset_arch(self, mode='soft'):
""" Reset the view arch to its previous arch (soft) or its XML file arch
if exists (hard).
"""
for view in self:
arch = False
if mode == 'soft':
arch = view.arch_prev
elif mode == 'hard' and view.arch_fs:
arch = view.with_context(read_arch_from_file=True, lang=None).arch
if arch:
# Don't save current arch in previous since we reset, this arch is probably broken
view.with_context(no_save_prev=True, lang=None).write({'arch_db': arch})
@api.depends('write_date')
def _compute_model_data_id(self):
# get the first ir_model_data record corresponding to self
for view in self:
view.model_data_id = False
domain = [('model', '=', 'ir.ui.view'), ('res_id', 'in', self.ids)]
for data in self.env['ir.model.data'].sudo().search_read(domain, ['res_id'], order='id desc'):
view = self.browse(data['res_id'])
view.model_data_id = data['id']
def _search_model_data_id(self, operator, value):
name = 'name' if isinstance(value, str) else 'id'
domain = [('model', '=', 'ir.ui.view'), (name, operator, value)]
data = self.env['ir.model.data'].sudo().search(domain)
return [('id', 'in', data.mapped('res_id'))]
def _compute_xml_id(self):
xml_ids = collections.defaultdict(list)
domain = [('model', '=', 'ir.ui.view'), ('res_id', 'in', self.ids)]
for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id']):
xml_ids[data['res_id']].append("%s.%s" % (data['module'], data['name']))
for view in self:
view.xml_id = xml_ids.get(view.id, [''])[0]
def _valid_inheritance(self, arch):
""" Check whether view inheritance is based on translated attribute. """
for node in arch.xpath('//*[@position]'):
# inheritance may not use a translated attribute as selector
if node.tag == 'xpath':
match = TRANSLATED_ATTRS_RE.search(node.get('expr', ''))
if match:
message = "View inheritance may not use attribute %r as a selector." % match.group(1)
self._raise_view_error(message, node)
if WRONGCLASS.search(node.get('expr', '')):
_logger.warning(
"Error-prone use of @class in view %s (%s): use the "
"hasclass(*classes) function to filter elements by "
"their classes", self.name, self.xml_id
)
else:
for attr in TRANSLATED_ATTRS:
if node.get(attr):
message = "View inheritance may not use attribute %r as a selector." % attr
self._raise_view_error(message, node)
return True
    @api.constrains('arch_db')
    def _check_xml(self):
        """Validate the views' architecture: well-formed XML, resolvable
        inheritance, and a combined arch that passes view validation."""
        # Sanity checks: the view should not break anything upon rendering!
        # Any exception raised below will cause a transaction rollback.
        partial_validation = self.env.context.get('ir_ui_view_partial_validation')
        self = self.with_context(validate_view_ids=(self._ids if partial_validation else True))
        for view in self:
            try:
                # verify the view is valid xml and that the inheritance resolves
                if view.inherit_id:
                    view_arch = etree.fromstring(view.arch)
                    view._valid_inheritance(view_arch)
                combined_arch = view._get_combined_arch()
                if view.type == 'qweb':
                    # qweb views are not validated beyond inheritance resolution
                    continue
            except ValueError as e:
                err = ValidationError(_(
                    "Error while validating view:\n\n%(error)s",
                    error=tools.ustr(e),
                )).with_traceback(e.__traceback__)
                err.context = getattr(e, 'context', None)
                raise err from None
            try:
                # verify that all fields used are valid, etc.
                view._validate_view(combined_arch, view.model)
                combined_archs = [combined_arch]
                if combined_archs[0].tag == 'data':
                    # A <data> element is a wrapper for multiple root nodes
                    combined_archs = combined_archs[0]
                for view_arch in combined_archs:
                    # strip the partial-validation markers (see _add_validation_flag)
                    for node in view_arch.xpath('//*[@__validate__]'):
                        del node.attrib['__validate__']
                    check = valid_view(view_arch, env=self.env, model=view.model)
                    if not check:
                        view_name = ('%s (%s)' % (view.name, view.xml_id)) if view.xml_id else view.name
                        raise ValidationError(_(
                            'Invalid view %(name)s definition in %(file)s',
                            name=view_name, file=view.arch_fs
                        ))
                    if check == "Warning":
                        view_name = ('%s (%s)' % (view.name, view.xml_id)) if view.xml_id else view.name
                        _logger.warning('Invalid view %s definition in %s \n%s', view_name, view.arch_fs, view.arch)
            except ValueError as e:
                # show the offending arch lines around the error location
                lines = etree.tostring(combined_arch, encoding='unicode').splitlines(keepends=True)
                fivelines = "".join(lines[max(0, e.context["line"]-3):e.context["line"]+2])
                err = ValidationError(_(
                    "Error while validating view near:\n\n%(fivelines)s\n%(error)s",
                    fivelines=fivelines, error=tools.ustr(e),
                ))
                err.context = e.context
                raise err.with_traceback(e.__traceback__) from None
        return True
@api.constrains('type', 'groups_id', 'inherit_id')
def _check_groups(self):
for view in self:
if (view.type == 'qweb' and
view.groups_id and
view.inherit_id and
view.mode != 'primary'):
raise ValidationError(_("Inherited Qweb view cannot have 'Groups' define on the record. Use 'groups' attributes inside the view definition"))
@api.constrains('inherit_id')
def _check_000_inheritance(self):
# NOTE: constraints methods are check alphabetically. Always ensure this method will be
# called before other constraint methods to avoid infinite loop in `_get_combined_arch`.
if not self._check_recursion(parent='inherit_id'):
raise ValidationError(_('You cannot create recursive inherited views.'))
_sql_constraints = [
('inheritance_mode',
"CHECK (mode != 'extension' OR inherit_id IS NOT NULL)",
"Invalid inheritance mode: if the mode is 'extension', the view must"
" extend an other view"),
('qweb_required_key',
"CHECK (type != 'qweb' OR key IS NOT NULL)",
"Invalid key: QWeb view should have a key"),
]
def _auto_init(self):
res = super(View, self)._auto_init()
tools.create_index(self._cr, 'ir_ui_view_model_type_inherit_id',
self._table, ['model', 'inherit_id'])
return res
def _compute_defaults(self, values):
if 'inherit_id' in values:
# Do not automatically change the mode if the view already has an inherit_id,
# and the user change it to another.
if not values['inherit_id'] or all(not view.inherit_id for view in self):
values.setdefault('mode', 'extension' if values['inherit_id'] else 'primary')
return values
    @api.model_create_multi
    def create(self, vals_list):
        """Create views, inferring ``type``, ``key`` and ``name`` when absent,
        snapshotting the arch into ``arch_prev`` and recording the source XML
        file (``arch_fs``) when installing from data files."""
        for values in vals_list:
            if not values.get('type'):
                if values.get('inherit_id'):
                    # an extension view has the same type as its parent
                    values['type'] = self.browse(values['inherit_id']).type
                else:
                    try:
                        if not values.get('arch') and not values.get('arch_base'):
                            raise ValidationError(_('Missing view architecture.'))
                        # infer the type from the arch's root tag
                        values['type'] = etree.fromstring(values.get('arch') or values.get('arch_base')).tag
                    except LxmlError:
                        # don't raise here, the constraint that runs `self._check_xml` will
                        # do the job properly.
                        pass
            if not values.get('key') and values.get('type') == 'qweb':
                values['key'] = "gen_key.%s" % str(uuid.uuid4())[:6]
            if not values.get('name'):
                values['name'] = "%s %s" % (values.get('model'), values['type'])
            # Create might be called with either `arch` (xml files), `arch_base` (form view) or `arch_db`.
            values['arch_prev'] = values.get('arch_base') or values.get('arch_db') or values.get('arch')
            # write on arch: bypass _inverse_arch()
            if 'arch' in values:
                values['arch_db'] = values.pop('arch')
                if 'install_filename' in self._context:
                    # we store the relative path to the resource instead of the absolute path, if found
                    # (it will be missing e.g. when importing data-only modules using base_import_module)
                    path_info = get_resource_from_path(self._context['install_filename'])
                    if path_info:
                        values['arch_fs'] = '/'.join(path_info[0:2])
                        values['arch_updated'] = False
            values.update(self._compute_defaults(values))
        self.clear_caches()
        result = super(View, self.with_context(ir_ui_view_partial_validation=True)).create(vals_list)
        return result.with_env(self.env)
    def write(self, vals):
        """Write on views, dropping user customizations and keeping track of
        arch modifications (``arch_updated``, ``arch_prev``)."""
        # Keep track if view was modified. That will be useful for the --dev mode
        # to prefer modified arch over file arch.
        if 'arch_updated' not in vals and ('arch' in vals or 'arch_base' in vals) and 'install_filename' not in self._context:
            vals['arch_updated'] = True
        # drop the corresponding view customizations (used for dashboards for example), otherwise
        # not all users would see the updated views
        custom_view = self.env['ir.ui.view.custom'].search([('ref_id', 'in', self.ids)])
        if custom_view:
            custom_view.unlink()
        self.clear_caches()
        if 'arch_db' in vals and not self.env.context.get('no_save_prev'):
            # snapshot the previous arch for reset_arch(mode='soft')
            vals['arch_prev'] = self.arch_db
        res = super(View, self).write(self._compute_defaults(vals))
        # Check the xml of the view if it gets re-activated.
        # Ideally, `active` should have been added to the `api.constrains` of `_check_xml`,
        # but the ORM writes and validates regular field (such as `active`) before inverse fields (such as `arch`),
        # and therefore when writing `active` and `arch` at the same time, `_check_xml` is called twice,
        # and the first time it tries to validate the view without the modification to the arch,
        # which is problematic if the user corrects the view at the same time he re-enables it.
        if vals.get('active'):
            # Call `_validate_fields` instead of `_check_xml` to have the regular constrains error dialog
            # instead of the traceback dialog.
            self._validate_fields(['arch_db'])
        return res
def unlink(self):
# if in uninstall mode and has children views, emulate an ondelete cascade
if self.env.context.get('_force_unlink', False) and self.inherit_children_ids:
self.inherit_children_ids.unlink()
return super(View, self).unlink()
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
self.ensure_one()
if self.key and default and 'key' not in default:
new_key = self.key + '_%s' % str(uuid.uuid4())[:6]
default = dict(default or {}, key=new_key)
return super(View, self).copy(default)
# default view selection
@api.model
def default_view(self, model, view_type):
""" Fetches the default view for the provided (model, view_type) pair:
primary view with the lowest priority.
:param str model:
:param int view_type:
:return: id of the default view of False if none found
:rtype: int
"""
domain = [('model', '=', model), ('type', '=', view_type), ('mode', '=', 'primary')]
return self.search(domain, limit=1).id
    #------------------------------------------------------
    # Inheritance mechanism
    #------------------------------------------------------
@api.model
def _get_inheriting_views_domain(self):
""" Return a domain to filter the sub-views to inherit from. """
return [('active', '=', True)]
    @api.model
    def _get_filter_xmlid_query(self):
        """Return the SQL query used by _filter_loaded_views() to keep only
        view ids having an xml id in one of the given modules.
        This method is meant to be overridden by other modules.
        """
        return """SELECT res_id FROM ir_model_data
                  WHERE res_id IN %(res_ids)s AND model = 'ir.ui.view' AND module IN %(modules)s
               """
    def _get_inheriting_views(self):
        """
        Determine the views that inherit from the current recordset, and return
        them as a recordset, ordered by priority then by id.
        """
        self.check_access_rights('read')
        domain = self._get_inheriting_views_domain()
        e = expression(domain, self.env['ir.ui.view'])
        from_clause, where_clause, where_params = e.query.get_sql()
        assert from_clause == '"ir_ui_view"', f"Unexpected from clause: {from_clause}"
        # make sure pending ORM changes are visible to the raw SQL below
        self._flush_search(domain, fields=['inherit_id', 'priority', 'model', 'mode'], order='id')
        query = f"""
            WITH RECURSIVE ir_ui_view_inherits AS (
                SELECT id, inherit_id, priority, mode, model
                FROM ir_ui_view
                WHERE id IN %s AND ({where_clause})
            UNION
                SELECT ir_ui_view.id, ir_ui_view.inherit_id, ir_ui_view.priority,
                       ir_ui_view.mode, ir_ui_view.model
                FROM ir_ui_view
                INNER JOIN ir_ui_view_inherits parent ON parent.id = ir_ui_view.inherit_id
                WHERE coalesce(ir_ui_view.model, '') = coalesce(parent.model, '')
                      AND ir_ui_view.mode = 'extension'
                      AND ({where_clause})
            )
            SELECT
                v.id, v.inherit_id, v.mode,
                ARRAY(SELECT r.group_id FROM ir_ui_view_group_rel r WHERE r.view_id=v.id)
            FROM ir_ui_view_inherits v
            ORDER BY v.priority, v.id
        """
        # ORDER BY v.priority, v.id:
        # 1/ sort by priority: arbitrary value set by developers on some
        #    views to solve "dependency hell" problems and force a view
        #    to be combined earlier or later. e.g. all views created via
        #    studio have a priority=99 to be loaded last.
        # 2/ sort by view id: the order the views were inserted in the
        #    database. e.g. base views are placed before stock ones.
        # where_params is passed twice: once per {where_clause} occurrence
        self.env.cr.execute(query, [tuple(self.ids)] + where_params + where_params)
        rows = self.env.cr.fetchall()
        # filter out forbidden views (row[3] holds the view's group ids)
        if any(row[3] for row in rows):
            user_groups = set(self.env.user.groups_id.ids)
            rows = [row for row in rows if not (row[3] and user_groups.isdisjoint(row[3]))]
        views = self.browse(row[0] for row in rows)
        # optimization: fill in cache of inherit_id and mode
        self.env.cache.update(views, type(self).inherit_id, [row[1] for row in rows])
        self.env.cache.update(views, type(self).mode, [row[2] for row in rows])
        # During an upgrade, we can only use the views that have been
        # fully upgraded already.
        if self.pool._init and not self._context.get('load_all_views'):
            views = views._filter_loaded_views()
        return views
def _filter_loaded_views(self):
"""
During the module upgrade phase it may happen that a view is
present in the database but the fields it relies on are not
fully loaded yet. This method only considers views that belong
to modules whose code is already loaded. Custom views defined
directly in the database are loaded only after the module
initialization phase is completely finished.
"""
# check that all found ids have a corresponding xml_id in a loaded module
check_view_ids = self.env.context['check_view_ids']
ids_to_check = [vid for vid in self.ids if vid not in check_view_ids]
if not ids_to_check:
return self
loaded_modules = tuple(self.pool._init_modules) + (self._context.get('install_module'),)
query = self._get_filter_xmlid_query()
self.env.cr.execute(query, {'res_ids': tuple(ids_to_check), 'modules': loaded_modules})
valid_view_ids = {r[0] for r in self.env.cr.fetchall()} | set(check_view_ids)
return self.browse(vid for vid in self.ids if vid in valid_view_ids)
def _check_view_access(self):
""" Verify that a view is accessible by the current user based on the
groups attribute. Views with no groups are considered private.
"""
if self.inherit_id and self.mode != 'primary':
return self.inherit_id._check_view_access()
if self.groups_id & self.env.user.groups_id:
return True
if self.groups_id:
error = _(
"View '%(name)s' accessible only to groups %(groups)s ",
name=self.key,
groups=", ".join([g.name for g in self.groups_id]
))
else:
error = _("View '%(name)s' is private", name=self.key)
raise AccessError(error)
def _raise_view_error(self, message, node=None, *, from_exception=None, from_traceback=None):
""" Handle a view error by raising an exception.
:param str message: message to raise or log, augmented with contextual
view information
:param node: the lxml element where the error is located (if any)
:param BaseException from_exception:
when raising an exception, chain it to the provided one (default:
disable chaining)
:param types.TracebackType from_traceback:
when raising an exception, start with this traceback (default: start
at exception creation)
"""
err = ValueError(message).with_traceback(from_traceback)
err.context = {
'view': self,
'name': getattr(self, 'name', None),
'xmlid': self.env.context.get('install_xmlid') or self.xml_id,
'view.model': self.model,
'view.parent': self.inherit_id,
'file': self.env.context.get('install_filename'),
'line': node.sourceline if node is not None else 1,
}
raise err from from_exception
def _log_view_warning(self, message, node):
""" Handle a view issue by logging a warning.
:param str message: message to raise or log, augmented with contextual
view information
:param node: the lxml element where the error is located (if any)
"""
error_context = {
'view': self,
'name': getattr(self, 'name', None),
'xmlid': self.env.context.get('install_xmlid') or self.xml_id,
'view.model': self.model,
'view.parent': self.inherit_id,
'file': self.env.context.get('install_filename'),
'line': node.sourceline if node is not None else 1,
}
_logger.warning(
"%s\nView error context:\n%s",
message, pprint.pformat(error_context)
)
    def locate_node(self, arch, spec):
        """ Locate a node in a source (parent) architecture.

        Given a complete source (parent) architecture (i.e. the field
        `arch` in a view), and a 'spec' node (a node in an inheriting
        view that specifies the location in the source view of what
        should be changed), return (if it exists) the node in the
        source view matching the specification.

        Thin wrapper delegating to odoo.tools.locate_node.

        :param arch: a parent architecture to modify
        :param spec: a modifying node in an inheriting view
        :return: a node in the source matching the spec
        """
        return locate_node(arch, spec)
def inherit_branding(self, specs_tree):
for node in specs_tree.iterchildren(tag=etree.Element):
xpath = node.getroottree().getpath(node)
if node.tag == 'data' or node.tag == 'xpath' or node.get('position'):
self.inherit_branding(node)
elif node.get('t-field'):
node.set('data-oe-xpath', xpath)
self.inherit_branding(node)
else:
node.set('data-oe-id', str(self.id))
node.set('data-oe-xpath', xpath)
node.set('data-oe-model', 'ir.ui.view')
node.set('data-oe-field', 'arch')
return specs_tree
    def _add_validation_flag(self, combined_arch, view=None, arch=None):
        """ Add a validation flag on elements in ``combined_arch`` or ``arch``.
        This is part of the partial validation of views.

        :param Element combined_arch: the architecture to be modified by ``arch``
        :param view: an optional view inheriting ``self``
        :param Element arch: an optional modifying architecture from inheriting
            view ``view``
        """
        # validate_view_ids is either falsy (no validation), True (full
        # validation) or a collection of ids (partial validation)
        validate_view_ids = self.env.context.get('validate_view_ids')
        if not validate_view_ids:
            return
        if validate_view_ids is True or self.id in validate_view_ids:
            # optimization, flag the root node
            combined_arch.set('__validate__', '1')
            return
        if view is None or view.id not in validate_view_ids:
            return
        # only the inheriting view must be validated: flag precisely the
        # elements it touches, depending on each spec's position
        for node in arch.xpath('//*[@position]'):
            if node.get('position') in ('after', 'before', 'inside'):
                # validate the elements being inserted, except the ones that
                # specify a move, as in:
                #   <field name="foo" position="after">
                #       <field name="bar" position="move"/>
                #   </field>
                for child in node.iterchildren(tag=etree.Element):
                    if not child.get('position'):
                        child.set('__validate__', '1')
            if node.get('position') == 'replace':
                # validate everything, since this impacts the whole arch
                combined_arch.set('__validate__', '1')
                break
            if node.get('position') == 'attributes':
                # validate the element being modified by adding
                # attribute "__validate__" on it:
                #   <field name="foo" position="attributes">
                #       <attribute name="readonly">1</attribute>
                #       <attribute name="__validate__">1</attribute>  <!-- add this -->
                #   </field>
                node.append(E.attribute('1', name='__validate__'))
    @api.model
    def apply_inheritance_specs(self, source, specs_tree, pre_locate=lambda s: True):
        """ Apply an inheriting view (a descendant of the base view)

        Apply to a source architecture all the spec nodes (i.e. nodes
        describing where and what changes to apply to some parent
        architecture) given by an inheriting view.

        :param Element source: a parent architecture to modify
        :param Element specs_tree: a modifying architecture in an inheriting view
        :param (optional) pre_locate: function that is execute before locating a node.
            This function receives an arch as argument.
        :return: a modified source where the specs are applied
        :rtype: Element
        """
        # Queue of specification nodes (i.e. nodes describing where and
        # changes to apply to some parent architecture).
        try:
            # delegate to the module-level helper; branding is only requested
            # when the context asks for it
            source = apply_inheritance_specs(
                source, specs_tree,
                inherit_branding=self._context.get('inherit_branding'),
                pre_locate=pre_locate,
            )
        except ValueError as e:
            # re-raise with full view context (xmlid, file, line) attached
            self._raise_view_error(str(e), specs_tree)
        return source
    def _combine(self, hierarchy: dict):
        """
        Return self's arch combined with its inherited views archs.

        :param hierarchy: mapping from parent views to their child views
        :return: combined architecture
        :rtype: Element
        """
        self.ensure_one()
        assert self.mode == 'primary'

        # We achieve a pre-order depth-first hierarchy traversal where
        # primary views (and their children) are traversed after all the
        # extensions for the current primary view have been visited.
        #
        # https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search_of_binary_tree
        #
        # Example:         hierarchy = {
        #                      1: [2, 3],  # primary view
        #        1*            2: [4, 5],
        #       / \            3: [],
        #      2   3           4: [6],     # primary view
        #     / \              5: [7, 8],
        #    4*  5             6: [],
        #   /   / \            7: [],
        #  6   7   8           8: [],
        #                  }
        #
        # Tree traversal order (`view` and `queue` at the `while` stmt):
        #   1 [2, 3]
        #   2 [5, 3, 4]
        #   5 [7, 8, 3, 4]
        #   7 [8, 3, 4]
        #   8 [3, 4]
        #   3 [4]
        #   4 [6]
        #   6 []
        combined_arch = etree.fromstring(self.arch)
        if self.env.context.get('inherit_branding'):
            combined_arch.attrib.update({
                'data-oe-model': 'ir.ui.view',
                'data-oe-id': str(self.id),
                'data-oe-field': 'arch',
            })
        self._add_validation_flag(combined_arch)

        # The depth-first traversal is implemented with a double-ended queue.
        # The queue is traversed from left to right, and after each view in the
        # queue is processed, its children are pushed at the left of the queue,
        # so that they are traversed in order. The queue is therefore mostly
        # used as a stack. An exception is made for primary views, which are
        # pushed at the other end of the queue, so that they are applied after
        # all extensions have been applied.
        # NOTE: sorting by mode relies on 'extension' < 'primary'
        # lexicographically, so extensions come first in the initial queue.
        queue = collections.deque(sorted(hierarchy[self], key=lambda v: v.mode))
        while queue:
            view = queue.popleft()
            arch = etree.fromstring(view.arch)
            if view.env.context.get('inherit_branding'):
                view.inherit_branding(arch)
            self._add_validation_flag(combined_arch, view, arch)
            combined_arch = view.apply_inheritance_specs(combined_arch, arch)
            for child_view in reversed(hierarchy[view]):
                if child_view.mode == 'primary':
                    queue.append(child_view)
                else:
                    queue.appendleft(child_view)
        return combined_arch
def read_combined(self, fields=None):
"""
Utility function to get a view combined with its inherited views.
* Gets the top of the view tree if a sub-view is requested
* Applies all inherited archs on the root view
* Returns the view with all requested fields
.. note:: ``arch`` is always added to the fields list even if not
requested (similar to ``id``)
"""
warnings.warn("use get_combined_arch() instead", DeprecationWarning, stacklevel=2)
if fields:
fields = list({'arch', 'model'}.union(fields))
[result] = self.read(fields)
result['arch'] = self.get_combined_arch()
return result
def get_combined_arch(self):
""" Return the arch of ``self`` (as a string) combined with its inherited views. """
return etree.tostring(self._get_combined_arch(), encoding='unicode')
    def _get_combined_arch(self):
        """ Return the arch of ``self`` (as an etree) combined with its inherited views. """
        # walk up the inheritance chain to the primary root view, collecting
        # every view id on the way
        root = self
        view_ids = []
        while True:
            view_ids.append(root.id)
            if not root.inherit_id:
                break
            root = root.inherit_id

        views = self.browse(view_ids)

        # Add inherited views to the list of loading forced views
        # Otherwise, inherited views could not find elements created in
        # their direct parents if that parent is defined in the same module
        # introduce check_view_ids in context
        if 'check_view_ids' not in views.env.context:
            views = views.with_context(check_view_ids=[])
        # NOTE: the context mapping itself is immutable, but the list object
        # stored under 'check_view_ids' is mutated in place on purpose here
        views.env.context['check_view_ids'].extend(view_ids)

        # Map each node to its children nodes. Note that all children nodes are
        # part of a single prefetch set, which is all views to combine.
        tree_views = views._get_inheriting_views()
        hierarchy = collections.defaultdict(list)
        for view in tree_views:
            hierarchy[view.inherit_id].append(view)

        # optimization: make root part of the prefetch set, too
        arch = root.with_prefetch(tree_views._prefetch_ids)._combine(hierarchy)
        return arch
def _apply_groups(self, node, name_manager, node_info):
""" Apply group restrictions: elements with a 'groups' attribute should
be made invisible to people who are not members.
"""
if node.get('groups'):
can_see = self.user_has_groups(groups=node.get('groups'))
if not can_see:
node.set('invisible', '1')
node_info['modifiers']['invisible'] = True
if 'attrs' in node.attrib:
del node.attrib['attrs'] # avoid making field visible later
del node.attrib['groups']
#------------------------------------------------------
# Postprocessing: translation, groups and modifiers
#------------------------------------------------------
# TODO: remove group processing from ir_qweb
#------------------------------------------------------
def postprocess_and_fields(self, node, model=None):
""" Return an architecture and a description of all the fields.
The field description combines the result of fields_get() and
postprocess().
:param self: the view to postprocess
:param node: the architecture as an etree
:param model: the view's reference model name
:return: a tuple (arch, fields) where arch is the given node as a
string and fields is the description of all the fields.
"""
self and self.ensure_one() # self is at most one view
name_manager = self._postprocess_view(node, model or self.model)
arch = etree.tostring(node, encoding="unicode").replace('\t', '')
return arch, dict(name_manager.available_fields)
    def _postprocess_view(self, node, model_name, editable=True):
        """ Process the given architecture, modifying it in-place to add and
        remove stuff.

        :param self: the optional view to postprocess
        :param node: the combined architecture as an etree
        :param model_name: the view's reference model name
        :param editable: whether the view is considered editable
        :return: the processed architecture's NameManager
        """
        root = node

        if model_name not in self.env:
            self._raise_view_error(_('Model not found: %(model)s', model=model_name), root)
        model = self.env[model_name]

        self._postprocess_on_change(root, model)

        name_manager = NameManager(model)

        # use a stack to recursively traverse the tree
        stack = [(root, editable)]
        while stack:
            node, editable = stack.pop()

            # compute default
            tag = node.tag
            parent = node.getparent()
            node_info = {
                'modifiers': {},
                'editable': editable and self._editable_node(node, name_manager),
            }

            # tag-specific postprocessing
            postprocessor = getattr(self, f"_postprocess_tag_{tag}", None)
            if postprocessor is not None:
                postprocessor(node, name_manager, node_info)
                # a changed parent means the postprocessor detached the node
                if node.getparent() is not parent:
                    # the node has been removed, stop processing here
                    continue

            self._apply_groups(node, name_manager, node_info)
            transfer_node_to_modifiers(node, node_info['modifiers'], self._context)
            transfer_modifiers_to_node(node_info['modifiers'], node)

            # if present, iterate on node_info['children'] instead of node
            # (children are pushed reversed so they pop in document order)
            for child in reversed(node_info.get('children', node)):
                stack.append((child, node_info['editable']))

        name_manager.update_available_fields()
        self._postprocess_access_rights(root, model.sudo(False))

        return name_manager
    def _postprocess_on_change(self, arch, model):
        """ Add attribute on_change="1" on fields that are dependencies of
        computed fields on the same view.
        """
        # map each field object to its corresponding nodes in arch
        field_nodes = collections.defaultdict(list)

        def collect(node, model):
            # recursive walk; the model switches to the comodel when a
            # relational field node is entered, so nested subviews are
            # resolved against the right model
            if node.tag == 'field':
                field = model._fields.get(node.get('name'))
                if field:
                    field_nodes[field].append(node)
                    if field.relational:
                        model = self.env[field.comodel_name]
            for child in node:
                collect(child, model)

        collect(arch, model)

        for field, nodes in field_nodes.items():
            # if field should trigger an onchange, add on_change="1" on the
            # nodes referring to field
            model = self.env[field.model_name]
            if model._has_onchange(field, field_nodes):
                for node in nodes:
                    if not node.get('on_change'):
                        node.set('on_change', '1')
    def _postprocess_access_rights(self, node, model):
        """ Compute and set on node access rights based on view type. Specific
        views can add additional specific rights like creating columns for
        many2one-based grouping views. """
        # testing ACL as real user
        is_base_model = self.env.context.get('base_model_name', model._name) == model._name

        if node.tag in ('kanban', 'tree', 'form', 'activity', 'calendar'):
            for action, operation in (('create', 'create'), ('delete', 'unlink'), ('edit', 'write')):
                # precedence: (not attr and not ACL) or (not context flag and base model)
                if (not node.get(action) and
                        not model.check_access_rights(operation, raise_exception=False) or
                        not self._context.get(action, True) and is_base_model):
                    node.set(action, 'false')

        if node.tag == 'kanban':
            group_by_name = node.get('default_group_by')
            group_by_field = model._fields.get(group_by_name)
            # grouping on a many2one also needs rights on the comodel to
            # create/delete/rename columns
            if group_by_field and group_by_field.type == 'many2one':
                group_by_model = model.env[group_by_field.comodel_name]
                for action, operation in (('group_create', 'create'), ('group_delete', 'unlink'), ('group_edit', 'write')):
                    if (not node.get(action) and
                            not group_by_model.check_access_rights(operation, raise_exception=False) or
                            not self._context.get(action, True) and is_base_model):
                        node.set(action, 'false')
#------------------------------------------------------
# Specific node postprocessors
#------------------------------------------------------
def _postprocess_tag_calendar(self, node, name_manager, node_info):
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color', 'all_day'):
if node.get(additional_field):
name_manager.has_field(node.get(additional_field).split('.', 1)[0])
for f in node:
if f.tag == 'filter':
name_manager.has_field(f.get('name'))
    def _postprocess_tag_field(self, node, name_manager, node_info):
        """ Post-process a <field> node: drop it if restricted by the field's
        groups, post-process embedded subviews, and set can_create/can_write
        flags on relational fields. """
        if node.get('name'):
            attrs = {'id': node.get('id'), 'select': node.get('select')}
            field = name_manager.model._fields.get(node.get('name'))
            if field:
                # apply groups (not tested)
                if field.groups and not self.user_has_groups(groups=field.groups):
                    node.getparent().remove(node)
                    # no point processing view-level ``groups`` anymore, return
                    return
                # embedded subviews are detached, post-processed against the
                # comodel, and stored serialized under attrs['views']
                views = {}
                for child in node:
                    if child.tag in ('form', 'tree', 'graph', 'kanban', 'calendar'):
                        node.remove(child)
                        sub_name_manager = self.with_context(
                            base_model_name=name_manager.model._name,
                        )._postprocess_view(
                            child, field.comodel_name, editable=node_info['editable'],
                        )
                        xarch = etree.tostring(child, encoding="unicode").replace('\t', '')
                        views[child.tag] = {
                            'arch': xarch,
                            'fields': dict(sub_name_manager.available_fields),
                        }
                attrs['views'] = views
                if field.type in ('many2one', 'many2many'):
                    # ACL check as the real user (sudo(False)) on the comodel
                    comodel = self.env[field.comodel_name].sudo(False)
                    can_create = comodel.check_access_rights('create', raise_exception=False)
                    can_write = comodel.check_access_rights('write', raise_exception=False)
                    node.set('can_create', 'true' if can_create else 'false')
                    node.set('can_write', 'true' if can_write else 'false')

            name_manager.has_field(node.get('name'), attrs)

            field_info = name_manager.field_info.get(node.get('name'))
            if field_info:
                transfer_field_to_modifiers(field_info, node_info['modifiers'])
def _postprocess_tag_form(self, node, name_manager, node_info):
result = name_manager.model.view_header_get(False, node.tag)
if result:
node.set('string', result)
    def _postprocess_tag_groupby(self, node, name_manager, node_info):
        # groupby nodes should be considered as nested view because they may
        # contain fields on the comodel
        name = node.get('name')
        field = name_manager.model._fields.get(name)
        if not field or not field.comodel_name:
            return
        # move all children nodes into a new node <groupby>
        # (E.groupby(*node) reparents the children into the new element)
        groupby_node = E.groupby(*node)
        # post-process the node as a nested view, and associate it to the field
        sub_name_manager = self.with_context(
            base_model_name=name_manager.model._name,
        )._postprocess_view(groupby_node, field.comodel_name, editable=False)
        xarch = etree.tostring(groupby_node, encoding="unicode").replace('\t', '')
        name_manager.has_field(name, {'views': {
            'groupby': {
                'arch': xarch,
                'fields': dict(sub_name_manager.available_fields),
            }
        }})
def _postprocess_tag_label(self, node, name_manager, node_info):
if node.get('for'):
field = name_manager.model._fields.get(node.get('for'))
if field and field.groups and not self.user_has_groups(groups=field.groups):
node.getparent().remove(node)
def _postprocess_tag_search(self, node, name_manager, node_info):
searchpanel = [child for child in node if child.tag == 'searchpanel']
if searchpanel:
self.with_context(
base_model_name=name_manager.model._name,
)._postprocess_view(
searchpanel[0], name_manager.model._name, editable=False,
)
node_info['children'] = [child for child in node if child.tag != 'searchpanel']
    def _postprocess_tag_tree(self, node, name_manager, node_info):
        # reuse form view post-processing (sets the header string from
        # the model's view_header_get, when provided)
        self._postprocess_tag_form(node, name_manager, node_info)
#-------------------------------------------------------------------
# view editability
#-------------------------------------------------------------------
def _editable_node(self, node, name_manager):
""" Return whether the given node must be considered editable. """
func = getattr(self, f"_editable_tag_{node.tag}", None)
if func is not None:
return func(node, name_manager)
# by default views are non-editable
return node.tag not in (item[0] for item in type(self).type.selection)
    def _editable_tag_form(self, node, name_manager):
        # a <form> node is always an editable context for its descendants
        return True
    def _editable_tag_tree(self, node, name_manager):
        # a <tree> is an editable context only when it carries an ``editable``
        # attribute; the attribute value (truthy string or None) is returned
        return node.get('editable')
    def _editable_tag_field(self, node, name_manager):
        # unknown fields are considered editable; known fields must be
        # editable themselves and either not statically readonly or carry
        # dynamic 'attrs' (precedence: A or (B and (C or D)))
        field = name_manager.model._fields.get(node.get('name'))
        return field is None or field.is_editable() and (
            node.get('readonly') not in ('1', 'True')
            or get_dict_asts(node.get('attrs') or "{}")
        )
#-------------------------------------------------------------------
# view validation
#-------------------------------------------------------------------
    def _validate_view(self, node, model_name, editable=True, full=False):
        """ Validate the given architecture node, and return its corresponding
        NameManager.

        :param self: the view being validated
        :param node: the combined architecture as an etree
        :param model_name: the reference model name for the given architecture
        :param editable: whether the view is considered editable
        :param full: whether the whole view must be validated
        :return: the combined architecture's NameManager
        """
        self.ensure_one()

        if model_name not in self.env:
            self._raise_view_error(_('Model not found: %(model)s', model=model_name), node)

        # fields_get() optimization: validation does not require translations
        model = self.env[model_name].with_context(lang=None)
        name_manager = NameManager(model)

        # use a stack to recursively traverse the tree
        stack = [(node, editable, full)]
        while stack:
            node, editable, validate = stack.pop()

            # compute default
            tag = node.tag
            # the '__validate__' flag set by _add_validation_flag() turns
            # validation on for this node and its whole subtree
            validate = validate or node.get('__validate__')
            node_info = {
                'editable': editable and self._editable_node(node, name_manager),
                'validate': validate,
            }

            # tag-specific validation
            validator = getattr(self, f"_validate_tag_{tag}", None)
            if validator is not None:
                validator(node, name_manager, node_info)

            if validate:
                self._validate_attrs(node, name_manager, node_info)

            # children pushed reversed so they pop in document order
            for child in reversed(node):
                stack.append((child, node_info['editable'], validate))

        name_manager.check(self)

        return name_manager
#------------------------------------------------------
# Node validator
#------------------------------------------------------
    def _validate_tag_form(self, node, name_manager, node_info):
        # no form-specific validation; kept as an extension hook and reused
        # by _validate_tag_tree
        pass
    def _validate_tag_tree(self, node, name_manager, node_info):
        # reuse form view validation
        self._validate_tag_form(node, name_manager, node_info)
        if not node_info['validate']:
            return
        # only a fixed set of element tags may appear directly under <tree>
        allowed_tags = ('field', 'button', 'control', 'groupby', 'widget', 'header')
        for child in node.iterchildren(tag=etree.Element):
            # NOTE(review): iterchildren(tag=etree.Element) should already
            # exclude comments, so the isinstance check looks redundant —
            # kept as-is, confirm against lxml behavior before removing
            if child.tag not in allowed_tags and not isinstance(child, etree._Comment):
                msg = _(
                    'Tree child can only have one of %(tags)s tag (not %(wrong_tag)s)',
                    tags=', '.join(allowed_tags), wrong_tag=child.tag,
                )
                self._raise_view_error(msg, child)
def _validate_tag_graph(self, node, name_manager, node_info):
if not node_info['validate']:
return
for child in node.iterchildren(tag=etree.Element):
if child.tag != 'field' and not isinstance(child, etree._Comment):
msg = _('A <graph> can only contains <field> nodes, found a <%s>', child.tag)
self._raise_view_error(msg, child)
def _validate_tag_calendar(self, node, name_manager, node_info):
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color', 'all_day'):
if node.get(additional_field):
name_manager.has_field(node.get(additional_field).split('.', 1)[0])
for f in node:
if f.tag == 'filter':
name_manager.has_field(f.get('name'))
def _validate_tag_search(self, node, name_manager, node_info):
if node_info['validate'] and not node.iterdescendants(tag="field"):
# the field of the search view may be within a group node, which is why we must check
# for all descendants containing a node with a field tag, if this is not the case
# then a search is not possible.
self._log_view_warning('Search tag requires at least one field element', node)
searchpanels = [child for child in node if child.tag == 'searchpanel']
if searchpanels:
if len(searchpanels) > 1:
self._raise_view_error(_('Search tag can only contain one search panel'), node)
node.remove(searchpanels[0])
self._validate_view(searchpanels[0], name_manager.model._name,
editable=False, full=node_info['validate'])
    def _validate_tag_field(self, node, name_manager, node_info):
        """ Validate a <field> node: the field must exist, its domain must be
        consistent, its embedded subviews are validated recursively, and its
        modifier attributes must evaluate to booleans. """
        validate = node_info['validate']

        name = node.get('name')
        if not name:
            self._raise_view_error(_("Field tag must have a \"name\" attribute defined"), node)

        field = name_manager.model._fields.get(name)
        if field:
            if validate and field.relational:
                # the node domain wins over the field's own default domain
                domain = (
                    node.get('domain')
                    or node_info['editable'] and field._description_domain(self.env)
                )
                if isinstance(domain, str):
                    # dynamic domain: in [('foo', '=', bar)], field 'foo' must
                    # exist on the comodel and field 'bar' must be in the view
                    desc = (f'domain of <field name="{name}">' if node.get('domain')
                            else f"domain of field '{name}'")
                    fnames, vnames = self._get_domain_identifiers(node, domain, desc)
                    self._check_field_paths(node, fnames, field.comodel_name, f"{desc} ({domain})")
                    if vnames:
                        name_manager.must_have_fields(vnames, f"{desc} ({domain})")

            elif validate and node.get('domain'):
                msg = _(
                    'Domain on non-relational field "%(name)s" makes no sense (domain:%(domain)s)',
                    name=name, domain=node.get('domain'),
                )
                self._raise_view_error(msg, node)

            # embedded subviews are detached and validated against the comodel
            for child in node:
                if child.tag not in ('form', 'tree', 'graph', 'kanban', 'calendar'):
                    continue
                node.remove(child)
                sub_manager = self._validate_view(
                    child, field.comodel_name, editable=node_info['editable'], full=validate,
                )
                # fields the subview requires from its parent bubble up here
                for fname, use in sub_manager.mandatory_parent_fields.items():
                    name_manager.must_have_field(fname, use)

        elif validate and name not in name_manager.field_info:
            msg = _(
                'Field "%(field_name)s" does not exist in model "%(model_name)s"',
                field_name=name, model_name=name_manager.model._name,
            )
            self._raise_view_error(msg, node)

        name_manager.has_field(name, {'id': node.get('id'), 'select': node.get('select')})

        if validate:
            # modifier attributes must statically evaluate to a boolean
            for attribute in ('invisible', 'readonly', 'required'):
                val = node.get(attribute)
                if val:
                    res = quick_eval(val, {'context': self._context})
                    if res not in (1, 0, True, False, None):
                        msg = _(
                            'Attribute %(attribute)s evaluation expects a boolean, got %(value)s',
                            attribute=attribute, value=val,
                        )
                        self._raise_view_error(msg, node)
def _validate_tag_filter(self, node, name_manager, node_info):
if not node_info['validate']:
return
domain = node.get('domain')
if domain:
name = node.get('name')
desc = f'domain of <filter name="{name}">' if name else 'domain of <filter>'
fnames, vnames = self._get_domain_identifiers(node, domain, desc)
self._check_field_paths(node, fnames, name_manager.model._name, f"{desc} ({domain})")
if vnames:
name_manager.must_have_fields(vnames, f"{desc} ({domain})")
    def _validate_tag_button(self, node, name_manager, node_info):
        """ Validate a <button> node: 'special' buttons must use a known
        value, 'object' buttons must target a callable public method, and
        'action' buttons must reference an existing ir.actions record. """
        if not node_info['validate']:
            return
        name = node.get('name')
        special = node.get('special')
        type_ = node.get('type')
        if special:
            if special not in ('cancel', 'save', 'add'):
                self._raise_view_error(_("Invalid special '%(value)s' in button", value=special), node)
        elif type_:
            if type_ == 'edit':  # list_renderer, used in kanban view
                return
            elif not name:
                self._raise_view_error(_("Button must have a name"), node)
            elif type_ == 'object':
                # the method must exist on the model class...
                func = getattr(type(name_manager.model), name, None)
                if not func:
                    msg = _(
                        "%(action_name)s is not a valid action on %(model_name)s",
                        action_name=name, model_name=name_manager.model._name,
                    )
                    self._raise_view_error(msg, node)
                # ...be public (not underscore-prefixed)...
                try:
                    check_method_name(name)
                except AccessError:
                    msg = _(
                        "%(method)s on %(model)s is private and cannot be called from a button",
                        method=name, model=name_manager.model._name,
                    )
                    self._raise_view_error(msg, node)
                # ...and be callable with no argument besides self
                try:
                    inspect.signature(func).bind(self=name_manager.model)
                except TypeError:
                    msg = "%s on %s has parameters and cannot be called from a button"
                    self._log_view_warning(msg % (name, name_manager.model._name), node)
            elif type_ == 'action':
                # logic mimics /web/action/load behaviour
                action = False
                try:
                    action_id = int(name)
                except ValueError:
                    # not a numeric id: resolve the xmlid to a record
                    model, action_id = self.env['ir.model.data']._xmlid_to_res_model_res_id(name, raise_if_not_found=False)
                    if not action_id:
                        msg = _("Invalid xmlid %(xmlid)s for button of type action.", xmlid=name)
                        self._raise_view_error(msg, node)
                    if not issubclass(self.pool[model], self.pool['ir.actions.actions']):
                        msg = _(
                            "%(xmlid)s is of type %(xmlid_model)s, expected a subclass of ir.actions.actions",
                            xmlid=name, xmlid_model=model,
                        )
                        self._raise_view_error(msg, node)
                action = self.env['ir.actions.actions'].browse(action_id).exists()
                if not action:
                    msg = _(
                        "Action %(action_reference)s (id: %(action_id)s) does not exist for button of type action.",
                        action_reference=name, action_id=action_id,
                    )
                    self._raise_view_error(msg, node)

            name_manager.has_action(name)
        elif node.get('icon'):
            description = 'A button with icon attribute (%s)' % node.get('icon')
            self._validate_fa_class_accessibility(node, description)
    def _validate_tag_groupby(self, node, name_manager, node_info):
        # groupby nodes should be considered as nested view because they may
        # contain fields on the comodel
        name = node.get('name')
        if not name:
            return
        field = name_manager.model._fields.get(name)
        if field:
            if node_info['validate']:
                # grouping is only supported on many2one fields
                if field.type != 'many2one':
                    msg = _(
                        "Field '%(name)s' found in 'groupby' node can only be of type many2one, found %(type)s",
                        name=field.name, type=field.type,
                    )
                    self._raise_view_error(msg, node)
                domain = node_info['editable'] and field._description_domain(self.env)
                if isinstance(domain, str):
                    # dynamic domain: check field paths on the comodel and
                    # register the view fields the domain depends on
                    desc = f"domain of field '{name}'"
                    fnames, vnames = self._get_domain_identifiers(node, domain, desc)
                    self._check_field_paths(node, fnames, field.comodel_name, f"{desc} ({domain})")
                    if vnames:
                        name_manager.must_have_fields(vnames, f"{desc} ({domain})")

            # move all children nodes into a new node <groupby>
            groupby_node = E.groupby(*node)
            # validate the node as a nested view
            sub_manager = self._validate_view(
                groupby_node, field.comodel_name, editable=False, full=node_info['validate'],
            )
            name_manager.has_field(name)
            # fields the nested view requires from its parent bubble up here
            for fname, use in sub_manager.mandatory_parent_fields.items():
                name_manager.must_have_field(fname, use)

        elif node_info['validate']:
            msg = _(
                "Field '%(field)s' found in 'groupby' node does not exist in model %(model)s",
                field=name, model=name_manager.model._name,
            )
            self._raise_view_error(msg, node)
    def _validate_tag_searchpanel(self, node, name_manager, node_info):
        """ A searchpanel item may carry a domain only with select="multi". """
        if not node_info['validate']:
            return
        for child in node.iterchildren(tag=etree.Element):
            if child.get('domain') and child.get('select') != 'multi':
                # NOTE(review): the message wording reads inverted w.r.t. the
                # condition (a domain *requires* select="multi"); kept as-is
                # because it is a translated msgid
                msg = _('Searchpanel item with select multi cannot have a domain.')
                self._raise_view_error(msg, child)
def _validate_tag_label(self, node, name_manager, node_info):
if not node_info['validate']:
return
# replace return not arch.xpath('//label[not(@for) and not(descendant::input)]')
for_ = node.get('for')
if not for_:
msg = _('Label tag must contain a "for". To match label style '
'without corresponding field or button, use \'class="o_form_label"\'.')
self._raise_view_error(msg, node)
else:
name_manager.must_have_name(for_, '<label for="...">')
def _validate_tag_page(self, node, name_manager, node_info):
if not node_info['validate']:
return
if node.getparent() is None or node.getparent().tag != 'notebook':
self._raise_view_error(_('Page direct ancestor must be notebook'), node)
def _validate_tag_img(self, node, name_manager, node_info):
if node_info['validate'] and not any(node.get(alt) for alt in att_names('alt')):
self._log_view_warning('<img> tag must contain an alt attribute', node)
def _validate_tag_a(self, node, name_manager, node_info):
#('calendar', 'form', 'graph', 'kanban', 'pivot', 'search', 'tree', 'activity')
if node_info['validate'] and any('btn' in node.get(cl, '') for cl in att_names('class')):
if node.get('role') != 'button':
msg = '"<a>" tag with "btn" class must have "button" role'
self._log_view_warning(msg, node)
def _validate_tag_ul(self, node, name_manager, node_info):
if node_info['validate']:
# was applied to all nodes, but in practice only used on div and ul
self._check_dropdown_menu(node)
def _validate_tag_div(self, node, name_manager, node_info):
if node_info['validate']:
self._check_dropdown_menu(node)
self._check_progress_bar(node)
#------------------------------------------------------
# Validation tools
#------------------------------------------------------
def _check_dropdown_menu(self, node):
#('calendar', 'form', 'graph', 'kanban', 'pivot', 'search', 'tree', 'activity')
if any('dropdown-menu' in node.get(cl, '') for cl in att_names('class')):
if node.get('role') != 'menu':
msg = 'dropdown-menu class must have menu role'
self._log_view_warning(msg, node)
def _check_progress_bar(self, node):
if any('o_progressbar' in node.get(cl, '') for cl in att_names('class')):
if node.get('role') != 'progressbar':
msg = 'o_progressbar class must have progressbar role'
self._log_view_warning(msg, node)
if not any(node.get(at) for at in att_names('aria-valuenow')):
msg = 'o_progressbar class must have aria-valuenow attribute'
self._log_view_warning(msg, node)
if not any(node.get(at) for at in att_names('aria-valuemin')):
msg = 'o_progressbar class must have aria-valuemin attribute'
self._log_view_warning(msg, node)
if not any(node.get(at) for at in att_names('aria-valuemax')):
msg = 'o_progressbar class must have aria-valuemaxattribute'
self._log_view_warning(msg, node)
    def _validate_attrs(self, node, name_manager, node_info):
        """ Generic validation of node attrs: one branch per known attribute
        kind (classes, attrs, context, groups, layout, accessibility). """
        for attr, expr in node.items():
            if attr in ('class', 't-att-class', 't-attf-class'):
                self._validate_classes(node, expr)

            elif attr == 'attrs':
                for key, val_ast in get_dict_asts(expr).items():
                    if isinstance(val_ast, ast.List):
                        # domains in attrs are used for readonly, invisible, ...
                        # and thus are only executed client side
                        fnames, vnames = self._get_domain_identifiers(node, val_ast, attr, expr)
                        name_manager.must_have_fields(fnames | vnames, f"attrs ({expr})")
                    else:
                        # non-domain value: every free variable must be a view field
                        vnames = get_variable_names(val_ast)
                        if vnames:
                            name_manager.must_have_fields(vnames, f"attrs ({expr})")

            elif attr == 'context':
                for key, val_ast in get_dict_asts(expr).items():
                    if key == 'group_by':  # only in context
                        # 'group_by' must be a literal string naming a field,
                        # optionally with a ':granularity' suffix
                        if not isinstance(val_ast, ast.Str):
                            msg = _(
                                '"group_by" value must be a string %(attribute)s=%(value)r',
                                attribute=attr, value=expr,
                            )
                            self._raise_view_error(msg, node)
                        group_by = val_ast.s
                        fname = group_by.split(':')[0]
                        if fname not in name_manager.model._fields:
                            msg = _(
                                'Unknown field "%(field)s" in "group_by" value in %(attribute)s=%(value)r',
                                field=fname, attribute=attr, value=expr,
                            )
                            self._raise_view_error(msg, node)
                    else:
                        vnames = get_variable_names(val_ast)
                        if vnames:
                            name_manager.must_have_fields(vnames, f"context ({expr})")

            elif attr == 'groups':
                # 'groups' may hold a comma-separated list with '!' negations
                for group in expr.replace('!', '').split(','):
                    # further improvement: add all groups to name_manager in
                    # order to batch check them at the end
                    if not self.env['ir.model.data']._xmlid_to_res_id(group.strip(), raise_if_not_found=False):
                        msg = "The group %r defined in view does not exist!"
                        self._log_view_warning(msg % group, node)

            elif attr in ('col', 'colspan'):
                # col check is mainly there for the tag 'group', but previous
                # check was generic in view form
                if not expr.isdigit():
                    self._raise_view_error(
                        _('%(attribute)r value must be an integer (%(value)s)',
                          attribute=attr, value=expr),
                        node,
                    )

            elif attr.startswith('decoration-'):
                # decoration expressions may reference view fields
                vnames = get_variable_names(expr)
                if vnames:
                    name_manager.must_have_fields(vnames, f"{attr}={expr}")

            elif attr == 'data-toggle' and expr == 'tab':
                # accessibility checks on tab links
                if node.get('role') != 'tab':
                    msg = 'tab link (data-toggle="tab") must have "tab" role'
                    self._log_view_warning(msg, node)
                aria_control = node.get('aria-controls') or node.get('t-att-aria-controls')
                if not aria_control and not node.get('t-attf-aria-controls'):
                    msg = 'tab link (data-toggle="tab") must have "aria_control" defined'
                    self._log_view_warning(msg, node)
                if aria_control and '#' in aria_control:
                    msg = 'aria-controls in tablink cannot contains "#"'
                    self._log_view_warning(msg, node)

            elif attr == "role" and expr in ('presentation', 'none'):
                msg = ("A role cannot be `none` or `presentation`. "
                       "All your elements must be accessible with screen readers, describe it.")
                self._log_view_warning(msg, node)

            elif attr == 'group':
                msg = "attribute 'group' is not valid. Did you mean 'groups'?"
                self._log_view_warning(msg, node)
    def _validate_classes(self, node, expr):
        """ Validate the classes present on node.

        Emits non-blocking warnings (via ``_log_view_warning``) when Bootstrap
        structural classes or accessibility conventions are misused.

        :param node: etree element being checked
        :param expr: raw value of the node's ``class`` attribute
        """
        classes = set(expr.split(' '))
        # Be careful: not always true if it is an expression
        # example: <div t-attf-class="{{!selection_mode ? 'oe_kanban_color_' + kanban_getcolor(record.color.raw_value) : ''}} oe_kanban_card oe_kanban_global_click oe_applicant_kanban oe_semantic_html_override">
        if 'modal' in classes and node.get('role') != 'dialog':
            msg = '"modal" class should only be used with "dialog" role'
            self._log_view_warning(msg, node)

        if 'modal-header' in classes and node.tag != 'header':
            msg = '"modal-header" class should only be used in "header" tag'
            self._log_view_warning(msg, node)

        if 'modal-body' in classes and node.tag != 'main':
            msg = '"modal-body" class should only be used in "main" tag'
            self._log_view_warning(msg, node)

        if 'modal-footer' in classes and node.tag != 'footer':
            msg = '"modal-footer" class should only be used in "footer" tag'
            self._log_view_warning(msg, node)

        if 'tab-pane' in classes and node.get('role') != 'tabpanel':
            msg = '"tab-pane" class should only be used with "tabpanel" role'
            self._log_view_warning(msg, node)

        if 'nav-tabs' in classes and node.get('role') != 'tablist':
            msg = 'A tab list with class nav-tabs must have role="tablist"'
            self._log_view_warning(msg, node)

        if any(klass.startswith('alert-') for klass in classes):
            # an alert-link inside an alert does not need its own role
            if (
                node.get('role') not in ('alert', 'alertdialog', 'status')
                and 'alert-link' not in classes
            ):
                msg = ("An alert (class alert-*) must have an alert, alertdialog or "
                       "status role or an alert-link class. Please use alert and "
                       "alertdialog only for what expects to stop any activity to "
                       "be read immediately.")
                self._log_view_warning(msg, node)

        if any(klass.startswith('fa-') for klass in classes):
            # icons must carry a textual alternative for screen readers
            description = 'A <%s> with fa class (%s)' % (node.tag, expr)
            self._validate_fa_class_accessibility(node, description)

        if any(klass.startswith('btn') for klass in classes):
            if node.tag in ('a', 'button', 'select'):
                pass
            elif node.tag == 'input' and node.get('type') in ('button', 'submit', 'reset'):
                pass
            elif any(klass in classes for klass in ('btn-group', 'btn-toolbar', 'btn-ship')):
                pass
            else:
                msg = ("A simili button must be in tag a/button/select or tag `input` "
                       "with type button/submit/reset or have class in "
                       "btn-group/btn-toolbar/btn-ship")
                self._log_view_warning(msg, node)
    def _validate_fa_class_accessibility(self, node, description):
        """ Check that a FontAwesome icon node has a textual alternative.

        Warn unless a title/aria label or some text is found on the node
        itself, its siblings, its ancestors or its descendants.

        :param node: the etree element carrying the ``fa-*`` class
        :param description: human-readable description of the node, used in
            the warning message
        """
        valid_aria_attrs = {
            *att_names('title'), *att_names('aria-label'), *att_names('aria-labelledby'),
        }
        valid_t_attrs = {'t-value', 't-raw', 't-field', 't-esc'}

        ## Following or preceding text
        if (node.tail or '').strip() or (node.getparent().text or '').strip():
            # text<i class="fa-..."/> or <i class="fa-..."/>text or
            return

        ## Following or preceding text in span
        def has_text(elem):
            # direct text in a <span> sibling, or a <t> rendering a value
            if elem is None:
                return False
            if elem.tag == 'span' and elem.text:
                return True
            if elem.tag == 't' and (elem.get('t-esc') or elem.get('t-raw')):
                return True
            return False

        if has_text(node.getnext()) or has_text(node.getprevious()):
            return

        ## Aria label can be on ancestors
        def has_title_or_aria_label(node):
            return any(node.get(attr) for attr in valid_aria_attrs)

        parent = node.getparent()
        while parent is not None:
            if has_title_or_aria_label(parent):
                return
            parent = parent.getparent()

        ## And we ignore all elements with describing in children
        def contains_description(node, depth=0):
            if depth > 2:
                # NOTE(review): this guard only logs; recursion continues past
                # depth 2 — confirm whether an early return was intended here
                _logger.warning('excessive depth in fa')
            if any(node.get(attr) for attr in valid_t_attrs):
                return True
            if has_title_or_aria_label(node):
                return True
            if node.tag in ('label', 'field'):
                return True
            if node.tag == 'button' and node.get('string'):
                return True
            if node.text:  # not sure, does it match *[text()]
                return True
            return any(contains_description(child, depth+1) for child in node)

        if contains_description(node):
            return

        msg = ('%s must have title in its tag, parents, descendants or have text')
        self._log_view_warning(msg % description, node)
def _get_domain_identifiers(self, node, domain, use, expr=None):
try:
return get_domain_identifiers(domain)
except ValueError:
msg = _("Invalid domain format %(expr)s in %(use)s", expr=expr or domain, use=use)
self._raise_view_error(msg, node)
    def _check_field_paths(self, node, field_paths, model_name, use):
        """ Check whether the given field paths (dot-separated field names)
        correspond to actual sequences of fields on the given model.

        :param node: etree node the paths come from (for error reports)
        :param field_paths: iterable of dot-separated field names
        :param model_name: name of the model the first field belongs to
        :param use: human-readable description of where the paths are used
        """
        for field_path in field_paths:
            names = field_path.split('.')
            # walk down the relations, one model per path component
            Model = self.pool[model_name]
            for index, name in enumerate(names):
                if Model is None:
                    # previous component was not relational: path cannot continue
                    msg = _(
                        'Non-relational field %(field)r in path %(field_path)r in %(use)s)',
                        field=names[index - 1], field_path=field_path, use=use,
                    )
                    self._raise_view_error(msg, node)
                try:
                    field = Model._fields[name]
                except KeyError:
                    msg = _(
                        'Unknown field "%(model)s.%(field)s" in %(use)s)',
                        model=Model._name, field=name, use=use,
                    )
                    self._raise_view_error(msg, node)
                if not field._description_searchable:
                    msg = _(
                        'Unsearchable field %(field)r in path %(field_path)r in %(use)s)',
                        field=name, field_path=field_path, use=use,
                    )
                    self._raise_view_error(msg, node)
                # None when the field has no comodel (not relational)
                Model = self.pool.get(field.comodel_name)
#------------------------------------------------------
# QWeb template views
#------------------------------------------------------
def _read_template_keys(self):
""" Return the list of context keys to use for caching ``_read_template``. """
return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations']
    # apply ormcache decorator unless in dev mode...
    @api.model
    @tools.conditional(
        'xml' not in config['dev_mode'],
        tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'view_id',
                       'tuple(self._context.get(k) for k in self._read_template_keys())'),
    )
    def _read_template(self, view_id):
        """ Return the combined arch of ``view_id``, branded, as a unicode string. """
        arch_tree = self.browse(view_id)._get_combined_arch()
        self.distribute_branding(arch_tree)
        return etree.tostring(arch_tree, encoding='unicode')
@api.model
def get_view_id(self, template):
""" Return the view ID corresponding to ``template``, which may be a
view ID or an XML ID. Note that this method may be overridden for other
kinds of template values.
This method could return the ID of something that is not a view (when
using fallback to `_xmlid_to_res_id`).
"""
if isinstance(template, int):
return template
if '.' not in template:
raise ValueError('Invalid template id: %r' % template)
view = self.sudo().search([('key', '=', template)], limit=1)
return view and view.id or self.env['ir.model.data']._xmlid_to_res_id(template, raise_if_not_found=True)
def clear_cache(self):
""" Deprecated, use `clear_caches` instead. """
if 'xml' not in config['dev_mode']:
self.clear_caches()
def _contains_branded(self, node):
return node.tag == 't'\
or 't-raw' in node.attrib\
or 't-call' in node.attrib\
or any(self.is_node_branded(child) for child in node.iterdescendants())
def _pop_view_branding(self, element):
distributed_branding = dict(
(attribute, element.attrib.pop(attribute))
for attribute in MOVABLE_BRANDING
if element.get(attribute))
return distributed_branding
    def distribute_branding(self, e, branding=None, parent_xpath='',
                            index_map=ConstantMapping(1)):
        """ Propagate branding attributes (``data-oe-*``) down the arch.

        Subtrees under ``t-ignore`` or ``head`` are stripped of branding; a
        branded element whose children are themselves branded/qweb-active
        hands its branding down to them instead of keeping it.

        :param e: etree element to process
        :param branding: branding attributes inherited from the parent, if any
        :param parent_xpath: xpath of the parent, used to build ``data-oe-xpath``
        :param index_map: per-tag occurrence counters among siblings, used for
            the ``[n]`` index in the generated xpath
        """
        if e.get('t-ignore') or e.tag == 'head':
            # remove any view branding possibly injected by inheritance
            attrs = set(MOVABLE_BRANDING)
            for descendant in e.iterdescendants(tag=etree.Element):
                if not attrs.intersection(descendant.attrib):
                    continue
                self._pop_view_branding(descendant)
            # Remove the processing instructions indicating where nodes were
            # removed (see apply_inheritance_specs)
            for descendant in e.iterdescendants(tag=etree.ProcessingInstruction):
                if descendant.target == 'apply-inheritance-specs-node-removal':
                    descendant.getparent().remove(descendant)
            return

        node_path = e.get('data-oe-xpath')
        if node_path is None:
            node_path = "%s/%s[%d]" % (parent_xpath, e.tag, index_map[e.tag])
        if branding:
            if e.get('t-field'):
                e.set('data-oe-xpath', node_path)
            elif not e.get('data-oe-model'):
                e.attrib.update(branding)
                e.set('data-oe-xpath', node_path)
        if not e.get('data-oe-model'):
            # unbranded element: nothing to distribute further
            return

        if {'t-esc', 't-raw', 't-out'}.intersection(e.attrib):
            # nodes which fully generate their content and have no reason to
            # be branded because they can not sensibly be edited
            self._pop_view_branding(e)
        elif self._contains_branded(e):
            # if a branded element contains branded elements distribute own
            # branding to children unless it's t-raw, then just remove branding
            # on current element
            distributed_branding = self._pop_view_branding(e)

            if 't-raw' not in e.attrib:
                # TODO: collections.Counter if remove p2.6 compat
                # running index by tag type, for XPath query generation
                indexes = collections.defaultdict(lambda: 0)
                for child in e.iterchildren(etree.Element, etree.ProcessingInstruction):
                    if child.get('data-oe-xpath'):
                        # injected by view inheritance, skip otherwise
                        # generated xpath is incorrect
                        self.distribute_branding(child)
                    elif child.tag is etree.ProcessingInstruction:
                        # If a node is known to have been replaced during
                        # applying an inheritance, increment its index to
                        # compute an accurate xpath for subsequent nodes
                        if child.target == 'apply-inheritance-specs-node-removal':
                            indexes[child.text] += 1
                            e.remove(child)
                    else:
                        indexes[child.tag] += 1
                        self.distribute_branding(
                            child, distributed_branding,
                            parent_xpath=node_path, index_map=indexes)
def is_node_branded(self, node):
""" Finds out whether a node is branded or qweb-active (bears a
@data-oe-model or a @t-* *which is not t-field* as t-field does not
section out views)
:param node: an etree-compatible element to test
:type node: etree._Element
:rtype: boolean
"""
return any(
(attr in ('data-oe-model', 'groups') or (attr.startswith('t-')))
for attr in node.attrib
) or (
node.tag is etree.ProcessingInstruction
and node.target == 'apply-inheritance-specs-node-removal'
)
    @tools.ormcache('self.id')
    def get_view_xmlid(self):
        """ Return the fully qualified xml id (``module.name``) of this view. """
        domain = [('model', '=', 'ir.ui.view'), ('res_id', '=', self.id)]
        # assumes an ir.model.data entry exists for the view; IndexError otherwise
        xmlid = self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name'])[0]
        return '%s.%s' % (xmlid['module'], xmlid['name'])
@api.model
def render_public_asset(self, template, values=None):
template = self.sudo().browse(self.get_view_id(template))
template._check_view_access()
return template.sudo()._render(values, engine="ir.qweb")
def _render_template(self, template, values=None, engine='ir.qweb'):
return self.browse(self.get_view_id(template))._render(values, engine)
def _render(self, values=None, engine='ir.qweb', minimal_qcontext=False):
assert isinstance(self.id, int)
qcontext = dict() if minimal_qcontext else self._prepare_qcontext()
qcontext.update(values or {})
return self.env[engine]._render(self.id, qcontext)
    @api.model
    def _prepare_qcontext(self):
        """ Returns the qcontext : rendering context with website specific value (required
        to render website layout template)
        """
        qcontext = dict(
            env=self.env,
            user_id=self.env["res.users"].browse(self.env.user.id),
            res_company=self.env.company.sudo(),
            keep_query=keep_query,
            request=request,  # might be unbound if we're not in an httprequest context
            debug=request.session.debug if request else '',
            test_mode_enabled=bool(config['test_enable'] or config['test_file']),
            json=json_scriptsafe,
            quote_plus=werkzeug.urls.url_quote_plus,
            # safe_eval wrappers so templates cannot reach the raw modules
            time=safe_eval.time,
            datetime=safe_eval.datetime,
            relativedelta=relativedelta,
            xmlid=self.sudo().key,
            viewid=self.id,
            to_text=pycompat.to_text,
            image_data_uri=image_data_uri,
            # specific 'math' functions to ease rounding in templates and lessen controller marshmalling
            floor=math.floor,
            ceil=math.ceil,
        )
        return qcontext
#------------------------------------------------------
# Misc
#------------------------------------------------------
def open_translations(self):
""" Open a view for editing the translations of field 'arch_db'. """
return self.env['ir.translation'].translate_fields('ir.ui.view', self.id, 'arch_db')
    @api.model
    def _validate_custom_views(self, model):
        """Validate architecture of custom views (= without xml id) for a given model.
        This method is called at the end of registry update.
        """
        # select, per inheritance tree, the most recent active custom view
        # (one without an xml id belonging to a known module) for the model
        query = """SELECT max(v.id)
                     FROM ir_ui_view v
                LEFT JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
                    WHERE md.module IN (SELECT name FROM ir_module_module) IS NOT TRUE
                      AND v.model = %s
                      AND v.active = true
                 GROUP BY coalesce(v.inherit_id, v.id)"""
        self._cr.execute(query, [model])

        rec = self.browse(it[0] for it in self._cr.fetchall())
        # load_all_views: presumably makes _check_xml consider every
        # inheriting view — confirm against _check_xml's implementation
        return rec.with_context({'load_all_views': True})._check_xml()
@api.model
def _validate_module_views(self, module):
""" Validate the architecture of all the views of a given module that
are impacted by view updates, but have not been checked yet.
"""
assert self.pool._init
# only validate the views that still exist...
prefix = module + '.'
prefix_len = len(prefix)
names = tuple(
xmlid[prefix_len:]
for xmlid in self.pool.loaded_xmlids
if xmlid.startswith(prefix)
)
if not names:
return
# retrieve the views with an XML id that has not been checked yet, i.e.,
# the views with noupdate=True on their xml id
query = """
SELECT v.id
FROM ir_ui_view v
JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
WHERE md.module = %s AND md.name IN %s AND md.noupdate
"""
self._cr.execute(query, (module, names))
views = self.browse([row[0] for row in self._cr.fetchall()])
for view in views:
view._check_xml()
    def _create_all_specific_views(self, processed_modules):
        """To be overridden to have specific view behaviour on create"""
        # no-op by default; COW-aware modules override this hook
        pass
    def _get_specific_views(self):
        """ Given a view, return a record set containing all the specific views
        for that view's key.

        :returns: ir.ui.view recordset (empty for non-qweb views)
        """
        self.ensure_one()
        # Only qweb views have a specific counterpart
        if self.type != 'qweb':
            return self.env['ir.ui.view']
        # A specific view can have an xml_id if exported/imported but it will
        # not be equal to its key (only a generic view's will).
        return self.with_context(active_test=False).search([('key', '=', self.key)]).filtered(lambda r: not r.xml_id == r.key)
    def _load_records_write(self, values):
        """ During module update, when updating a generic view, we should also
        update its specific views (COW'd).
        Note that we will only update unmodified fields. That will mimic the
        noupdate behavior on views having an ir.model.data.

        :param values: field values being written on the generic view
        """
        if self.type == 'qweb':
            for cow_view in self._get_specific_views():
                authorized_vals = {}
                for key in values:
                    # only replicate fields the specific view did not customize
                    if key != 'inherit_id' and cow_view[key] == self[key]:
                        authorized_vals[key] = values[key]
                # if inherit_id update, replicate change on cow view but
                # only if that cow view inherit_id wasn't manually changed
                inherit_id = values.get('inherit_id')
                if inherit_id and self.inherit_id.id != inherit_id and \
                   cow_view.inherit_id.key == self.inherit_id.key:
                    self._load_records_write_on_cow(cow_view, inherit_id, authorized_vals)
                else:
                    cow_view.with_context(no_cow=True).write(authorized_vals)
        super(View, self)._load_records_write(values)
def _load_records_write_on_cow(self, cow_view, inherit_id, values):
# for modules updated before `website`, we need to
# store the change to replay later on cow views
if not hasattr(self.pool, 'website_views_to_adapt'):
self.pool.website_views_to_adapt = []
self.pool.website_views_to_adapt.append((
cow_view.id,
inherit_id,
values,
))
class ResetViewArchWizard(models.TransientModel):
    """ A wizard to compare and reset views architecture. """
    _name = "reset.view.arch.wizard"
    _description = "Reset View Architecture Wizard"

    view_id = fields.Many2one('ir.ui.view', string='View')
    view_name = fields.Char(related='view_id.name', string='View Name')
    has_diff = fields.Boolean(compute='_compute_arch_diff')
    arch_diff = fields.Html(string='Architecture Diff', readonly=True,
                            compute='_compute_arch_diff', sanitize_tags=False)
    reset_mode = fields.Selection([
        ('soft', 'Restore previous version (soft reset).'),
        ('hard', 'Reset to file version (hard reset).'),
        ('other_view', 'Reset to another view.')],
        string='Reset Mode', default='soft', required=True)
    compare_view_id = fields.Many2one('ir.ui.view', string='Compare To View')
    arch_to_compare = fields.Text('Arch To Compare To', compute='_compute_arch_diff')

    @api.model
    def default_get(self, fields):
        """ Pre-fill the wizard from the view(s) selected in the context. """
        view_ids = (self._context.get('active_model') == 'ir.ui.view' and
                    self._context.get('active_ids') or [])
        if len(view_ids) > 2:
            raise ValidationError(_("Can't compare more than two views."))
        result = super().default_get(fields)
        result['view_id'] = view_ids and view_ids[0]
        if len(view_ids) == 2:
            # two views selected: compare them with each other
            result['reset_mode'] = 'other_view'
            result['compare_view_id'] = view_ids[1]
        return result

    @api.depends('reset_mode', 'view_id', 'compare_view_id')
    def _compute_arch_diff(self):
        """ Depending of `reset_mode`, return the differences between the
        current view arch and either its previous arch, its initial arch or
        another view arch.
        """
        def get_table_name(view_id):
            # label shown above each diff column: display name + key/xml id
            name = view_id.display_name
            if view_id.key or view_id.xml_id:
                span = '<span class="ml-1 font-weight-normal small">(%s)</span>'
                name += span % (view_id.key or view_id.xml_id)
            return name

        for view in self:
            diff_to = False
            diff_to_name = False
            if view.reset_mode == 'soft':
                diff_to = view.view_id.arch_prev
                diff_to_name = _("Previous Arch")
            elif view.reset_mode == 'other_view':
                diff_to = view.compare_view_id.with_context(lang=None).arch
                diff_to_name = get_table_name(view.compare_view_id)
            elif view.reset_mode == 'hard' and view.view_id.arch_fs:
                diff_to = view.view_id.with_context(read_arch_from_file=True, lang=None).arch
                diff_to_name = _("File Arch")

            view.arch_to_compare = diff_to

            if not diff_to:
                view.arch_diff = False
                view.has_diff = False
            else:
                view_arch = view.view_id.with_context(lang=None).arch
                view.arch_diff = get_diff(
                    (view_arch, get_table_name(view.view_id) if view.reset_mode == 'other_view' else _("Current Arch")),
                    (diff_to, diff_to_name),
                )
                view.has_diff = view_arch != diff_to

    def reset_view_button(self):
        """ Apply the chosen reset mode to the view and close the wizard. """
        self.ensure_one()
        if self.reset_mode == 'other_view':
            # copy the compared arch over the current view
            self.view_id.write({'arch_db': self.arch_to_compare})
        else:
            self.view_id.reset_arch(self.reset_mode)
        return {'type': 'ir.actions.act_window_close'}
class NameManager:
    """ An object that manages all the named elements in a view.

    It records which field names, action names and element ids are available
    in a view, and which ones are required by expressions found during
    validation, so that :meth:`check` can report any mismatch.
    """

    def __init__(self, model):
        self.model = model
        self.available_fields = collections.defaultdict(dict)  # {field_name: field_info}
        self.available_actions = set()
        self.available_names = set()
        self.mandatory_fields = dict()  # {field_name: use}
        self.mandatory_parent_fields = dict()  # {field_name: use}
        self.mandatory_names = dict()  # {name: use}

    @lazy_property
    def field_info(self):
        # result of fields_get(), computed once per manager
        return self.model.fields_get()

    def has_field(self, name, info=frozendict()):
        """ Declare field ``name`` (with metadata ``info``) as present in the view. """
        self.available_fields[name].update(info)
        self.available_names.add(info.get('id') or name)

    def has_action(self, name):
        """ Declare action ``name`` as present in the view. """
        self.available_actions.add(name)

    def must_have_field(self, name, use):
        """ Require field ``name`` in the view; a ``parent.`` prefix targets
        the parent view. ``use`` describes where the requirement comes from. """
        if name.startswith('parent.'):
            self.mandatory_parent_fields[name[7:]] = use
        else:
            self.mandatory_fields[name] = use

    def must_have_fields(self, names, use):
        """ Require every field in ``names`` to be present in the view. """
        for name in names:
            self.must_have_field(name, use)

    def must_have_name(self, name, use):
        """ Require name or id ``name`` to be present in the view. """
        self.mandatory_names[name] = use

    def check(self, view):
        """ Verify that all mandatory names/fields are available, raising a
        view validation error on ``view`` otherwise. """
        # context for translations below
        context = view.env.context  # pylint: disable=unused-variable

        for name, use in self.mandatory_names.items():
            if name not in self.available_actions and name not in self.available_names:
                msg = _(
                    "Name or id %(name_or_id)r in %(use)s must be present in view but is missing.",
                    name_or_id=name, use=use,
                )
                view._raise_view_error(msg)

        for name in self.available_fields:
            if name not in self.model._fields and name not in self.field_info:
                message = _("Field `%(name)s` does not exist", name=name)
                view._raise_view_error(message)

        for name, use in self.mandatory_fields.items():
            if name == 'id':  # always available
                continue
            if "." in name:
                msg = _(
                    "Invalid composed field %(definition)s in %(use)s",
                    definition=name, use=use,
                )
                view._raise_view_error(msg)
            info = self.available_fields.get(name)
            if info is None:
                msg = _(
                    "Field %(name)r used in %(use)s must be present in view but is missing.",
                    name=name, use=use,
                )
                view._raise_view_error(msg)
            if info.get('select') == 'multi':  # mainly for searchpanel, but can be a generic behaviour.
                msg = _(
                    "Field %(name)r used in %(use)s is present in view but is in select multi.",
                    name=name, use=use,
                )
                view._raise_view_error(msg)

    def update_available_fields(self):
        """ Merge fields_get() metadata into the collected field info. """
        for name, info in self.available_fields.items():
            info.update(self.field_info.get(name, ()))
| 46.336094 | 106,295 |
18,499 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import os
from glob import glob
from logging import getLogger
from werkzeug import urls
import odoo
from odoo.tools import misc
from odoo import tools
from odoo import api, fields, http, models
from odoo.http import root
_logger = getLogger(__name__)

SCRIPT_EXTENSIONS = ('js',)
STYLE_EXTENSIONS = ('css', 'scss', 'sass', 'less')
TEMPLATE_EXTENSIONS = ('xml',)
# Default sequence for 'ir.asset' records; also the pivot between records
# applied before and after the addon manifests (see IrAsset._fill_asset_paths).
DEFAULT_SEQUENCE = 16

# Directives are stored in variables for ease of use and syntax checks.
APPEND_DIRECTIVE = 'append'
PREPEND_DIRECTIVE = 'prepend'
AFTER_DIRECTIVE = 'after'
BEFORE_DIRECTIVE = 'before'
REMOVE_DIRECTIVE = 'remove'
REPLACE_DIRECTIVE = 'replace'
INCLUDE_DIRECTIVE = 'include'
# Those are the directives used with a 'target' argument/field.
DIRECTIVES_WITH_TARGET = [AFTER_DIRECTIVE, BEFORE_DIRECTIVE, REPLACE_DIRECTIVE]
# Characters that mark a path definition as a glob pattern.
WILDCARD_CHARACTERS = {'*', "?", "[", "]"}
def fs2web(path):
    """Convert a file-system path into a web path (forward-slash separated)."""
    separator = os.path.sep
    if separator == '/':
        # POSIX paths are already valid web paths
        return path
    return '/'.join(path.split(separator))
def can_aggregate(url):
    """Return True when ``url`` is a relative path that may be bundled:
    no scheme, no host, and not served through /web/content."""
    parsed = urls.url_parse(url)
    if parsed.scheme or parsed.netloc:
        return False
    return not url.startswith('/web/content')
def is_wildcard_glob(path):
    """Determine whether a path is a wildcarded glob eg: "/web/file[14].*"
    or a genuine single file path "/web/myfile.scss"."""
    return any(char in WILDCARD_CHARACTERS for char in path)
class IrAsset(models.Model):
    """This model contributes to two things:

        1. It provides a function returning a list of all file paths declared
        in a given list of addons (see _get_addon_paths);
        2. It allows to create 'ir.asset' records to add additional directives
        to certain bundles.
    """
    _name = 'ir.asset'
    _description = 'Asset'
    _order = 'sequence, id'

    @api.model_create_multi
    def create(self, vals_list):
        # any asset change invalidates the cached asset path computations
        self.clear_caches()
        return super().create(vals_list)

    def write(self, values):
        self.clear_caches()
        return super().write(values)

    def unlink(self):
        self.clear_caches()
        return super().unlink()

    name = fields.Char(string='Name', required=True)
    bundle = fields.Char(string='Bundle name', required=True)
    directive = fields.Selection(string='Directive', selection=[
        (APPEND_DIRECTIVE, 'Append'),
        (PREPEND_DIRECTIVE, 'Prepend'),
        (AFTER_DIRECTIVE, 'After'),
        (BEFORE_DIRECTIVE, 'Before'),
        (REMOVE_DIRECTIVE, 'Remove'),
        (REPLACE_DIRECTIVE, 'Replace'),
        (INCLUDE_DIRECTIVE, 'Include')], default=APPEND_DIRECTIVE)
    path = fields.Char(string='Path (or glob pattern)', required=True)
    target = fields.Char(string='Target')
    active = fields.Boolean(string='active', default=True)
    sequence = fields.Integer(string="Sequence", default=DEFAULT_SEQUENCE, required=True)

    def _get_asset_paths(self, bundle, addons=None, css=False, js=False, xml=False):
        """
        Fetches all asset file paths from a given list of addons matching a
        certain bundle. The returned list is composed of tuples containing the
        file path [1], the first addon calling it [0] and the bundle name.
        Asset loading is performed as follows:

        1. All 'ir.asset' records matching the given bundle and with a sequence
        strictly less than 16 are applied.

        2. The manifests of the given addons are checked for assets declaration
        for the given bundle. If any, they are read sequentially and their
        operations are applied to the current list.

        3. After all manifests have been parsed, the remaining 'ir.asset'
        records matching the bundle are also applied to the current list.

        :param bundle: name of the bundle from which to fetch the file paths
        :param addons: list of addon names as strings. The files returned will
            only be contained in the given addons.
        :param css: boolean: whether or not to include style files
        :param js: boolean: whether or not to include script files
        :param xml: boolean: whether or not to include template files
        :returns: the list of tuples (path, addon, bundle)
        """
        installed = self._get_installed_addons_list()
        if addons is None:
            addons = self._get_active_addons_list()

        asset_paths = AssetPaths()
        self._fill_asset_paths(bundle, addons, installed, css, js, xml, asset_paths, [])
        return asset_paths.list

    def _fill_asset_paths(self, bundle, addons, installed, css, js, xml, asset_paths, seen):
        """
        Fills the given AssetPaths instance by applying the operations found in
        the matching bundle of the given addons manifests.
        See `_get_asset_paths` for more information.

        :param bundle: name of the bundle from which to fetch the file paths
        :param addons: list of addon names as strings
        :param installed: list of installed addon names; only their files may
            be fetched
        :param css: boolean: whether or not to include style files
        :param js: boolean: whether or not to include script files
        :param xml: boolean: whether or not to include template files
        :param asset_paths: the AssetPath object to fill
        :param seen: a list of bundles already checked to avoid circularity
        """
        if bundle in seen:
            raise Exception("Circular assets bundle declaration: %s" % " > ".join(seen + [bundle]))

        # lazily load the addons manifests if the http root did not do it yet
        if not root._loaded:
            root.load_addons()
            root._loaded = True
        manifest_cache = http.addons_manifest
        exts = []
        if js:
            exts += SCRIPT_EXTENSIONS
        if css:
            exts += STYLE_EXTENSIONS
        if xml:
            exts += TEMPLATE_EXTENSIONS

        # this index is used for prepending: files are inserted at the beginning
        # of the CURRENT bundle.
        bundle_start_index = len(asset_paths.list)

        def process_path(directive, target, path_def):
            """
            This sub function is meant to take a directive and a set of
            arguments and apply them to the current asset_paths list
            accordingly.

            It is nested inside `_fill_asset_paths` since we need the current
            list of addons, extensions, asset_paths and manifest_cache.

            :param directive: string
            :param target: string or None or False
            :param path_def: string
            """
            if directive == INCLUDE_DIRECTIVE:
                # recursively call this function for each INCLUDE_DIRECTIVE directive.
                self._fill_asset_paths(path_def, addons, installed, css, js, xml, asset_paths, seen + [bundle])
                return

            addon, paths = self._get_paths(path_def, installed, exts)

            # retrieve target index when it applies
            if directive in DIRECTIVES_WITH_TARGET:
                _, target_paths = self._get_paths(target, installed, exts)
                if not target_paths and target.rpartition('.')[2] not in exts:
                    # nothing to do: the extension of the target is wrong
                    return
                target_to_index = len(target_paths) and target_paths[0] or target
                target_index = asset_paths.index(target_to_index, addon, bundle)

            if directive == APPEND_DIRECTIVE:
                asset_paths.append(paths, addon, bundle)
            elif directive == PREPEND_DIRECTIVE:
                asset_paths.insert(paths, addon, bundle, bundle_start_index)
            elif directive == AFTER_DIRECTIVE:
                asset_paths.insert(paths, addon, bundle, target_index + 1)
            elif directive == BEFORE_DIRECTIVE:
                asset_paths.insert(paths, addon, bundle, target_index)
            elif directive == REMOVE_DIRECTIVE:
                asset_paths.remove(paths, addon, bundle)
            elif directive == REPLACE_DIRECTIVE:
                asset_paths.insert(paths, addon, bundle, target_index)
                asset_paths.remove(target_paths, addon, bundle)
            else:
                # this should never happen
                raise ValueError("Unexpected directive")

        # 1. Process the first sequence of 'ir.asset' records
        assets = self._get_related_assets([('bundle', '=', bundle)]).filtered('active')
        for asset in assets.filtered(lambda a: a.sequence < DEFAULT_SEQUENCE):
            process_path(asset.directive, asset.target, asset.path)

        # 2. Process all addons' manifests.
        for addon in self._topological_sort(tuple(addons)):
            manifest = manifest_cache.get(addon)
            if not manifest:
                continue
            manifest_assets = manifest.get('assets', {})
            for command in manifest_assets.get(bundle, []):
                directive, target, path_def = self._process_command(command)
                process_path(directive, target, path_def)

        # 3. Process the rest of 'ir.asset' records
        for asset in assets.filtered(lambda a: a.sequence >= DEFAULT_SEQUENCE):
            process_path(asset.directive, asset.target, asset.path)

    def _get_related_assets(self, domain):
        """
        Returns a set of assets matching the domain, regardless of their
        active state. This method can be overridden to filter the results.
        :param domain: search domain
        :returns: ir.asset recordset
        """
        return self.with_context(active_test=False).sudo().search(domain, order='sequence, id')

    def _get_related_bundle(self, target_path_def, root_bundle):
        """
        Returns the first bundle directly defining a glob matching the target
        path. This is useful when generating an 'ir.asset' record to override
        a specific asset and target the right bundle, i.e. the first one
        defining the target path.

        :param target_path_def: string: path to match.
        :root_bundle: string: bundle from which to initiate the search.
        :returns: the first matching bundle or None
        """
        ext = target_path_def.split('.')[-1]
        installed = self._get_installed_addons_list()
        target_path = self._get_paths(target_path_def, installed)[1][0]

        css = ext in STYLE_EXTENSIONS
        js = ext in SCRIPT_EXTENSIONS
        xml = ext in TEMPLATE_EXTENSIONS

        asset_paths = self._get_asset_paths(root_bundle, css=css, js=js, xml=xml)

        for path, _, bundle in asset_paths:
            if path == target_path:
                return bundle

        # not found in any sub-bundle: attribute it to the root bundle itself
        return root_bundle

    def _get_active_addons_list(self):
        """Can be overridden to filter the returned list of active modules."""
        return self._get_installed_addons_list()

    @api.model
    @tools.ormcache('addons_tuple')
    def _topological_sort(self, addons_tuple):
        """Returns a list of sorted modules name accord to the spec in ir.module.module
        that is, application desc, sequence, name then topologically sorted"""
        IrModule = self.env['ir.module.module']

        def mapper(addon):
            # build a minimal manifest-derived dict for sorting purposes
            manif = http.addons_manifest.get(addon, {})
            from_terp = IrModule.get_values_from_terp(manif)
            from_terp['name'] = addon
            from_terp['depends'] = manif.get('depends', ['base'])
            return from_terp

        manifs = map(mapper, addons_tuple)

        def sort_key(manif):
            return (not manif['application'], int(manif['sequence']), manif['name'])

        manifs = sorted(manifs, key=sort_key)

        return misc.topological_sort({manif['name']: manif['depends'] for manif in manifs})

    @api.model
    @tools.ormcache_context(keys='install_module')
    def _get_installed_addons_list(self):
        """
        Returns the list of all installed addons.
        :returns: string[]: list of module names
        """
        # Main source: the current registry list
        # Second source of modules: server wide modules
        # Third source: the currently loading module from the context (similar to ir_ui_view)
        return self.env.registry._init_modules | set(odoo.conf.server_wide_modules or []) | set(self.env.context.get('install_module', []))

    def _get_paths(self, path_def, installed, extensions=None):
        """
        Returns a list of file paths matching a given glob (path_def) as well as
        the addon targeted by the path definition. If no file matches that glob,
        the path definition is returned as is. This is either because the path is
        not correctly written or because it points to a URL.

        :param path_def: the definition (glob) of file paths to match
        :param installed: the list of installed addons
        :param extensions: a list of extensions that found files must match
        :returns: a tuple: the addon targeted by the path definition [0] and the
            list of file paths matching the definition [1] (or the glob itself if
            none). Note that these paths are filtered on the given `extensions`.
        """
        paths = []
        path_url = fs2web(path_def)
        path_parts = [part for part in path_url.split('/') if part]
        addon = path_parts[0]
        addon_manifest = http.addons_manifest.get(addon)

        safe_path = True
        if addon_manifest:
            if addon not in installed:
                # Assert that the path is in the installed addons
                raise Exception("Unallowed to fetch files from addon %s" % addon)
            addons_path = os.path.join(addon_manifest['addons_path'], '')[:-1]
            full_path = os.path.normpath(os.path.join(addons_path, *path_parts))

            # first security layer: forbid escape from the current addon
            # "/mymodule/../myothermodule" is forbidden
            # the condition after the or is to further guarantee that we won't access
            # a directory that happens to be named like an addon (web....)
            if addon not in full_path or addons_path not in full_path:
                addon = None
                safe_path = False
            else:
                paths = [
                    path for path in sorted(glob(full_path, recursive=True))
                ]

            # second security layer: do we have the right to access the files
            # that are grabbed by the glob ?
            # In particular we don't want to expose data in xmls of the module
            def is_safe_path(path):
                try:
                    misc.file_path(path, SCRIPT_EXTENSIONS + STYLE_EXTENSIONS + TEMPLATE_EXTENSIONS)
                except (ValueError, FileNotFoundError):
                    return False
                if path.rpartition('.')[2] in TEMPLATE_EXTENSIONS:
                    # normpath will strip the trailing /, which is why it has to be added afterwards
                    static_path = os.path.normpath("%s/static" % addon) + os.path.sep
                    # Forbid xml to leak
                    return static_path in path
                return True

            len_paths = len(paths)
            paths = list(filter(is_safe_path, paths))
            safe_path = safe_path and len_paths == len(paths)

            # When fetching template file paths, we need the full paths since xml
            # files are read from the file system. But web assets (scripts and
            # stylesheets) must be loaded using relative paths, hence the trimming
            # for non-xml file paths.
            paths = [path if path.split('.')[-1] in TEMPLATE_EXTENSIONS else fs2web(path[len(addons_path):]) for path in paths]

        else:
            addon = None

        if not paths and (not can_aggregate(path_url) or (safe_path and not is_wildcard_glob(path_url))):
            # No file matching the path; the path_def could be a url.
            paths = [path_url]

        if not paths:
            msg = f'IrAsset: the path "{path_def}" did not resolve to anything.'
            if not safe_path:
                msg += " It may be due to security reasons."
            _logger.warning(msg)
        # Paths are filtered on the extensions (if any).
        return addon, [
            path
            for path in paths
            if not extensions or path.split('.')[-1] in extensions
        ]

    def _process_command(self, command):
        """Parses a given command to return its directive, target and path definition."""
        if isinstance(command, str):
            # Default directive: append
            directive, target, path_def = APPEND_DIRECTIVE, None, command
        elif command[0] in DIRECTIVES_WITH_TARGET:
            directive, target, path_def = command
        else:
            directive, path_def = command
            target = None
        return directive, target, path_def
class AssetPaths:
    """Ordered collection of asset tuples ``(path, addon, bundle)``.

    ``self.list`` preserves insertion order while ``self.memo`` (a set of the
    paths alone) gives O(1) membership tests. Every mutating operation keeps
    the two structures in sync.
    """
    def __init__(self):
        self.list = []
        self.memo = set()
    def index(self, path, addon, bundle):
        """Return the position of ``path`` in the list; raise if it is unknown."""
        if path not in self.memo:
            self._raise_not_found(path, bundle)
        return next((position for position, asset in enumerate(self.list) if asset[0] == path), None)
    def append(self, paths, addon, bundle):
        """Add each not-yet-seen path at the end of the list."""
        for path in paths:
            if path in self.memo:
                continue
            self.memo.add(path)
            self.list.append((path, addon, bundle))
    def insert(self, paths, addon, bundle, index):
        """Splice each not-yet-seen path into the list at ``index``."""
        pending = []
        for path in paths:
            if path in self.memo:
                continue
            self.memo.add(path)
            pending.append((path, addon, bundle))
        self.list[index:index] = pending
    def remove(self, paths_to_remove, addon, bundle):
        """Drop the given paths; raise if none of them is currently present."""
        known = self.memo.intersection(paths_to_remove)
        if known:
            # In-place slice assignment so external aliases of the list stay valid.
            self.list[:] = [asset for asset in self.list if asset[0] not in known]
            self.memo -= known
            return
        if paths_to_remove:
            self._raise_not_found(paths_to_remove, bundle)
    def _raise_not_found(self, path, bundle):
        raise ValueError("File(s) %s not found in bundle %s" % (path, bundle))
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import io
import logging
import os
import re
from odoo import api, fields, models, tools, _, Command
from odoo.exceptions import ValidationError, UserError
from odoo.modules.module import get_resource_path
from random import randrange
from PIL import Image
_logger = logging.getLogger(__name__)
class Company(models.Model):
    """A company (legal entity) of the database.

    Most identity data (name, address, email, logo, ...) actually lives on
    the backing ``res.partner`` record referenced by ``partner_id``; many
    fields below are related fields or compute/inverse proxies onto it.
    """
    _name = "res.company"
    _description = 'Companies'
    _order = 'sequence, name'
    def copy(self, default=None):
        """Duplication is forbidden: a company must be created from scratch."""
        raise UserError(_('Duplicating a company is not allowed. Please create a new company instead.'))
    def _get_logo(self):
        """Return the stock company logo shipped with `base`, base64-encoded."""
        return base64.b64encode(open(os.path.join(tools.config['root_path'], 'addons', 'base', 'static', 'img', 'res_company_logo.png'), 'rb') .read())
    def _default_currency_id(self):
        """Default currency: the currency of the current user's company."""
        return self.env.user.company_id.currency_id
    def _get_default_favicon(self, original=False):
        """Return the default favicon as base64.

        :param original: if True, return the stock favicon unchanged;
            otherwise paint a randomly colored 1px bar at the bottom so each
            company gets a visually distinct favicon.
        """
        img_path = get_resource_path('web', 'static/img/favicon.ico')
        with tools.file_open(img_path, 'rb') as f:
            if original:
                return base64.b64encode(f.read())
            # Modify the source image to add a colored bar on the bottom
            # This could seem overkill to modify the pixels 1 by 1, but
            # Pillow doesn't provide an easy way to do it, and this
            # is acceptable for a 16x16 image.
            color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
            original = Image.open(f)
            new_image = Image.new('RGBA', original.size)
            height = original.size[1]
            width = original.size[0]
            bar_size = 1
            for y in range(height):
                for x in range(width):
                    pixel = original.getpixel((x, y))
                    if height - bar_size <= y + 1 <= height:
                        new_image.putpixel((x, y), (color[0], color[1], color[2], 255))
                    else:
                        new_image.putpixel((x, y), (pixel[0], pixel[1], pixel[2], pixel[3]))
            stream = io.BytesIO()
            new_image.save(stream, format="ICO")
            return base64.b64encode(stream.getvalue())
    # --- Fields: most contact data is related to / synced with partner_id ---
    name = fields.Char(related='partner_id.name', string='Company Name', required=True, store=True, readonly=False)
    sequence = fields.Integer(help='Used to order Companies in the company switcher', default=10)
    parent_id = fields.Many2one('res.company', string='Parent Company', index=True)
    child_ids = fields.One2many('res.company', 'parent_id', string='Child Companies')
    partner_id = fields.Many2one('res.partner', string='Partner', required=True)
    report_header = fields.Html(string='Company Tagline', help="Appears by default on the top right corner of your printed documents (report header).")
    report_footer = fields.Html(string='Report Footer', translate=True, help="Footer text displayed at the bottom of all reports.")
    company_details = fields.Html(string='Company Details', help="Header text displayed at the top of all reports.")
    logo = fields.Binary(related='partner_id.image_1920', default=_get_logo, string="Company Logo", readonly=False)
    # logo_web: do not store in attachments, since the image is retrieved in SQL for
    # performance reasons (see addons/web/controllers/main.py, Binary.company_logo)
    logo_web = fields.Binary(compute='_compute_logo_web', store=True, attachment=False)
    currency_id = fields.Many2one('res.currency', string='Currency', required=True, default=lambda self: self._default_currency_id())
    user_ids = fields.Many2many('res.users', 'res_company_users_rel', 'cid', 'user_id', string='Accepted Users')
    # Address fields mirror the partner's "contact" address (see _compute_address
    # and the per-field inverse methods below).
    street = fields.Char(compute='_compute_address', inverse='_inverse_street')
    street2 = fields.Char(compute='_compute_address', inverse='_inverse_street2')
    zip = fields.Char(compute='_compute_address', inverse='_inverse_zip')
    city = fields.Char(compute='_compute_address', inverse='_inverse_city')
    state_id = fields.Many2one(
        'res.country.state', compute='_compute_address', inverse='_inverse_state',
        string="Fed. State", domain="[('country_id', '=?', country_id)]"
    )
    bank_ids = fields.One2many(related='partner_id.bank_ids', readonly=False)
    country_id = fields.Many2one('res.country', compute='_compute_address', inverse='_inverse_country', string="Country")
    email = fields.Char(related='partner_id.email', store=True, readonly=False)
    phone = fields.Char(related='partner_id.phone', store=True, readonly=False)
    mobile = fields.Char(related='partner_id.mobile', store=True, readonly=False)
    website = fields.Char(related='partner_id.website', readonly=False)
    vat = fields.Char(related='partner_id.vat', string="Tax ID", readonly=False)
    company_registry = fields.Char(compute='_compute_company_registry', store=True, readonly=False)
    paperformat_id = fields.Many2one('report.paperformat', 'Paper format', default=lambda self: self.env.ref('base.paperformat_euro', raise_if_not_found=False))
    external_report_layout_id = fields.Many2one('ir.ui.view', 'Document Template')
    base_onboarding_company_state = fields.Selection([
        ('not_done', "Not done"), ('just_done', "Just done"), ('done', "Done")], string="State of the onboarding company step", default='not_done')
    favicon = fields.Binary(string="Company Favicon", help="This field holds the image used to display a favicon for a given company.", default=_get_default_favicon)
    # Report layout customization (font, colors, background).
    font = fields.Selection([("Lato", "Lato"), ("Roboto", "Roboto"), ("Open_Sans", "Open Sans"), ("Montserrat", "Montserrat"), ("Oswald", "Oswald"), ("Raleway", "Raleway")], default="Lato")
    primary_color = fields.Char()
    secondary_color = fields.Char()
    layout_background = fields.Selection([('Blank', 'Blank'), ('Geometric', 'Geometric'), ('Custom', 'Custom')], default="Blank", required=True)
    layout_background_image = fields.Binary("Background Image")
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'The company name must be unique !')
    ]
    def init(self):
        """Backfill a paper format on companies that lack one (install hook)."""
        for company in self.search([('paperformat_id', '=', False)]):
            paperformat_euro = self.env.ref('base.paperformat_euro', False)
            if paperformat_euro:
                company.write({'paperformat_id': paperformat_euro.id})
        sup = super(Company, self)
        if hasattr(sup, 'init'):
            sup.init()
    def _get_company_address_field_names(self):
        """ Return a list of fields coming from the address partner to match
        on company address fields. Fields are labeled same on both models. """
        return ['street', 'street2', 'city', 'zip', 'state_id', 'country_id']
    def _get_company_address_update(self, partner):
        """Map the partner's address values onto the company field names."""
        return dict((fname, partner[fname])
                    for fname in self._get_company_address_field_names())
    def _compute_company_registry(self):
        # exists to allow overrides
        for company in self:
            company.company_registry = company.company_registry
    # TODO @api.depends(): currently now way to formulate the dependency on the
    # partner's contact address
    def _compute_address(self):
        """Copy the partner's 'contact' address onto the company address fields."""
        for company in self.filtered(lambda company: company.partner_id):
            address_data = company.partner_id.sudo().address_get(adr_pref=['contact'])
            if address_data['contact']:
                partner = company.partner_id.browse(address_data['contact']).sudo()
                company.update(company._get_company_address_update(partner))
    # Each address field below is written back onto the partner individually.
    def _inverse_street(self):
        for company in self:
            company.partner_id.street = company.street
    def _inverse_street2(self):
        for company in self:
            company.partner_id.street2 = company.street2
    def _inverse_zip(self):
        for company in self:
            company.partner_id.zip = company.zip
    def _inverse_city(self):
        for company in self:
            company.partner_id.city = company.city
    def _inverse_state(self):
        for company in self:
            company.partner_id.state_id = company.state_id
    def _inverse_country(self):
        for company in self:
            company.partner_id.country_id = company.country_id
    @api.depends('partner_id.image_1920')
    def _compute_logo_web(self):
        """Derive a resized (max width 180) web logo from the partner image."""
        for company in self:
            company.logo_web = tools.image_process(company.partner_id.image_1920, size=(180, 0))
    @api.onchange('state_id')
    def _onchange_state(self):
        if self.state_id.country_id:
            self.country_id = self.state_id.country_id
    @api.onchange('country_id')
    def _onchange_country_id(self):
        if self.country_id:
            self.currency_id = self.country_id.currency_id
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Name search; with 'user_preference' in context, restrict the domain
        to the user's companies and search as superuser."""
        context = dict(self.env.context)
        newself = self
        if context.pop('user_preference', None):
            # We browse as superuser. Otherwise, the user would be able to
            # select only the currently visible companies (according to rules,
            # which are probably to allow to see the child companies) even if
            # she belongs to some other companies.
            companies = self.env.user.company_ids
            args = (args or []) + [('id', 'in', companies.ids)]
            newself = newself.sudo()
        return super(Company, newself.with_context(context))._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
    @api.model
    @api.returns('self', lambda value: value.id)
    def _company_default_get(self, object=False, field=False):
        """ Returns the user's company
            - Deprecated
        """
        _logger.warning("The method '_company_default_get' on res.company is deprecated and shouldn't be used anymore")
        return self.env.company
    # deprecated, use clear_caches() instead
    def cache_restart(self):
        self.clear_caches()
    @api.model
    def create(self, vals):
        """Create a company and, unless a partner is supplied, its backing
        partner. Also links the creating user to the new company and makes
        sure the selected currency is active.
        """
        if not vals.get('favicon'):
            vals['favicon'] = self._get_default_favicon()
        if not vals.get('name') or vals.get('partner_id'):
            self.clear_caches()
            return super(Company, self).create(vals)
        partner = self.env['res.partner'].create({
            'name': vals['name'],
            'is_company': True,
            'image_1920': vals.get('logo'),
            'email': vals.get('email'),
            'phone': vals.get('phone'),
            'website': vals.get('website'),
            'vat': vals.get('vat'),
            'country_id': vals.get('country_id'),
        })
        # compute stored fields, for example address dependent fields
        partner.flush()
        vals['partner_id'] = partner.id
        self.clear_caches()
        company = super(Company, self).create(vals)
        # The write is made on the user to set it automatically in the multi company group.
        self.env.user.write({'company_ids': [Command.link(company.id)]})
        # Make sure that the selected currency is enabled
        if vals.get('currency_id'):
            currency = self.env['res.currency'].browse(vals['currency_id'])
            if not currency.active:
                currency.write({'active': True})
        return company
    def write(self, values):
        """Write on companies; activates a newly selected currency and
        invalidates the cached address fields when they are touched."""
        self.clear_caches()
        # Make sure that the selected currency is enabled
        if values.get('currency_id'):
            currency = self.env['res.currency'].browse(values['currency_id'])
            if not currency.active:
                currency.write({'active': True})
        res = super(Company, self).write(values)
        # invalidate company cache to recompute address based on updated partner
        company_address_fields = self._get_company_address_field_names()
        company_address_fields_upd = set(company_address_fields) & set(values.keys())
        if company_address_fields_upd:
            self.invalidate_cache(fnames=company_address_fields)
        return res
    @api.constrains('parent_id')
    def _check_parent_id(self):
        """Forbid cycles in the company hierarchy."""
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive companies.'))
    def open_company_edit_report(self):
        """Open the company-related settings form."""
        self.ensure_one()
        return self.env['res.config.settings'].open_company()
    def write_company_and_print_report(self):
        """Print the report named in the context for the active records, if any."""
        context = self.env.context
        report_name = context.get('default_report_name')
        active_ids = context.get('active_ids')
        active_model = context.get('active_model')
        if report_name and active_ids and active_model:
            docids = self.env[active_model].browse(active_ids)
            return (self.env['ir.actions.report'].search([('report_name', '=', report_name)], limit=1)
                    .report_action(docids))
    @api.model
    def action_open_base_onboarding_company(self):
        """ Onboarding step for company basic information. """
        action = self.env["ir.actions.actions"]._for_xml_id("base.action_open_base_onboarding_company")
        action['res_id'] = self.env.company.id
        return action
    def set_onboarding_step_done(self, step_name):
        """Advance an onboarding step from 'not_done' to 'just_done'."""
        if self[step_name] == 'not_done':
            self[step_name] = 'just_done'
    def get_and_update_onbarding_state(self, onboarding_state, steps_states):
        """ Needed to display onboarding animations only one time. """
        # Returns the states as they were BEFORE this call ('just_done' is
        # reported once, then persisted as 'done').
        old_values = {}
        all_done = True
        for step_state in steps_states:
            old_values[step_state] = self[step_state]
            if self[step_state] == 'just_done':
                self[step_state] = 'done'
            all_done = all_done and self[step_state] == 'done'
        if all_done:
            if self[onboarding_state] == 'not_done':
                # string `onboarding_state` instead of variable name is not an error
                old_values['onboarding_state'] = 'just_done'
            else:
                old_values['onboarding_state'] = 'done'
            self[onboarding_state] = 'done'
        return old_values
    def action_save_onboarding_company_step(self):
        """Mark the company onboarding step done once an address is set."""
        if bool(self.street):
            self.set_onboarding_step_done('base_onboarding_company_state')
    @api.model
    def _get_main_company(self):
        """Return the 'base.main_company' record, or the oldest company as a fallback."""
        try:
            main_company = self.sudo().env.ref('base.main_company')
        except ValueError:
            main_company = self.env['res.company'].sudo().search([], limit=1, order="id")
        return main_company
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import math
import re
import time
from lxml import etree
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
try:
from num2words import num2words
except ImportError:
_logger.warning("The num2words python library is not installed, amount-to-text features won't be fully available.")
num2words = None
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class Currency(models.Model):
    """A currency, identified by its ISO 4217 code in ``name``.

    Conversion is driven by ``res.currency.rate`` records; amounts are
    rounded according to ``rounding`` (see :meth:`round`, :meth:`is_zero`,
    :meth:`compare_amounts`, :meth:`_convert`).
    """
    _name = "res.currency"
    _description = "Currency"
    _order = 'active desc, name'
    # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
    name = fields.Char(string='Currency', size=3, required=True, help="Currency Code (ISO 4217)")
    full_name = fields.Char(string='Name')
    symbol = fields.Char(help="Currency sign, to be used when printing amounts.", required=True)
    rate = fields.Float(compute='_compute_current_rate', string='Current Rate', digits=0,
                        help='The rate of the currency to the currency of rate 1.')
    inverse_rate = fields.Float(compute='_compute_current_rate', digits=0, readonly=True,
                                help='The currency of rate 1 to the rate of the currency.')
    rate_string = fields.Char(compute='_compute_current_rate')
    rate_ids = fields.One2many('res.currency.rate', 'currency_id', string='Rates')
    rounding = fields.Float(string='Rounding Factor', digits=(12, 6), default=0.01,
        help='Amounts in this currency are rounded off to the nearest multiple of the rounding factor.')
    decimal_places = fields.Integer(compute='_compute_decimal_places', store=True,
        help='Decimal places taken into account for operations on amounts in this currency. It is determined by the rounding factor.')
    active = fields.Boolean(default=True)
    position = fields.Selection([('after', 'After Amount'), ('before', 'Before Amount')], default='after',
        string='Symbol Position', help="Determines where the currency symbol should be placed after or before the amount.")
    date = fields.Date(compute='_compute_date')
    currency_unit_label = fields.Char(string="Currency Unit", help="Currency Unit Name")
    currency_subunit_label = fields.Char(string="Currency Subunit", help="Currency Subunit Name")
    is_current_company_currency = fields.Boolean(compute='_compute_is_current_company_currency')
    _sql_constraints = [
        ('unique_name', 'unique (name)', 'The currency code must be unique!'),
        ('rounding_gt_zero', 'CHECK (rounding>0)', 'The rounding factor must be greater than 0!')
    ]
    @api.model_create_multi
    def create(self, vals_list):
        """Create currencies, then re-evaluate the multi-currency user group."""
        res = super().create(vals_list)
        self._toggle_group_multi_currency()
        return res
    def unlink(self):
        """Delete currencies, then re-evaluate the multi-currency user group."""
        res = super().unlink()
        self._toggle_group_multi_currency()
        return res
    def write(self, vals):
        """Write; only an 'active' change can affect the multi-currency group."""
        res = super().write(vals)
        if 'active' not in vals:
            return res
        self._toggle_group_multi_currency()
        return res
    @api.model
    def _toggle_group_multi_currency(self):
        """
        Automatically activate group_multi_currency if there is more than 1 active currency; deactivate it otherwise
        """
        active_currency_count = self.search_count([('active', '=', True)])
        if active_currency_count > 1:
            self._activate_group_multi_currency()
        elif active_currency_count <= 1:
            self._deactivate_group_multi_currency()
    @api.model
    def _activate_group_multi_currency(self):
        """Grant the multi-currency group to all internal users."""
        group_user = self.env.ref('base.group_user', raise_if_not_found=False)
        group_mc = self.env.ref('base.group_multi_currency', raise_if_not_found=False)
        if group_user and group_mc:
            group_user.sudo()._apply_group(group_mc)
    @api.model
    def _deactivate_group_multi_currency(self):
        """Withdraw the multi-currency group from all internal users."""
        group_user = self.env.ref('base.group_user', raise_if_not_found=False)
        group_mc = self.env.ref('base.group_multi_currency', raise_if_not_found=False)
        if group_user and group_mc:
            group_user.sudo()._remove_group(group_mc.sudo())
    @api.constrains('active')
    def _check_company_currency_stays_active(self):
        """Prevent archiving a currency that is still used by a company."""
        if self._context.get('install_mode') or self._context.get('force_deactivate'):
            # install_mode : At install, when this check is run, the "active" field of a currency added to a company will
            # still be evaluated as False, despite it's automatically set at True when added to the company.
            # force_deactivate : Allows deactivation of a currency in tests to enable non multi_currency behaviors
            return
        currencies = self.filtered(lambda c: not c.active)
        if self.env['res.company'].search([('currency_id', 'in', currencies.ids)]):
            raise UserError(_("This currency is set on a company and therefore cannot be deactivated."))
    def _get_rates(self, company, date):
        """Return {currency_id: rate} for ``self``: the most recent rate on or
        before ``date`` for ``company`` (company-specific rates win over
        shared ones), defaulting to 1.0 when no rate exists."""
        if not self.ids:
            return {}
        self.env['res.currency.rate'].flush(['rate', 'currency_id', 'company_id', 'name'])
        query = """SELECT c.id,
                          COALESCE((SELECT r.rate FROM res_currency_rate r
                                  WHERE r.currency_id = c.id AND r.name <= %s
                                    AND (r.company_id IS NULL OR r.company_id = %s)
                               ORDER BY r.company_id, r.name DESC
                                  LIMIT 1), 1.0) AS rate
                   FROM res_currency c
                   WHERE c.id IN %s"""
        self._cr.execute(query, (date, company.id, tuple(self.ids)))
        currency_rates = dict(self._cr.fetchall())
        return currency_rates
    @api.depends_context('company')
    def _compute_is_current_company_currency(self):
        for currency in self:
            currency.is_current_company_currency = self.env.company.currency_id == currency
    @api.depends('rate_ids.rate')
    def _compute_current_rate(self):
        """Compute rate/inverse_rate/rate_string for the context date and company."""
        date = self._context.get('date') or fields.Date.context_today(self)
        company = self.env['res.company'].browse(self._context.get('company_id')) or self.env.company
        # the subquery selects the last rate before 'date' for the given currency/company
        currency_rates = self._get_rates(company, date)
        last_rate = self.env['res.currency.rate']._get_last_rates_for_companies(company)
        for currency in self:
            currency.rate = (currency_rates.get(currency.id) or 1.0) / last_rate[company]
            currency.inverse_rate = 1 / currency.rate
            if currency != company.currency_id:
                currency.rate_string = '1 %s = %.6f %s' % (company.currency_id.name, currency.rate, currency.name)
            else:
                currency.rate_string = ''
    @api.depends('rounding')
    def _compute_decimal_places(self):
        """Derive the decimal count from the rounding factor (e.g. 0.01 -> 2)."""
        for currency in self:
            if 0 < currency.rounding < 1:
                currency.decimal_places = int(math.ceil(math.log10(1/currency.rounding)))
            else:
                currency.decimal_places = 0
    @api.depends('rate_ids.name')
    def _compute_date(self):
        # rate_ids is ordered "name desc", so [:1] is the most recent rate date.
        for currency in self:
            currency.date = currency.rate_ids[:1].name
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Name search with a fallback on the "CODE (Label)" display pattern."""
        results = super(Currency, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
        if not results:
            name_match = CURRENCY_DISPLAY_PATTERN.match(name)
            if name_match:
                results = super(Currency, self)._name_search(name_match.group(1), args, operator=operator, limit=limit, name_get_uid=name_get_uid)
        return results
    def name_get(self):
        return [(currency.id, tools.ustr(currency.name)) for currency in self]
    def amount_to_text(self, amount):
        """Spell out ``amount`` in words using num2words, e.g. "Twelve Euros
        and Five Cents" (unit/subunit labels come from the currency)."""
        self.ensure_one()
        def _num2words(number, lang):
            try:
                return num2words(number, lang=lang).title()
            except NotImplementedError:
                # Fall back to English when the language is not supported.
                return num2words(number, lang='en').title()
        if num2words is None:
            logging.getLogger(__name__).warning("The library 'num2words' is missing, cannot render textual amounts.")
            return ""
        formatted = "%.{0}f".format(self.decimal_places) % amount
        parts = formatted.partition('.')
        integer_value = int(parts[0])
        fractional_value = int(parts[2] or 0)
        lang = tools.get_lang(self.env)
        amount_words = tools.ustr('{amt_value} {amt_word}').format(
                        amt_value=_num2words(integer_value, lang=lang.iso_code),
                        amt_word=self.currency_unit_label,
                        )
        if not self.is_zero(amount - integer_value):
            amount_words += ' ' + _('and') + tools.ustr(' {amt_value} {amt_word}').format(
                        amt_value=_num2words(fractional_value, lang=lang.iso_code),
                        amt_word=self.currency_subunit_label,
                        )
        return amount_words
    def round(self, amount):
        """Return ``amount`` rounded according to ``self``'s rounding rules.
           :param float amount: the amount to round
           :return: rounded float
        """
        self.ensure_one()
        return tools.float_round(amount, precision_rounding=self.rounding)
    def compare_amounts(self, amount1, amount2):
        """Compare ``amount1`` and ``amount2`` after rounding them according to the
           given currency's precision..
           An amount is considered lower/greater than another amount if their rounded
           value is different. This is not the same as having a non-zero difference!
           For example 1.432 and 1.431 are equal at 2 digits precision,
           so this method would return 0.
           However 0.006 and 0.002 are considered different (returns 1) because
           they respectively round to 0.01 and 0.0, even though
           0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
           :param float amount1: first amount to compare
           :param float amount2: second amount to compare
           :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
                    equal to, or greater than ``amount2``, according to
                    ``currency``'s rounding.
           With the new API, call it like: ``currency.compare_amounts(amount1, amount2)``.
        """
        self.ensure_one()
        return tools.float_compare(amount1, amount2, precision_rounding=self.rounding)
    def is_zero(self, amount):
        """Returns true if ``amount`` is small enough to be treated as
           zero according to current currency's rounding rules.
           Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
           ``compare_amounts(amount1,amount2) == 0``, as the former will round after
           computing the difference, while the latter will round before, giving
           different results for e.g. 0.006 and 0.002 at 2 digits precision.
           :param float amount: amount to compare with currency's zero
           With the new API, call it like: ``currency.is_zero(amount)``.
        """
        self.ensure_one()
        return tools.float_is_zero(amount, precision_rounding=self.rounding)
    @api.model
    def _get_conversion_rate(self, from_currency, to_currency, company, date):
        """Return the multiplicative factor converting ``from_currency``
        amounts into ``to_currency`` amounts for the given company/date."""
        currency_rates = (from_currency + to_currency)._get_rates(company, date)
        res = currency_rates.get(to_currency.id) / currency_rates.get(from_currency.id)
        return res
    def _convert(self, from_amount, to_currency, company, date, round=True):
        """Returns the converted amount of ``from_amount``` from the currency
           ``self`` to the currency ``to_currency`` for the given ``date`` and
           company.
           :param company: The company from which we retrieve the convertion rate
           :param date: The nearest date from which we retriev the conversion rate.
           :param round: Round the result or not
        """
        # Tolerate an empty side: fall back on the other currency for both ends.
        self, to_currency = self or to_currency, to_currency or self
        assert self, "convert amount from unknown currency"
        assert to_currency, "convert amount to unknown currency"
        assert company, "convert amount from unknown company"
        assert date, "convert amount from unknown date"
        # apply conversion rate
        if self == to_currency:
            to_amount = from_amount
        else:
            to_amount = from_amount * self._get_conversion_rate(self, to_currency, company, date)
        # apply rounding
        return to_currency.round(to_amount) if round else to_amount
    @api.model
    def _compute(self, from_currency, to_currency, from_amount, round=True):
        """Deprecated shim kept for backward compatibility; use _convert()."""
        _logger.warning('The `_compute` method is deprecated. Use `_convert` instead')
        date = self._context.get('date') or fields.Date.today()
        company = self.env['res.company'].browse(self._context.get('company_id')) or self.env.company
        return from_currency._convert(from_amount, to_currency, company, date)
    def compute(self, from_amount, to_currency, round=True):
        """Deprecated shim kept for backward compatibility; use _convert()."""
        _logger.warning('The `compute` method is deprecated. Use `_convert` instead')
        date = self._context.get('date') or fields.Date.today()
        company = self.env['res.company'].browse(self._context.get('company_id')) or self.env.company
        return self._convert(from_amount, to_currency, company, date)
    def _select_companies_rates(self):
        """SQL snippet yielding every (currency, company) rate with its
        validity window [date_start, date_end)."""
        return """
            SELECT
                r.currency_id,
                COALESCE(r.company_id, c.id) as company_id,
                r.rate,
                r.name AS date_start,
                (SELECT name FROM res_currency_rate r2
                 WHERE r2.name > r.name AND
                       r2.currency_id = r.currency_id AND
                       (r2.company_id is null or r2.company_id = c.id)
                 ORDER BY r2.name ASC
                 LIMIT 1) AS date_end
            FROM res_currency_rate r
            JOIN res_company c ON (r.company_id is null or r.company_id = c.id)
            ORDER BY date_end
        """
    @api.model
    def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Inject the company currency name into the rate column labels of the
        embedded rates tree view."""
        result = super(Currency, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type in ('tree', 'form'):
            currency_name = (self.env['res.company'].browse(self._context.get('company_id')) or self.env.company).currency_id.name
            doc = etree.XML(result['arch'])
            for field in [['company_rate', _('Unit per %s', currency_name)],
                          ['inverse_company_rate', _('%s per Unit', currency_name)]]:
                node = doc.xpath("//tree//field[@name='%s']" % field[0])
                if node:
                    node[0].set('string', field[1])
            result['arch'] = etree.tostring(doc, encoding='unicode')
        return result
class CurrencyRate(models.Model):
_name = "res.currency.rate"
_description = "Currency Rate"
_order = "name desc"
name = fields.Date(string='Date', required=True, index=True,
default=fields.Date.context_today)
rate = fields.Float(
digits=0,
group_operator="avg",
help='The rate of the currency to the currency of rate 1',
string='Technical Rate'
)
company_rate = fields.Float(
digits=0,
compute="_compute_company_rate",
inverse="_inverse_company_rate",
group_operator="avg",
help="The currency of rate 1 to the rate of the currency.",
)
inverse_company_rate = fields.Float(
digits=0,
compute="_compute_inverse_company_rate",
inverse="_inverse_inverse_company_rate",
group_operator="avg",
help="The rate of the currency to the currency of rate 1 ",
)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True, required=True, ondelete="cascade")
company_id = fields.Many2one('res.company', string='Company',
default=lambda self: self.env.company)
_sql_constraints = [
('unique_name_per_day', 'unique (name,currency_id,company_id)', 'Only one currency rate per day allowed!'),
('currency_rate_check', 'CHECK (rate>0)', 'The currency rate must be strictly positive.'),
]
def _sanitize_vals(self, vals):
if 'inverse_company_rate' in vals and ('company_rate' in vals or 'rate' in vals):
del vals['inverse_company_rate']
if 'company_rate' in vals and 'rate' in vals:
del vals['company_rate']
return vals
def write(self, vals):
return super().write(self._sanitize_vals(vals))
@api.model_create_multi
def create(self, vals_list):
return super().create([self._sanitize_vals(vals) for vals in vals_list])
def _get_latest_rate(self):
# Make sure 'name' is defined when creating a new rate.
if not self.name:
raise UserError(_("The name for the current rate is empty.\nPlease set it."))
return self.currency_id.rate_ids.sudo().filtered(lambda x: (
x.rate
and x.company_id == (self.company_id or self.env.company)
and x.name < (self.name or fields.Date.today())
)).sorted('name')[-1:]
def _get_last_rates_for_companies(self, companies):
return {
company: company.currency_id.rate_ids.sudo().filtered(lambda x: (
x.rate
and x.company_id == company or not x.company_id
)).sorted('name')[-1:].rate or 1
for company in companies
}
@api.depends('currency_id', 'company_id', 'name')
def _compute_rate(self):
for currency_rate in self:
currency_rate.rate = currency_rate.rate or currency_rate._get_latest_rate().rate or 1.0
@api.depends('rate', 'name', 'currency_id', 'company_id', 'currency_id.rate_ids.rate')
@api.depends_context('company')
def _compute_company_rate(self):
last_rate = self.env['res.currency.rate']._get_last_rates_for_companies(self.company_id | self.env.company)
for currency_rate in self:
company = currency_rate.company_id or self.env.company
currency_rate.company_rate = (currency_rate.rate or currency_rate._get_latest_rate().rate or 1.0) / last_rate[company]
@api.onchange('company_rate')
def _inverse_company_rate(self):
last_rate = self.env['res.currency.rate']._get_last_rates_for_companies(self.company_id | self.env.company)
for currency_rate in self:
company = currency_rate.company_id or self.env.company
currency_rate.rate = currency_rate.company_rate * last_rate[company]
@api.depends('company_rate')
def _compute_inverse_company_rate(self):
for currency_rate in self:
if not currency_rate.company_rate:
currency_rate.company_rate = 1.0
currency_rate.inverse_company_rate = 1.0 / currency_rate.company_rate
@api.onchange('inverse_company_rate')
def _inverse_inverse_company_rate(self):
for currency_rate in self:
if not currency_rate.inverse_company_rate:
currency_rate.inverse_company_rate = 1.0
currency_rate.company_rate = 1.0 / currency_rate.inverse_company_rate
    @api.onchange('company_rate')
    def _onchange_rate_warning(self):
        # Warn the user when the entered rate deviates by more than 20% from
        # the latest known rate, as such a jump is usually a typo.
        latest_rate = self._get_latest_rate()
        if latest_rate:
            diff = (latest_rate.rate - self.rate) / latest_rate.rate
            if abs(diff) > 0.2:
                return {
                    'warning': {
                        'title': _("Warning for %s", self.currency_id.name),
                        'message': _(
                            "The new rate is quite far from the previous rate.\n"
                            "Incorrect currency rates may cause critical problems, make sure the rate is correct !"
                        )
                    }
                }
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
if operator in ['=', '!=']:
try:
date_format = '%Y-%m-%d'
if self._context.get('lang'):
lang_id = self.env['res.lang']._search([('code', '=', self._context['lang'])], access_rights_uid=name_get_uid)
if lang_id:
date_format = self.browse(lang_id).date_format
name = time.strftime('%Y-%m-%d', time.strptime(name, date_format))
except ValueError:
try:
args.append(('rate', operator, float(name)))
except ValueError:
return []
name = ''
operator = 'ilike'
return super(CurrencyRate, self)._name_search(name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
result = super(CurrencyRate, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type in ('tree'):
names = {
'company_currency_name': (self.env['res.company'].browse(self._context.get('company_id')) or self.env.company).currency_id.name,
'rate_currency_name': self.env['res.currency'].browse(self._context.get('active_id')).name or 'Unit',
}
doc = etree.XML(result['arch'])
for field in [['company_rate', _('%(rate_currency_name)s per %(company_currency_name)s', **names)],
['inverse_company_rate', _('%(company_currency_name)s per %(rate_currency_name)s', **names)]]:
node = doc.xpath("//tree//field[@name='%s']" % field[0])
if node:
node[0].set('string', field[1])
result['arch'] = etree.tostring(doc, encoding='unicode')
return result
| 47.27521 | 22,503 |
19,066 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
from __future__ import print_function
from textwrap import dedent
import copy
import io
import logging
import re
import markupsafe
import tokenize
from lxml import html, etree
from odoo import api, models, tools
from odoo.tools.safe_eval import check_values, assert_valid_codeobj, _BUILTINS, to_opcodes, _EXPR_OPCODES, _BLACKLIST
from odoo.tools.misc import get_lang
from odoo.http import request
from odoo.modules.module import get_resource_path
from odoo.tools.profiler import QwebTracker
from odoo.addons.base.models.qweb import QWeb
from odoo.addons.base.models.assetsbundle import AssetsBundle
from odoo.addons.base.models.ir_asset import can_aggregate, STYLE_EXTENSIONS, SCRIPT_EXTENSIONS
_logger = logging.getLogger(__name__)
# Opcodes allowed in compiled QWeb template code, on top of the generic
# safe-eval expression opcodes: function definition/calls, iteration,
# jumps and name/attribute loads -- minus everything blacklisted.
_SAFE_QWEB_OPCODES = _EXPR_OPCODES.union(to_opcodes([
    'MAKE_FUNCTION', 'CALL_FUNCTION', 'CALL_FUNCTION_KW', 'CALL_FUNCTION_EX',
    'CALL_METHOD', 'LOAD_METHOD',
    'GET_ITER', 'FOR_ITER', 'YIELD_VALUE',
    'JUMP_FORWARD', 'JUMP_ABSOLUTE',
    'JUMP_IF_FALSE_OR_POP', 'JUMP_IF_TRUE_OR_POP', 'POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',
    'LOAD_NAME', 'LOAD_ATTR',
    'LOAD_FAST', 'STORE_FAST', 'UNPACK_SEQUENCE',
    'STORE_SUBSCR',
    'LOAD_GLOBAL',
])) - _BLACKLIST
class IrQWeb(models.AbstractModel, QWeb):
    """ Base QWeb rendering engine

    * to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
      create new models called :samp:`ir.qweb.field.{widget}`

    Beware that if you need extensions or alterations which could be
    incompatible with other subsystems, you should create a local object
    inheriting from ``ir.qweb`` and customize that.
    """
    _name = 'ir.qweb'
    _description = 'Qweb'
    # globals exposed to template expressions (safe_eval builtins)
    _available_objects = dict(_BUILTINS)
    # collapses runs of blank lines in the rendered output
    _empty_lines = re.compile(r'\n\s*\n')
    @QwebTracker.wrap_render
    @api.model
    def _render(self, template, values=None, **options):
        """ render(template, values, **options)

        Render the template specified by the given name.

        :param template: etree, xml_id, template name (see _get_template)
            * Call the method ``load`` is not an etree.
        :param dict values: template values to be used for rendering
        :param options: used to compile the template (the dict available for
            the rendering is frozen)
            * ``load`` (function) overrides the load method
        :returns: bytes marked as markup-safe (decode to
            :class:`markupsafe.Markup` instead of `str`)
        :rtype: MarkupSafe
        """
        compile_options = dict(self.env.context, dev_mode='qweb' in tools.config['dev_mode'])
        compile_options.update(options)
        result = super()._render(template, values=values, **compile_options)
        if not values or not values.get('__keep_empty_lines'):
            result = markupsafe.Markup(IrQWeb._empty_lines.sub('\n', result.strip()))
        if 'data-pagebreak=' not in result:
            return result
        # post-process rows flagged with data-pagebreak: split their table in
        # two (repeating the <thead>) and insert a forced page break between
        fragments = html.fragments_fromstring(result)
        for fragment in fragments:
            for row in fragment.iterfind('.//tr[@data-pagebreak]'):
                table = next(row.iterancestors('table'))
                newtable = html.Element('table', attrib=dict(table.attrib))
                thead = table.find('thead')
                if thead:
                    newtable.append(copy.deepcopy(thead))
                # TODO: copy caption & tfoot as well?
                # TODO: move rows in a tbody if row.getparent() is one?
                pos = row.get('data-pagebreak')
                assert pos in ('before', 'after')
                # move every row up to (and for 'after', including) the
                # break row into the new leading table
                for sibling in row.getparent().iterchildren('tr'):
                    if sibling is row:
                        if pos == 'after':
                            newtable.append(sibling)
                        break
                    newtable.append(sibling)
                table.addprevious(newtable)
                table.addprevious(html.Element('div', attrib={
                    'style': 'page-break-after: always'
                }))
        return markupsafe.Markup(''.join(html.tostring(f).decode() for f in fragments))
    # assume cache will be invalidated by third party on write to ir.ui.view
    def _get_template_cache_keys(self):
        """ Return the list of context keys to use for caching ``_get_template``. """
        return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations', 'website_id', 'profile', 'raise_on_code']
    # apply ormcache_context decorator unless in dev mode...
    @tools.conditional(
        'xml' not in tools.config['dev_mode'],
        tools.ormcache('id_or_xml_id', 'tuple(options.get(k) for k in self._get_template_cache_keys())'),
    )
    @QwebTracker.wrap_compile
    def _compile(self, id_or_xml_id, options):
        # NOTE(review): the bare except deliberately tolerates any
        # non-numeric identifier (xml_id strings); it only normalizes
        # numeric strings into real ids.
        try:
            id_or_xml_id = int(id_or_xml_id)
        except:
            pass
        return super()._compile(id_or_xml_id, options=options)
    def _load(self, name, options):
        # Load template `name` (id, xml_id or view name) as an etree or raw
        # arch, in the language requested by `options`; returns
        # (template, view_id).
        lang = options.get('lang', get_lang(self.env).code)
        env = self.env
        if lang != env.context.get('lang'):
            env = env(context=dict(env.context, lang=lang))
        view_id = self.env['ir.ui.view'].get_view_id(name)
        template = env['ir.ui.view'].sudo()._read_template(view_id)
        # QWeb's `_read_template` will check if one of the first children of
        # what we send to it has a "t-name" attribute having `name` as value
        # to consider it has found it. As it'll never be the case when working
        # with view ids or children view or children primary views, force it here.
        def is_child_view(view_name):
            view_id = self.env['ir.ui.view'].get_view_id(view_name)
            view = self.env['ir.ui.view'].sudo().browse(view_id)
            # NOTE(review): `inherit_id` is a recordset, so `is not None` is
            # always True here -- presumably `bool(view.inherit_id)` was
            # intended; confirm against callers before changing.
            return view.inherit_id is not None
        if isinstance(name, int) or is_child_view(name):
            view = etree.fromstring(template)
            for node in view:
                if node.get('t-name'):
                    node.set('t-name', str(name))
            return (view, view_id)
        else:
            return (template, view_id)
    # order
    def _directives_eval_order(self):
        # insert the odoo-specific directives at the right priority
        # relative to the base QWeb ones
        directives = super()._directives_eval_order()
        directives.insert(directives.index('foreach'), 'groups')
        directives.insert(directives.index('call'), 'lang')
        directives.insert(directives.index('field'), 'call-assets')
        return directives
    # compile
    def _compile_node(self, el, options, indent):
        # normalize the bare `groups` attribute into a `t-groups` directive
        if el.get("groups"):
            el.set("t-groups", el.attrib.pop("groups"))
        return super()._compile_node(el, options, indent)
    # compile directives
    @QwebTracker.wrap_compile_directive
    def _compile_directive(self, el, options, directive, indent):
        return super()._compile_directive(el, options, directive, indent)
    def _compile_directive_groups(self, el, options, indent):
        """Compile `t-groups` expressions into a python code as a list of
        strings.

        The code will contain the condition `if self.user_has_groups(groups)`
        part that wrap the rest of the compiled code of this element.
        """
        groups = el.attrib.pop('t-groups')
        code = self._flushText(options, indent)
        code.append(self._indent(f"if self.user_has_groups({repr(groups)}):", indent))
        # an element with no compilable content still needs a `pass` body
        code.extend(self._compile_directives(el, options, indent + 1) + self._flushText(options, indent + 1) or [self._indent('pass', indent + 1)])
        return code
    def _compile_directive_lang(self, el, options, indent):
        # `t-lang` is sugar for `t-options-lang`
        el.attrib['t-options-lang'] = el.attrib.pop('t-lang')
        return self._compile_node(el, options, indent)
    def _compile_directive_call_assets(self, el, options, indent):
        """ This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
        if len(el):
            raise SyntaxError("t-call-assets cannot contain children nodes")
        # the generated code fetches the bundle nodes at render time and
        # yields each <script>/<link> tag in turn
        code = self._flushText(options, indent)
        code.append(self._indent(dedent("""
            t_call_assets_nodes = self._get_asset_nodes(%(xmlid)s, css=%(css)s, js=%(js)s, debug=values.get("debug"), async_load=%(async_load)s, defer_load=%(defer_load)s, lazy_load=%(lazy_load)s, media=%(media)s)
            for index, (tagName, attrs, content) in enumerate(t_call_assets_nodes):
                if index:
                    yield '\\n        '
                yield '<'
                yield tagName
            """).strip() % {
            'xmlid': repr(el.get('t-call-assets')),
            'css': self._compile_bool(el.get('t-css', True)),
            'js': self._compile_bool(el.get('t-js', True)),
            'async_load': self._compile_bool(el.get('async_load', False)),
            'defer_load': self._compile_bool(el.get('defer_load', False)),
            'lazy_load': self._compile_bool(el.get('lazy_load', False)),
            'media': repr(el.get('media')) if el.get('media') else False,
        }, indent))
        code.extend(self._compile_attributes(options, indent + 1))
        code.append(self._indent(dedent("""
            if not content and tagName in self._void_elements:
                yield '/>'
            else:
                yield '>'
                if content:
                    yield content
                yield '</'
                yield tagName
                yield '>'
            """).strip(), indent + 1))
        return code
    # method called by computing code
    def get_asset_bundle(self, bundle_name, files, env=None, css=True, js=True):
        # factory hook: overridable to customize the bundle implementation
        return AssetsBundle(bundle_name, files, env=env, css=css, js=js)
    def _get_asset_nodes(self, bundle, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
        """Generates asset nodes.
        If debug=assets, the assets will be regenerated when a file which composes them has been modified.
        Else, the assets will be generated only once and then stored in cache.
        """
        if debug and 'assets' in debug:
            return self._generate_asset_nodes(bundle, css, js, debug, async_load, defer_load, lazy_load, media)
        else:
            return self._generate_asset_nodes_cache(bundle, css, js, debug, async_load, defer_load, lazy_load, media)
    @tools.conditional(
        # in non-xml-debug mode we want assets to be cached forever, and the admin can force a cache clear
        # by restarting the server after updating the source code (or using the "Clear server cache" in debug tools)
        'xml' not in tools.config['dev_mode'],
        tools.ormcache_context('bundle', 'css', 'js', 'debug', 'async_load', 'defer_load', 'lazy_load', keys=("website_id", "lang")),
    )
    def _generate_asset_nodes_cache(self, bundle, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
        return self._generate_asset_nodes(bundle, css, js, debug, async_load, defer_load, lazy_load, media)
    def _generate_asset_nodes(self, bundle, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
        # Build the nodes for the bundle: non-aggregable "remains" first,
        # then the aggregated bundle files.
        nodeAttrs = None
        if css and media:
            nodeAttrs = {
                'media': media,
            }
        files, remains = self._get_asset_content(bundle, nodeAttrs, defer_load=defer_load, lazy_load=lazy_load)
        asset = self.get_asset_bundle(bundle, files, env=self.env, css=css, js=js)
        remains = [node for node in remains if (css and node[0] == 'link') or (js and node[0] == 'script')]
        return remains + asset.to_node(css=css, js=js, debug=debug, async_load=async_load, defer_load=defer_load, lazy_load=lazy_load)
    def _get_asset_link_urls(self, bundle):
        # hrefs of the stylesheet <link> nodes of the bundle (no scripts)
        asset_nodes = self._get_asset_nodes(bundle, js=False)
        return [node[1]['href'] for node in asset_nodes if node[0] == 'link']
    @tools.ormcache_context('bundle', 'nodeAttrs and nodeAttrs.get("media")', 'defer_load', 'lazy_load', keys=("website_id", "lang"))
    def _get_asset_content(self, bundle, nodeAttrs=None, defer_load=False, lazy_load=False):
        # Split the bundle's asset paths into aggregable file dicts (`files`)
        # and standalone (tag, attributes, content) nodes (`remains`).
        asset_paths = self.env['ir.asset']._get_asset_paths(bundle=bundle, css=True, js=True)
        files = []
        remains = []
        for path, *_ in asset_paths:
            ext = path.split('.')[-1]
            is_js = ext in SCRIPT_EXTENSIONS
            is_css = ext in STYLE_EXTENSIONS
            if not is_js and not is_css:
                continue
            mimetype = 'text/javascript' if is_js else 'text/%s' % ext
            if can_aggregate(path):
                segments = [segment for segment in path.split('/') if segment]
                files.append({
                    'atype': mimetype,
                    'url': path,
                    'filename': get_resource_path(*segments) if segments else None,
                    'content': '',
                    'media': nodeAttrs and nodeAttrs.get('media'),
                })
            else:
                if is_js:
                    tag = 'script'
                    attributes = {
                        "type": mimetype,
                    }
                    # lazy-loaded scripts carry their URL in data-src
                    attributes["data-src" if lazy_load else "src"] = path
                    if defer_load or lazy_load:
                        attributes["defer"] = "defer"
                else:
                    tag = 'link'
                    attributes = {
                        "type": mimetype,
                        "rel": "stylesheet",
                        "href": path,
                        'media': nodeAttrs and nodeAttrs.get('media'),
                    }
                remains.append((tag, attributes, ''))
        return (files, remains)
    def _get_field(self, record, field_name, expression, tagName, field_options, options, values):
        # Resolve the converter for a `t-field` node and return
        # (attributes, content, force_display).
        field = record._fields[field_name]
        # adds template compile options for rendering fields
        field_options['template_options'] = options
        # adds generic field options
        field_options['tagName'] = tagName
        field_options['expression'] = expression
        field_options['type'] = field_options.get('widget', field.type)
        inherit_branding = options.get('inherit_branding', options.get('inherit_branding_auto') and record.check_access_rights('write', False))
        field_options['inherit_branding'] = inherit_branding
        translate = options.get('edit_translations') and options.get('translatable') and field.translate
        field_options['translate'] = translate
        # field converter
        model = 'ir.qweb.field.' + field_options['type']
        converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
        # get content (the return values from fields are considered to be markup safe)
        content = converter.record_to_html(record, field_name, field_options)
        attributes = converter.attributes(record, field_name, field_options, values)
        return (attributes, content, inherit_branding or translate)
    def _get_widget(self, value, expression, tagName, field_options, options, values):
        # Resolve the converter for a widget-rendered value and return
        # (attributes, content, force_display).
        # adds template compile options for rendering fields
        field_options['template_options'] = options
        field_options['type'] = field_options['widget']
        field_options['tagName'] = tagName
        field_options['expression'] = expression
        # field converter
        model = 'ir.qweb.field.' + field_options['type']
        converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
        # get content (the return values from widget are considered to be markup safe)
        content = converter.value_to_html(value, field_options)
        attributes = {}
        attributes['data-oe-type'] = field_options['type']
        attributes['data-oe-expression'] = field_options['expression']
        return (attributes, content, None)
    def _prepare_values(self, values, options):
        """ Prepare the context that will be sent to the evaluated function.

        :param values: template values to be used for rendering
        :param options: frozen dict of compilation parameters.
        """
        check_values(values)
        values['true'] = True
        values['false'] = False
        if 'request' not in values:
            values['request'] = request
        return super()._prepare_values(values, options)
    def _compile_expr(self, expr, raise_on_missing=False):
        """ Compiles a purported Python expression to compiled code, verifies
        that it's safe (according to safe_eval's semantics) and alter its
        variable references to access values data instead

        :param expr: string
        """
        readable = io.BytesIO(expr.strip().encode('utf-8'))
        try:
            tokens = list(tokenize.tokenize(readable.readline))
        except tokenize.TokenError:
            raise ValueError(f"Cannot compile expression: {expr}")
        namespace_expr = self._compile_expr_tokens(tokens, self._allowed_keyword + list(self._available_objects.keys()), raise_on_missing=raise_on_missing)
        # refuse any opcode outside the whitelisted safe set
        assert_valid_codeobj(_SAFE_QWEB_OPCODES, compile(namespace_expr, '<>', 'eval'), expr)
        return namespace_expr
def render(template_name, values, load, **options):
    """ Rendering of a qweb template without database and outside the registry.
    (Widget, field, or asset rendering is not implemented.)

    :param (string|int) template_name: template identifier
    :param dict values: template values to be used for rendering
    :param def load: function like `load(template_name, options)` which
        returns an etree from the given template name (from initial rendering
        or template `t-call`).
    :param options: used to compile the template (the dict available for the
        rendering is frozen)
    :returns: bytes marked as markup-safe (decode to :class:`markupsafe.Markup`
        instead of `str`)
    :rtype: MarkupSafe
    """
    class MockPool:
        # minimal registry stand-in: no database, empty ormcache
        db_name = None
        _Registry__cache = {}
    class MockIrQWeb(IrQWeb):
        _register = False               # not visible in real registry
        pool = MockPool()
        def _get_field(self, *args):
            raise NotImplementedError("Fields are not allowed in this rendering mode. Please use \"env['ir.qweb']._render\" method")
        def _get_widget(self, *args):
            raise NotImplementedError("Widgets are not allowed in this rendering mode. Please use \"env['ir.qweb']._render\" method")
        def _get_asset_nodes(self, *args):
            raise NotImplementedError("Assets are not allowed in this rendering mode. Please use \"env['ir.qweb']._render\" method")
    class MockEnv(dict):
        # environment stub: just an empty context, enough for compilation
        def __init__(self):
            super().__init__()
            self.context = {}
    # bypass __init__ (which requires a real registry) and render directly
    renderer = object.__new__(MockIrQWeb)
    renderer.env = MockEnv()
    return renderer._render(template_name, values, load=load, **options)
| 44.134259 | 19,066 |
48,720 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from collections import defaultdict, OrderedDict
from decorator import decorator
from operator import attrgetter
import importlib
import io
import logging
import os
import pkg_resources
import shutil
import tempfile
import threading
import zipfile
import requests
import werkzeug.urls
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import lxml.html
import psycopg2
import odoo
from odoo import api, fields, models, modules, tools, _
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import AccessDenied, UserError
from odoo.osv import expression
from odoo.tools.parse_version import parse_version
from odoo.tools.misc import topological_sort
from odoo.http import request
_logger = logging.getLogger(__name__)
# Window-action template used to open the module upgrade wizard in a dialog.
ACTION_DICT = {
    'view_mode': 'form',
    'res_model': 'base.module.upgrade',
    'target': 'new',
    'type': 'ir.actions.act_window',
}
def backup(path, raise_exception=True):
    """Move *path* aside to the first free ``<path>~<n>`` name and return it.

    :param path: file or directory to move out of the way
    :param raise_exception: when the path does not exist, raise OSError if
        True, otherwise return None
    :return: the backup path, or None (missing path, raise_exception=False)
    """
    path = os.path.normpath(path)
    if not os.path.exists(path):
        if raise_exception:
            raise OSError('path does not exists')
        return None
    suffix = 0
    while True:
        suffix += 1
        candidate = '%s~%d' % (path, suffix)
        if os.path.exists(candidate):
            continue
        shutil.move(path, candidate)
        return candidate
def assert_log_admin_access(method):
    """Decorator checking that the calling user is an administrator, and logging the call.

    Raises an AccessDenied error if the user does not have administrator privileges, according
    to `user._is_admin()`.
    """
    def check_and_log(method, self, *args, **kwargs):
        user = self.env.user
        origin = request.httprequest.remote_addr if request else 'n/a'
        log_data = (method.__name__, self.sudo().mapped('display_name'), user.login, user.id, origin)
        if not self.env.is_admin():
            _logger.warning('DENY access to module.%s on %s to user %s ID #%s via %s', *log_data)
            raise AccessDenied()
        _logger.info('ALLOW access to module.%s on %s to user %s #%s via %s', *log_data)
        return method(self, *args, **kwargs)
    # decorator.decorator preserves the wrapped method's signature/metadata
    return decorator(check_and_log, method)
class ModuleCategory(models.Model):
    """Hierarchical categories ("Applications") grouping addon modules."""
    _name = "ir.module.category"
    _description = "Application"
    _order = 'name'
    @api.depends('module_ids')
    def _compute_module_nr(self):
        # Count modules attached to each category, including modules of its
        # direct child categories (single SQL pass, then one query per record
        # to gather the child ids).
        cr = self._cr
        cr.execute('SELECT category_id, COUNT(*) \
                      FROM ir_module_module \
                     WHERE category_id IN %(ids)s \
                        OR category_id IN (SELECT id \
                                             FROM ir_module_category \
                                            WHERE parent_id IN %(ids)s) \
                     GROUP BY category_id', {'ids': tuple(self.ids)}
                   )
        result = dict(cr.fetchall())
        for cat in self.filtered('id'):
            cr.execute('SELECT id FROM ir_module_category WHERE parent_id=%s', (cat.id,))
            cat.module_nr = sum([result.get(c, 0) for (c,) in cr.fetchall()], result.get(cat.id, 0))
    name = fields.Char(string='Name', required=True, translate=True, index=True)
    parent_id = fields.Many2one('ir.module.category', string='Parent Application', index=True)
    child_ids = fields.One2many('ir.module.category', 'parent_id', string='Child Applications')
    module_nr = fields.Integer(string='Number of Apps', compute='_compute_module_nr')
    module_ids = fields.One2many('ir.module.module', 'category_id', string='Modules')
    description = fields.Text(string='Description', translate=True)
    sequence = fields.Integer(string='Sequence')
    visible = fields.Boolean(string='Visible', default=True)
    exclusive = fields.Boolean(string='Exclusive')
    xml_id = fields.Char(string='External ID', compute='_compute_xml_id')
    def _compute_xml_id(self):
        # Expose the first external identifier (module.name) of each record.
        xml_ids = defaultdict(list)
        domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
        for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id']):
            xml_ids[data['res_id']].append("%s.%s" % (data['module'], data['name']))
        for cat in self:
            cat.xml_id = xml_ids.get(cat.id, [''])[0]
class MyFilterMessages(Transform):
    """
    Custom docutils transform to remove `system message` for a document and
    generate warnings.

    (The standard filter removes them based on some `report_level` passed in
    the `settings_override` dictionary, but if we use it, we can't see them
    and generate warnings.)
    """
    default_priority = 870
    def apply(self):
        # drop every system-message node, but log it first
        for node in self.document.traverse(nodes.system_message):
            _logger.warning("docutils' system message present: %s", str(node))
            node.parent.remove(node)
class MyWriter(Writer):
    """
    Custom docutils html4ccs1 writer that doesn't add the warnings to the
    output document.
    """
    def get_transforms(self):
        # replace the default filter with our logging one (see above class)
        return [MyFilterMessages, writer_aux.Admonitions]
# Lifecycle states of a module; the "to ..." states are transient markers
# consumed by the next registry (un)installation/upgrade pass.
STATES = [
    ('uninstallable', 'Uninstallable'),
    ('uninstalled', 'Not Installed'),
    ('installed', 'Installed'),
    ('to upgrade', 'To be upgraded'),
    ('to remove', 'To be removed'),
    ('to install', 'To be installed'),
]
class Module(models.Model):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
_order = 'application desc,sequence,name'
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Strip the "immediate install" server action from the form view's
        toolbar so it can only be triggered from the list view."""
        res = super(Module, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=False)
        if view_type == 'form' and res.get('toolbar',False):
            install_id = self.env.ref('base.action_server_module_immediate_install').id
            action = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != install_id]
            res['toolbar'] = {'action': action}
        return res
@classmethod
def get_module_info(cls, name):
try:
return modules.load_information_from_description_file(name)
except Exception:
_logger.debug('Error when trying to fetch information for module %s', name, exc_info=True)
return {}
    @api.depends('name', 'description')
    def _get_desc(self):
        # Build the sanitized HTML description: prefer the module's on-disk
        # static/description/index.html, else render the RST `description`
        # manifest text through docutils.
        for module in self:
            if not module.name:
                module.description_html = False
                continue
            module_path = modules.get_module_path(module.name, display_warning=False)  # avoid to log warning for fake community module
            if module_path:
                path = modules.check_resource_path(module_path, 'static/description/index.html')
            if module_path and path:
                with tools.file_open(path, 'rb') as desc_file:
                    doc = desc_file.read()
                    html = lxml.html.document_fromstring(doc)
                    # rewrite relative image sources to the module's static URL
                    for element, attribute, link, pos in html.iterlinks():
                        if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
                            element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
                    module.description_html = tools.html_sanitize(lxml.html.tostring(html))
            else:
                overrides = {
                    'embed_stylesheet': False,
                    'doctitle_xform': False,
                    'output_encoding': 'unicode',
                    'xml_declaration': False,
                    'file_insertion_enabled': False,
                }
                # applications hide the raw manifest description on purpose
                output = publish_string(source=module.description if not module.application and module.description else '', settings_overrides=overrides, writer=MyWriter())
                module.description_html = tools.html_sanitize(output)
@api.depends('name')
def _get_latest_version(self):
default_version = modules.adapt_version('1.0')
for module in self:
module.installed_version = self.get_module_info(module.name).get('version', default_version)
    @api.depends('name', 'state')
    def _get_views(self):
        # Summarize, per module, the views/reports/menus its external ids
        # point at (one newline-separated sorted list per field).
        IrModelData = self.env['ir.model.data'].with_context(active_test=True)
        dmodels = ['ir.ui.view', 'ir.actions.report', 'ir.ui.menu']
        for module in self:
            # Skip uninstalled modules below, no data to find anyway.
            if module.state not in ('installed', 'to upgrade', 'to remove'):
                module.views_by_module = ""
                module.reports_by_module = ""
                module.menus_by_module = ""
                continue
            # then, search and group ir.model.data records
            imd_models = defaultdict(list)
            imd_domain = [('module', '=', module.name), ('model', 'in', tuple(dmodels))]
            for data in IrModelData.sudo().search(imd_domain):
                imd_models[data.model].append(data.res_id)
            def browse(model):
                # as this method is called before the module update, some xmlid
                # may be invalid at this stage; explictly filter records before
                # reading them
                return self.env[model].browse(imd_models[model]).exists()
            def format_view(v):
                return '%s%s (%s)' % (v.inherit_id and '* INHERIT ' or '', v.name, v.type)
            module.views_by_module = "\n".join(sorted(format_view(v) for v in browse('ir.ui.view')))
            module.reports_by_module = "\n".join(sorted(r.name for r in browse('ir.actions.report')))
            module.menus_by_module = "\n".join(sorted(m.complete_name for m in browse('ir.ui.menu')))
    @api.depends('icon')
    def _get_icon_image(self):
        # Load the module icon from disk (explicit `icon` URL first, default
        # module icon path otherwise) as base64; empty string when missing.
        for module in self:
            module.icon_image = ''
            if module.icon:
                path_parts = module.icon.split('/')
                path = modules.get_module_resource(path_parts[1], *path_parts[2:])
            elif module.id:
                path = modules.module.get_module_icon_path(module)
            else:
                path = ''
            if path:
                with tools.file_open(path, 'rb') as image_file:
                    module.icon_image = base64.b64encode(image_file.read())
name = fields.Char('Technical Name', readonly=True, required=True, index=True)
category_id = fields.Many2one('ir.module.category', string='Category', readonly=True, index=True)
shortdesc = fields.Char('Module Name', readonly=True, translate=True)
summary = fields.Char('Summary', readonly=True, translate=True)
description = fields.Text('Description', readonly=True, translate=True)
description_html = fields.Html('Description HTML', compute='_get_desc')
author = fields.Char("Author", readonly=True)
maintainer = fields.Char('Maintainer', readonly=True)
contributors = fields.Text('Contributors', readonly=True)
website = fields.Char("Website", readonly=True)
# attention: Incorrect field names !!
# installed_version refers the latest version (the one on disk)
# latest_version refers the installed version (the one in database)
# published_version refers the version available on the repository
installed_version = fields.Char('Latest Version', compute='_get_latest_version')
latest_version = fields.Char('Installed Version', readonly=True)
published_version = fields.Char('Published Version', readonly=True)
url = fields.Char('URL', readonly=True)
sequence = fields.Integer('Sequence', default=100)
dependencies_id = fields.One2many('ir.module.module.dependency', 'module_id',
string='Dependencies', readonly=True)
exclusion_ids = fields.One2many('ir.module.module.exclusion', 'module_id',
string='Exclusions', readonly=True)
auto_install = fields.Boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.')
state = fields.Selection(STATES, string='Status', default='uninstallable', readonly=True, index=True)
demo = fields.Boolean('Demo Data', default=False, readonly=True)
license = fields.Selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('LGPL-3', 'LGPL Version 3'),
('Other OSI approved licence', 'Other OSI Approved License'),
('OEEL-1', 'Odoo Enterprise Edition License v1.0'),
('OPL-1', 'Odoo Proprietary License v1.0'),
('Other proprietary', 'Other Proprietary')
], string='License', default='LGPL-3', readonly=True)
menus_by_module = fields.Text(string='Menus', compute='_get_views', store=True)
reports_by_module = fields.Text(string='Reports', compute='_get_views', store=True)
views_by_module = fields.Text(string='Views', compute='_get_views', store=True)
application = fields.Boolean('Application', readonly=True)
icon = fields.Char('Icon URL')
icon_image = fields.Binary(string='Icon', compute='_get_icon_image')
to_buy = fields.Boolean('Odoo Enterprise Module', default=False)
has_iap = fields.Boolean(compute='_compute_has_iap')
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', 'The name of the module must be unique!'),
]
def _compute_has_iap(self):
for module in self:
module.has_iap = bool(module.id) and 'iap' in module.upstream_dependencies(exclude_states=('',)).mapped('name')
@api.ondelete(at_uninstall=False)
def _unlink_except_installed(self):
for module in self:
if module.state in ('installed', 'to upgrade', 'to remove', 'to install'):
raise UserError(_('You are trying to remove a module that is installed or will be installed.'))
    def unlink(self):
        # module records back several ormcaches (manifest info, registry
        # helpers); flush them all before removing the records
        self.clear_caches()
        return super(Module, self).unlink()
    def _get_modules_to_load_domain(self):
        """ Domain to retrieve the modules that should be loaded by the registry. """
        # overridable hook; base registry only loads installed modules
        return [('state', '=', 'installed')]
    @staticmethod
    def _check_python_external_dependency(pydep):
        """Ensure the python dependency *pydep* is available, preferring the
        PyPI distribution name; raise Exception with a readable message
        otherwise."""
        try:
            pkg_resources.get_distribution(pydep)
        except pkg_resources.DistributionNotFound as e:
            try:
                # fall back on a plain import for deps declared by importable
                # module name rather than PyPI distribution name
                importlib.import_module(pydep)
                _logger.info("python external dependency on '%s' does not appear to be a valid PyPI package. Using a PyPI package name is recommended.", pydep)
            except ImportError:
                # backward compatibility attempt failed
                _logger.warning("DistributionNotFound: %s", e)
                raise Exception('Python library not installed: %s' % (pydep,))
        except pkg_resources.VersionConflict as e:
            _logger.warning("VersionConflict: %s", e)
            raise Exception('Python library version conflict: %s' % (pydep,))
        except Exception as e:
            _logger.warning("get_distribution(%s) failed: %s", pydep, e)
            raise Exception('Error finding python library %s' % (pydep,))
    @staticmethod
    def _check_external_dependencies(terp):
        """Verify the manifest's ``external_dependencies`` (python modules and
        binaries on PATH); raise on the first missing one.

        :param terp: manifest dict as returned by :meth:`get_module_info`
        """
        depends = terp.get('external_dependencies')
        if not depends:
            return
        for pydep in depends.get('python', []):
            Module._check_python_external_dependency(pydep)
        for binary in depends.get('bin', []):
            try:
                tools.find_in_path(binary)
            except IOError:
                raise Exception('Unable to find %r in path' % (binary,))
    @classmethod
    def check_external_dependencies(cls, module_name, newstate='to install'):
        """Check *module_name*'s external dependencies and wrap any failure
        into a user-facing error phrased for the target state."""
        terp = cls.get_module_info(module_name)
        try:
            cls._check_external_dependencies(terp)
        except Exception as e:
            if newstate == 'to install':
                msg = _('Unable to install module "%s" because an external dependency is not met: %s')
            elif newstate == 'to upgrade':
                msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
            else:
                msg = _('Unable to process module "%s" because an external dependency is not met: %s')
            # NOTE(review): assumes the raised exception carries at least one
            # arg (true for the raises in _check_* above)
            raise UserError(msg % (module_name, e.args[0]))
def _state_update(self, newstate, states_to_update, level=100):
    """Recursively flag modules (and their dependencies) as ``newstate``.

    :param str newstate: target state to write
    :param states_to_update: only modules currently in one of these states
        are updated; the others merely contribute their demo flag
    :param int level: recursion guard against dependency cycles
    :return: whether any involved module is installed with demo data
    :rtype: bool
    :raises UserError: on recursion overflow or unknown dependency
    """
    if level < 1:
        raise UserError(_('Recursion error in modules dependencies !'))

    # whether some modules are installed with demo data
    demo = False

    for module in self:
        if module.state not in states_to_update:
            demo = demo or module.demo
            continue

        # determine dependency modules to update/others
        update_mods, ready_mods = self.browse(), self.browse()
        for dep in module.dependencies_id:
            if dep.state == 'unknown':
                raise UserError(_("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
            if dep.depend_id.state == newstate:
                ready_mods += dep.depend_id
            else:
                update_mods += dep.depend_id

        # update dependency modules that require it, and determine demo for module
        update_demo = update_mods._state_update(newstate, states_to_update, level=level-1)
        module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
        demo = demo or module_demo

        if module.state in states_to_update:
            # check dependencies and update module itself
            self.check_external_dependencies(module.name, newstate)
            module.write({'state': newstate, 'demo': module_demo})

    return demo
@assert_log_admin_access
def button_install(self):
    """Schedule installation of the modules in ``self`` ('to install').

    Also marks auto-installable modules whose requirements become
    satisfied, then validates per-module and per-category exclusions.

    :return: the scheduled-upgrades wizard action
    :raises UserError: if incompatible modules would be installed together
    """
    # domain to select auto-installable (but not yet installed) modules
    auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]

    # determine whether an auto-install module must be installed:
    #  - all its dependencies are installed or to be installed,
    #  - at least one dependency is 'to install'
    install_states = frozenset(('installed', 'to install', 'to upgrade'))
    def must_install(module):
        states = {dep.state for dep in module.dependencies_id if dep.auto_install_required}
        return states <= install_states and 'to install' in states

    modules = self
    while modules:
        # Mark the given modules and their dependencies to be installed.
        modules._state_update('to install', ['uninstalled'])
        # Determine which auto-installable modules must be installed.
        modules = self.search(auto_domain).filtered(must_install)

    # the modules that are installed/to install/to upgrade
    install_mods = self.search([('state', 'in', list(install_states))])

    # check individual exclusions
    install_names = {module.name for module in install_mods}
    for module in install_mods:
        for exclusion in module.exclusion_ids:
            if exclusion.name in install_names:
                msg = _('Modules "%s" and "%s" are incompatible.')
                raise UserError(msg % (module.shortdesc, exclusion.exclusion_id.shortdesc))

    # check category exclusions
    def closure(module):
        # transitive closure of `module` over its direct dependencies
        todo = result = module
        while todo:
            result |= todo
            todo = todo.dependencies_id.depend_id
        return result

    exclusives = self.env['ir.module.category'].search([('exclusive', '=', True)])
    for category in exclusives:
        # retrieve installed modules in category and sub-categories
        categories = category.search([('id', 'child_of', category.ids)])
        modules = install_mods.filtered(lambda mod: mod.category_id in categories)

        # the installation is valid if all installed modules in categories
        # belong to the transitive dependencies of one of them
        if modules and not any(modules <= closure(module) for module in modules):
            msg = _('You are trying to install incompatible modules in category "%s":')
            labels = dict(self.fields_get(['state'])['state']['selection'])
            raise UserError("\n".join([msg % category.name] + [
                "- %s (%s)" % (module.shortdesc, labels[module.state])
                for module in modules
            ]))

    return dict(ACTION_DICT, name=_('Install'))
@assert_log_admin_access
def button_immediate_install(self):
    """ Installs the selected module(s) immediately and fully,
    returns the next res.config action to execute

    :returns: next res.config item to execute
    :rtype: dict[str, object]
    """
    _logger.info('User #%d triggered module installation', self.env.uid)
    # We use here the request object (which is thread-local) as a kind of
    # "global" env because the env is not usable in the following use case.
    # When installing a Chart of Account, I would like to send the
    # allowed companies to configure it on the correct company.
    # Otherwise, the SUPERUSER won't be aware of that and will try to
    # configure the CoA on his own company, which makes no sense.
    if request:
        request.allowed_company_ids = self.env.companies.ids
    # delegate to the generic flow (install + registry reload)
    return self._button_immediate_function(type(self).button_install)
@assert_log_admin_access
def button_install_cancel(self):
    """Cancel a pending installation of the selected modules."""
    values = {'state': 'uninstalled', 'demo': False}
    self.write(values)
    return True
@assert_log_admin_access
def module_uninstall(self):
    """ Perform the various steps required to uninstall a module completely
    including the deletion of all database structures created by the module:
    tables, columns, constraints, etc.

    :return: True
    """
    modules_to_remove = self.mapped('name')
    # delete all records (and DB structures) that belong to these modules
    self.env['ir.model.data']._module_data_uninstall(modules_to_remove)
    # we deactivate prefetching to not try to read a column that has been deleted
    self.with_context(prefetch_fields=False).write({'state': 'uninstalled', 'latest_version': False})
    return True
def _remove_copied_views(self):
    """ Remove the copies of the views installed by the modules in `self`.

    Those copies do not have an external id so they will not be cleaned by
    `_module_data_uninstall`. This is why we rely on `key` instead.

    It is important to remove these copies because using them will crash if
    they rely on data that don't exist anymore if the module is removed.
    """
    # match views whose key is prefixed by any of the module names
    domain = expression.OR([[('key', '=like', m.name + '.%')] for m in self])
    orphans = self.env['ir.ui.view'].with_context(**{'active_test': False, MODULE_UNINSTALL_FLAG: True}).search(domain)
    orphans.unlink()
@api.returns('self')
def downstream_dependencies(self, known_deps=None,
                            exclude_states=('uninstalled', 'uninstallable', 'to remove')):
    """ Return the modules that directly or indirectly depend on the modules
    in `self`, and that satisfy the `exclude_states` filter.

    :param known_deps: accumulator recordset used by the recursion
    :param exclude_states: module states to ignore
    """
    if not self:
        return self
    known_deps = known_deps or self.browse()
    # raw SQL for efficiency: one query per recursion level
    query = """ SELECT DISTINCT m.id
                FROM ir_module_module_dependency d
                JOIN ir_module_module m ON (d.module_id=m.id)
                WHERE
                    d.name IN (SELECT name from ir_module_module where id in %s) AND
                    m.state NOT IN %s AND
                    m.id NOT IN %s """
    self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
    new_deps = self.browse([row[0] for row in self._cr.fetchall()])
    missing_mods = new_deps - known_deps
    known_deps |= new_deps
    if missing_mods:
        # recurse on the newly discovered dependents only
        known_deps |= missing_mods.downstream_dependencies(known_deps, exclude_states)
    return known_deps
@api.returns('self')
def upstream_dependencies(self, known_deps=None,
                          exclude_states=('installed', 'uninstallable', 'to remove')):
    """ Return the dependency tree of modules of the modules in `self`, and
    that satisfy the `exclude_states` filter.

    :param known_deps: accumulator recordset used by the recursion
    :param exclude_states: module states to ignore
    """
    if not self:
        return self
    known_deps = known_deps or self.browse()
    # raw SQL for efficiency: one query per recursion level
    query = """ SELECT DISTINCT m.id
                FROM ir_module_module_dependency d
                JOIN ir_module_module m ON (d.module_id=m.id)
                WHERE
                    m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND
                    m.state NOT IN %s AND
                    m.id NOT IN %s """
    self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
    new_deps = self.browse([row[0] for row in self._cr.fetchall()])
    missing_mods = new_deps - known_deps
    known_deps |= new_deps
    if missing_mods:
        # recurse on the newly discovered dependencies only
        known_deps |= missing_mods.upstream_dependencies(known_deps, exclude_states)
    return known_deps
def next(self):
    """
    Return the action linked to an ir.actions.todo if there exists one that
    should be executed. Otherwise, redirect to /web

    NOTE: shadows the ``next`` builtin; kept for backward compatibility.
    """
    Todos = self.env['ir.actions.todo']
    _logger.info('getting next %s', Todos)
    active_todo = Todos.search([('state', '=', 'open')], limit=1)
    if active_todo:
        _logger.info('next action is "%s"', active_todo.name)
        return active_todo.action_launch()
    return {
        'type': 'ir.actions.act_url',
        'target': 'self',
        'url': '/web',
    }
def _button_immediate_function(self, function):
    """Run ``function`` on ``self``, then rebuild the registry and return
    the follow-up client action.

    :param function: unbound method to apply (e.g. ``Module.button_install``)
    :return: next res.config action, or a client reload action
    :raises RuntimeError: when called from within a test thread
    :raises UserError: when a cron run currently locks the ir_cron table
    """
    if getattr(threading.current_thread(), 'testing', False):
        raise RuntimeError(
            "Module operations inside tests are not transactional and thus forbidden.\n"
            "If you really need to perform module operations to test a specific behavior, it "
            "is best to write it as a standalone script, and ask the runbot/metastorm team "
            "for help."
        )
    try:
        # This is done because the installation/uninstallation/upgrade can modify a currently
        # running cron job and prevent it from finishing, and since the ir_cron table is locked
        # during execution, the lock won't be released until timeout.
        self._cr.execute("SELECT * FROM ir_cron FOR UPDATE NOWAIT")
    except psycopg2.OperationalError:
        raise UserError(_("Odoo is currently processing a scheduled action.\n"
                          "Module operations are not possible at this time, "
                          "please try again later or contact your system administrator."))
    function(self)

    # the registry rebuild requires the changes to be committed first
    self._cr.commit()
    registry = modules.registry.Registry.new(self._cr.dbname, update_module=True)
    self._cr.commit()
    self._cr.reset()
    assert self.env.registry is registry

    # pylint: disable=next-method-called
    config = self.env['ir.module.module'].next() or {}
    if config.get('type') not in ('ir.actions.act_window_close',):
        return config

    # reload the client; open the first available root menu
    menu = self.env['ir.ui.menu'].search([('parent_id', '=', False)])[:1]
    return {
        'type': 'ir.actions.client',
        'tag': 'reload',
        'params': {'menu_id': menu.id},
    }
@assert_log_admin_access
def button_immediate_uninstall(self):
    """
    Uninstall the selected module(s) immediately and fully,
    returns the next res.config action to execute
    """
    _logger.info('User #%d triggered module uninstallation', self.env.uid)
    uninstall = type(self).button_uninstall
    return self._button_immediate_function(uninstall)
@assert_log_admin_access
def button_uninstall(self):
    """Schedule uninstallation ('to remove') of the selected modules and
    every installed module depending on them.

    :return: the uninstall wizard action
    :raises UserError: for server-wide or not-installed modules
    """
    # server-wide modules (e.g. 'web') can never be uninstalled
    un_installable_modules = set(odoo.conf.server_wide_modules) & set(self.mapped('name'))
    if un_installable_modules:
        raise UserError(_("Those modules cannot be uninstalled: %s", ', '.join(un_installable_modules)))
    if any(state not in ('installed', 'to upgrade') for state in self.mapped('state')):
        raise UserError(_(
            "One or more of the selected modules have already been uninstalled, if you "
            "believe this to be an error, you may try again later or contact support."
        ))
    deps = self.downstream_dependencies()
    (self + deps).write({'state': 'to remove'})
    return dict(ACTION_DICT, name=_('Uninstall'))
@assert_log_admin_access
def button_uninstall_wizard(self):
    """ Launch the wizard to uninstall the given module. """
    action = {
        'type': 'ir.actions.act_window',
        'target': 'new',
        'name': _('Uninstall module'),
        'view_mode': 'form',
        'res_model': 'base.module.uninstall',
        'context': {'default_module_id': self.id},
    }
    return action
def button_uninstall_cancel(self):
    """Cancel a pending uninstallation of the selected modules."""
    values = {'state': 'installed'}
    self.write(values)
    return True
@assert_log_admin_access
def button_immediate_upgrade(self):
    """
    Upgrade the selected module(s) immediately and fully,
    return the next res.config action to execute
    """
    upgrade = type(self).button_upgrade
    return self._button_immediate_function(upgrade)
@assert_log_admin_access
def button_upgrade(self):
    """Schedule an upgrade ('to upgrade') of the modules in ``self``.

    Installed modules that depend on them are upgraded as well, and
    uninstalled dependencies are scheduled for installation.

    :return: the scheduled-upgrades wizard action
    :raises UserError: on not-installed modules or unknown dependencies
    """
    if not self:
        return
    Dependency = self.env['ir.module.module.dependency']
    self.update_list()

    todo = list(self)
    if 'base' in self.mapped('name'):
        # If an installed module is only present in the dependency graph through
        # a new, uninstalled dependency, it will not have been selected yet.
        # An update of 'base' should also update these modules, and as a consequence,
        # install the new dependency.
        todo.extend(self.search([
            ('state', '=', 'installed'),
            ('name', '!=', 'studio_customization'),
            ('id', 'not in', self.ids),
        ]))
    i = 0
    while i < len(todo):
        module = todo[i]
        i += 1
        if module.state not in ('installed', 'to upgrade'):
            raise UserError(_("Can not upgrade module '%s'. It is not installed.") % (module.name,))
        if self.get_module_info(module.name).get("installable", True):
            self.check_external_dependencies(module.name, 'to upgrade')
        # also upgrade every installed module that depends on this one
        for dep in Dependency.search([('name', '=', module.name)]):
            if (
                dep.module_id.state == 'installed'
                and dep.module_id not in todo
                and dep.module_id.name != 'studio_customization'
            ):
                todo.append(dep.module_id)

    self.browse(module.id for module in todo).write({'state': 'to upgrade'})

    to_install = []
    for module in todo:
        # skip modules that became uninstallable on disk
        if not self.get_module_info(module.name).get("installable", True):
            continue
        for dep in module.dependencies_id:
            if dep.state == 'unknown':
                raise UserError(_('You try to upgrade the module %s that depends on the module: %s.\nBut this module is not available in your system.') % (module.name, dep.name,))
            if dep.state == 'uninstalled':
                to_install += self.search([('name', '=', dep.name)]).ids

    self.browse(to_install).button_install()
    return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
@assert_log_admin_access
def button_upgrade_cancel(self):
    """Cancel a pending upgrade of the selected modules."""
    values = {'state': 'installed'}
    self.write(values)
    return True
@staticmethod
def get_values_from_terp(terp):
    """Map a module manifest (``__manifest__.py`` dict) onto the
    corresponding ``ir.module.module`` field values."""
    contributors = ', '.join(terp.get('contributors', []))
    return {
        'description': terp.get('description', ''),
        'shortdesc': terp.get('name', ''),
        'author': terp.get('author', 'Unknown'),
        'maintainer': terp.get('maintainer', False),
        'contributors': contributors or False,
        'website': terp.get('website', ''),
        'license': terp.get('license', 'LGPL-3'),
        'sequence': terp.get('sequence', 100),
        'application': terp.get('application', False),
        # any non-False manifest value (True, domain list...) means auto-install
        'auto_install': terp.get('auto_install', False) is not False,
        'icon': terp.get('icon', False),
        'summary': terp.get('summary', ''),
        'url': terp.get('url') or terp.get('live_test_url', ''),
        'to_buy': False,
    }
@api.model
def create(self, vals):
    """Create the module record plus a non-updatable ``ir.model.data``
    entry so the record survives module data updates.

    :param dict vals: field values, must contain ``name``
    :return: the new module record
    """
    new = super(Module, self).create(vals)
    module_metadata = {
        'name': 'module_%s' % vals['name'],
        'model': 'ir.module.module',
        'module': 'base',
        'res_id': new.id,
        'noupdate': True,
    }
    self.env['ir.model.data'].create(module_metadata)
    return new
# update the list of available packages
@assert_log_admin_access
@api.model
def update_list(self):
    """Synchronize database module records with the modules found on disk.

    :return: ``[updated, added]`` counters
    :rtype: list
    """
    res = [0, 0]    # [update, add]

    default_version = modules.adapt_version('1.0')
    known_mods = self.with_context(lang=None).search([])
    known_mods_names = {mod.name: mod for mod in known_mods}

    # iterate through detected modules and update/create them in db
    for mod_name in modules.get_modules():
        mod = known_mods_names.get(mod_name)
        terp = self.get_module_info(mod_name)
        values = self.get_values_from_terp(terp)

        if mod:
            updated_values = {}
            for key in values:
                old = getattr(mod, key)
                # only write values that actually changed (and are meaningful)
                if (old or values[key]) and values[key] != old:
                    updated_values[key] = values[key]
            if terp.get('installable', True) and mod.state == 'uninstallable':
                updated_values['state'] = 'uninstalled'
            if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
                res[0] += 1
            if updated_values:
                mod.write(updated_values)
        else:
            mod_path = modules.get_module_path(mod_name)
            if not mod_path or not terp:
                continue
            state = "uninstalled" if terp.get('installable', True) else "uninstallable"
            mod = self.create(dict(name=mod_name, state=state, **values))
            res[1] += 1

        # keep dependency/exclusion/category rows in sync with the manifest
        mod._update_dependencies(terp.get('depends', []), terp.get('auto_install'))
        mod._update_exclusions(terp.get('excludes', []))
        mod._update_category(terp.get('category', 'Uncategorized'))

    return res
@assert_log_admin_access
def download(self, download=True):
    """Kept for backward compatibility; module download produces nothing."""
    result = []
    return result
@assert_log_admin_access
@api.model
def install_from_urls(self, urls):
    """Download modules from the Apps server and install them.

    :param dict urls: mapping of module name to download URL; a falsy URL
        means the local version is already the latest one
    :return: the action returned by the triggered installation, or a
        client action asking for a reload after server restart
    :raises AccessDenied: if the user lacks system rights, or a URL does
        not point to the configured apps server
    :raises UserError: if the addons dir is not writable or a download fails
    """
    if not self.env.user.has_group('base.group_system'):
        raise AccessDenied()

    # One-click install is opt-in - cfr Issue #15225
    ad_dir = tools.config.addons_data_dir
    if not os.access(ad_dir, os.W_OK):
        msg = (_("Automatic install of downloaded Apps is currently disabled.") + "\n\n" +
               _("To enable it, make sure this directory exists and is writable on the server:") +
               "\n%s" % ad_dir)
        _logger.warning(msg)
        raise UserError(msg)

    apps_server = werkzeug.urls.url_parse(self.get_apps_server())

    OPENERP = odoo.release.product_name.lower()
    tmp = tempfile.mkdtemp()
    _logger.debug('Install from url: %r', urls)
    try:
        # 1. Download & unzip missing modules
        for module_name, url in urls.items():
            if not url:
                continue    # nothing to download, local version is already the last one
            # only accept downloads from the configured apps server
            up = werkzeug.urls.url_parse(url)
            if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
                raise AccessDenied()
            try:
                _logger.info('Downloading module `%s` from OpenERP Apps', module_name)
                response = requests.get(url)
                response.raise_for_status()
                content = response.content
            except Exception:
                _logger.exception('Failed to fetch module %s', module_name)
                raise UserError(_('The `%s` module appears to be unavailable at the moment, please try again later.', module_name))
            else:
                zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
                assert os.path.isdir(os.path.join(tmp, module_name))

        # 2a. Copy/Replace module source in addons path
        for module_name, url in urls.items():
            if module_name == OPENERP or not url:
                continue    # OPENERP is special case, handled below, and no URL means local module
            module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
            bck = backup(module_path, False)
            _logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
            shutil.move(os.path.join(tmp, module_name), module_path)
            if bck:
                shutil.rmtree(bck)

        # 2b. Copy/Replace server+base module source if downloaded
        if urls.get(OPENERP):
            # special case. it contains the server and the base module.
            # extract path is not the same
            base_path = os.path.dirname(modules.get_module_path('base'))

            # copy all modules in the SERVER/odoo/addons directory to the new "odoo" module (except base itself)
            for d in os.listdir(base_path):
                if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
                    destdir = os.path.join(tmp, OPENERP, 'addons', d)    # XXX 'odoo' subdirectory ?
                    shutil.copytree(os.path.join(base_path, d), destdir)

            # then replace the server by the new "base" module
            server_dir = tools.config['root_path']    # XXX or dirname()
            bck = backup(server_dir)
            _logger.info('Copy downloaded module `odoo` to `%s`', server_dir)
            shutil.move(os.path.join(tmp, OPENERP), server_dir)
            #if bck:
            #    shutil.rmtree(bck)

        self.update_list()

        with_urls = [module_name for module_name, url in urls.items() if url]
        downloaded = self.search([('name', 'in', with_urls)])
        installed = self.search([('id', 'in', downloaded.ids), ('state', '=', 'installed')])

        to_install = self.search([('name', 'in', list(urls)), ('state', '=', 'uninstalled')])
        post_install_action = to_install.button_immediate_install()

        if installed or to_install:
            # in this case, force server restart to reload python code...
            self._cr.commit()
            odoo.service.server.restart()
            return {
                'type': 'ir.actions.client',
                'tag': 'home',
                'params': {'wait': True},
            }
        return post_install_action
    finally:
        shutil.rmtree(tmp)
@api.model
def get_apps_server(self):
    """Return the URL of the Odoo Apps server (``apps_server`` config key)."""
    default_url = 'https://apps.odoo.com/apps'
    return tools.config.get('apps_server', default_url)
def _update_dependencies(self, depends=None, auto_install_requirements=()):
    """Synchronize the module's dependency rows with the manifest.

    :param depends: dependency names declared in the manifest
    :param auto_install_requirements: names whose installation is required
        to trigger auto-install of this module
    """
    existing = set(dep.name for dep in self.dependencies_id)
    needed = set(depends or [])
    # raw SQL: dependency rows are plain data, no ORM logic needed
    for dep in (needed - existing):
        self._cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (self.id, dep))
    for dep in (existing - needed):
        self._cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (self.id, dep))
    self._cr.execute('UPDATE ir_module_module_dependency SET auto_install_required = (name = any(%s)) WHERE module_id = %s',
                     (list(auto_install_requirements or ()), self.id))
    # the ORM cache is stale after the raw SQL above
    self.invalidate_cache(['dependencies_id'], self.ids)
def _update_exclusions(self, excludes=None):
    """Synchronize the module's exclusion rows with the manifest.

    :param excludes: excluded module names declared in the manifest
    """
    existing = set(excl.name for excl in self.exclusion_ids)
    needed = set(excludes or [])
    # raw SQL: exclusion rows are plain data, no ORM logic needed
    for name in (needed - existing):
        self._cr.execute('INSERT INTO ir_module_module_exclusion (module_id, name) VALUES (%s, %s)', (self.id, name))
    for name in (existing - needed):
        self._cr.execute('DELETE FROM ir_module_module_exclusion WHERE module_id=%s AND name=%s', (self.id, name))
    # the ORM cache is stale after the raw SQL above
    self.invalidate_cache(['exclusion_ids'], self.ids)
def _update_category(self, category='Uncategorized'):
    """Assign the module to ``category``, creating the category chain
    (``Parent/Child/...``) if it does not match the current one.

    :param str category: '/'-separated category path from the manifest
    """
    current_category = self.category_id
    # rebuild the full path of the current category for comparison
    current_category_path = []
    while current_category:
        current_category_path.insert(0, current_category.name)
        current_category = current_category.parent_id

    categs = category.split('/')
    if categs != current_category_path:
        cat_id = modules.db.create_categories(self._cr, categs)
        self.write({'category_id': cat_id})
def _update_translations(self, filter_lang=None, overwrite=False):
    """Load translation terms for the modules in ``self``.

    :param filter_lang: language code(s) to load; defaults to all
        installed languages
    :param bool overwrite: whether existing translations are overwritten
    """
    if not filter_lang:
        langs = self.env['res.lang'].get_installed()
        filter_lang = [code for code, _ in langs]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]

    update_mods = self.filtered(lambda r: r.state in ('installed', 'to install', 'to upgrade'))
    mod_dict = {
        mod.name: mod.dependencies_id.mapped('name')
        for mod in update_mods
    }
    # load base modules first so dependent modules can override their terms
    mod_names = topological_sort(mod_dict)
    self.env['ir.translation']._load_module_terms(mod_names, filter_lang, overwrite)
def _check(self):
    """Log a warning for every module whose description is empty."""
    for module in self.filtered(lambda m: not m.description_html):
        _logger.warning('module %s: description is empty !', module.name)
@api.model
@tools.ormcache()
def _installed(self):
    """ Return the set of installed modules as a dictionary {name: id} """
    # cached via ormcache: invalidated by clear_caches() on module changes
    return {
        module.name: module.id
        for module in self.sudo().search([('state', '=', 'installed')])
    }
@api.model
def search_panel_select_range(self, field_name, **kwargs):
    """Return the category hierarchy shown in the Apps search panel.

    Only top-level categories that contain modules are returned; theme
    (and, for non-technical users, hidden) categories are filtered out.
    Falls back to the default implementation for other fields.
    """
    if field_name == 'category_id':
        enable_counters = kwargs.get('enable_counters', False)
        domain = [('parent_id', '=', False), ('child_ids.module_ids', '!=', False)]

        excluded_xmlids = [
            'base.module_category_website_theme',
            'base.module_category_theme',
        ]
        if not self.user_has_groups('base.group_no_one'):
            excluded_xmlids.append('base.module_category_hidden')

        excluded_category_ids = []
        for excluded_xmlid in excluded_xmlids:
            # ref(..., False): missing xmlids are silently skipped
            categ = self.env.ref(excluded_xmlid, False)
            if not categ:
                continue
            excluded_category_ids.append(categ.id)

        if excluded_category_ids:
            domain = expression.AND([
                domain,
                [('id', 'not in', excluded_category_ids)],
            ])

        records = self.env['ir.module.category'].search_read(domain, ['display_name'], order="sequence")

        values_range = OrderedDict()
        for record in records:
            record_id = record['id']
            if enable_counters:
                model_domain = expression.AND([
                    kwargs.get('search_domain', []),
                    kwargs.get('category_domain', []),
                    kwargs.get('filter_domain', []),
                    [('category_id', 'child_of', record_id), ('category_id', 'not in', excluded_category_ids)]
                ])
                record['__count'] = self.env['ir.module.module'].search_count(model_domain)
            values_range[record_id] = record

        return {
            'parent_field': 'parent_id',
            'values': list(values_range.values()),
        }

    return super(Module, self).search_panel_select_range(field_name, **kwargs)
# Dependency states: the module states plus 'unknown', used when the
# dependency's module is not present in the database at all.
DEP_STATES = STATES + [('unknown', 'Unknown')]
class ModuleDependency(models.Model):
    _name = "ir.module.module.dependency"
    _description = "Module dependency"

    # the dependency name
    name = fields.Char(index=True)

    # the module that depends on it
    module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')

    # the module corresponding to the dependency, and its status
    depend_id = fields.Many2one('ir.module.module', 'Dependency',
                                compute='_compute_depend', search='_search_depend')
    state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')

    auto_install_required = fields.Boolean(
        default=True,
        help="Whether this dependency blocks automatic installation "
             "of the dependent")

    @api.depends('name')
    def _compute_depend(self):
        """Resolve each dependency name to its module record, if any."""
        # retrieve all modules corresponding to the dependency names
        names = list(set(dep.name for dep in self))
        mods = self.env['ir.module.module'].search([('name', 'in', names)])

        # index modules by name, and assign dependencies
        name_mod = dict((mod.name, mod) for mod in mods)
        for dep in self:
            dep.depend_id = name_mod.get(dep.name)

    def _search_depend(self, operator, value):
        # only 'in' is supported on this non-stored computed field
        assert operator == 'in'
        modules = self.env['ir.module.module'].browse(set(value))
        return [('name', 'in', modules.mapped('name'))]

    @api.depends('depend_id.state')
    def _compute_state(self):
        """Mirror the state of the resolved module, or 'unknown'."""
        for dependency in self:
            dependency.state = dependency.depend_id.state or 'unknown'
class ModuleExclusion(models.Model):
    _name = "ir.module.module.exclusion"
    _description = "Module exclusion"

    # the exclusion name
    name = fields.Char(index=True)

    # the module that excludes it
    module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')

    # the module corresponding to the exclusion, and its status
    exclusion_id = fields.Many2one('ir.module.module', 'Exclusion Module',
                                   compute='_compute_exclusion', search='_search_exclusion')
    state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')

    @api.depends('name')
    def _compute_exclusion(self):
        """Resolve each exclusion name to its module record, if any."""
        # retrieve all modules corresponding to the exclusion names
        names = list(set(excl.name for excl in self))
        mods = self.env['ir.module.module'].search([('name', 'in', names)])

        # index modules by name, and assign dependencies
        name_mod = {mod.name: mod for mod in mods}
        for excl in self:
            excl.exclusion_id = name_mod.get(excl.name)

    def _search_exclusion(self, operator, value):
        # only 'in' is supported on this non-stored computed field
        assert operator == 'in'
        modules = self.env['ir.module.module'].browse(set(value))
        return [('name', 'in', modules.mapped('name'))]

    @api.depends('exclusion_id.state')
    def _compute_state(self):
        """Mirror the state of the resolved module, or 'unknown'."""
        for exclusion in self:
            exclusion.state = exclusion.exclusion_id.state or 'unknown'
| 44.779412 | 48,720 |
860 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class ImageMixin(models.AbstractModel):
    """Mixin adding a main 1920px image plus stored, auto-resized copies."""
    _name = 'image.mixin'
    _description = "Image Mixin"

    # all image fields are base64 encoded and PIL-supported
    image_1920 = fields.Image("Image", max_width=1920, max_height=1920)

    # resized fields stored (as attachment) for performance
    image_1024 = fields.Image("Image 1024", related="image_1920", max_width=1024, max_height=1024, store=True)
    image_512 = fields.Image("Image 512", related="image_1920", max_width=512, max_height=512, store=True)
    image_256 = fields.Image("Image 256", related="image_1920", max_width=256, max_height=256, store=True)
    image_128 = fields.Image("Image 128", related="image_1920", max_width=128, max_height=128, store=True)
| 45.263158 | 860 |
3,953 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Store database-specific configuration parameters
"""
import uuid
import logging
from odoo import api, fields, models
from odoo.tools import config, ormcache, mute_logger
_logger = logging.getLogger(__name__)
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
"database.secret": lambda: str(uuid.uuid4()),
"database.uuid": lambda: str(uuid.uuid1()),
"database.create_date": fields.Datetime.now,
"web.base.url": lambda: "http://localhost:%s" % config.get('http_port'),
"base.login_cooldown_after": lambda: 10,
"base.login_cooldown_duration": lambda: 60,
}
class IrConfigParameter(models.Model):
    """Per-database storage of configuration key-value pairs."""
    _name = 'ir.config_parameter'
    _description = 'System Parameter'
    _rec_name = 'key'
    _order = 'key'

    key = fields.Char(required=True, index=True)
    value = fields.Text(required=True)

    _sql_constraints = [
        ('key_uniq', 'unique (key)', 'Key must be unique.')
    ]

    @mute_logger('odoo.addons.base.models.ir_config_parameter')
    def init(self, force=False):
        """
        Initializes the parameters listed in _default_parameters.
        It overrides existing parameters if force is ``True``.
        """
        # avoid prefetching during module installation, as the res_users table
        # may not have all prescribed columns
        self = self.with_context(prefetch_fields=False)
        for key, func in _default_parameters.items():
            # force=True skips search and always performs the 'if' body (because ids=False)
            params = self.sudo().search([('key', '=', key)])
            if force or not params:
                params.set_param(key, func())

    @api.model
    def get_param(self, key, default=False):
        """Retrieve the value for a given key.

        :param string key: The key of the parameter value to retrieve.
        :param string default: default value if parameter is missing.
        :return: The value of the parameter, or ``default`` if it does not exist.
        :rtype: string
        """
        self.check_access_rights('read')
        return self._get_param(key) or default

    @api.model
    @ormcache('key')
    def _get_param(self, key):
        # we bypass the ORM because get_param() is used in some field's depends,
        # and must therefore work even when the ORM is not ready to work
        self.flush(['key', 'value'])
        self.env.cr.execute("SELECT value FROM ir_config_parameter WHERE key = %s", [key])
        result = self.env.cr.fetchone()
        return result and result[0]

    @api.model
    def set_param(self, key, value):
        """Sets the value of a parameter.

        Writing ``False``/``None`` deletes the parameter.

        :param string key: The key of the parameter value to set.
        :param string value: The value to set.
        :return: the previous value of the parameter or False if it did
                 not exist.
        :rtype: string
        """
        param = self.search([('key', '=', key)])
        if param:
            old = param.value
            if value is not False and value is not None:
                if str(value) != old:
                    param.write({'value': value})
            else:
                param.unlink()
            return old
        else:
            if value is not False and value is not None:
                self.create({'key': key, 'value': value})
            return False

    # create/write/unlink all invalidate the ormcache on _get_param()

    @api.model_create_multi
    def create(self, vals_list):
        self.clear_caches()
        return super(IrConfigParameter, self).create(vals_list)

    def write(self, vals):
        self.clear_caches()
        return super(IrConfigParameter, self).write(vals)

    def unlink(self):
        self.clear_caches()
        return super(IrConfigParameter, self).unlink()
| 34.675439 | 3,953 |
80,125 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import binascii
import contextlib
import datetime
import hmac
import ipaddress
import itertools
import json
import logging
import os
import time
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, repeat
import passlib.context
import pytz
from lxml import etree
from lxml.builder import E
from psycopg2 import sql
from odoo import api, fields, models, tools, SUPERUSER_ID, _, Command
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import AccessDenied, AccessError, UserError, ValidationError
from odoo.http import request
from odoo.modules.module import get_module_resource
from odoo.osv import expression
from odoo.service.db import check_super
from odoo.tools import partition, collections, frozendict, lazy_property, image_process
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = []

DEFAULT_CRYPT_CONTEXT = passlib.context.CryptContext(
    # kdf which can be verified by the context. The default encryption kdf is
    # the first of the list
    ['pbkdf2_sha512', 'plaintext'],
    # deprecated algorithms are still verified as usual, but ``needs_update``
    # will indicate that the stored hash should be replaced by a more recent
    # algorithm. Passlib 1.6 supports an `auto` value which deprecates any
    # algorithm but the default, but Ubuntu LTS only provides 1.5 so far.
    deprecated=['plaintext'],
)

# shorthand: flatten an iterable of iterables
concat = chain.from_iterable
#
# Functions for manipulating boolean and selection pseudo-fields
#
def name_boolean_group(id):
    """Return the reified boolean field name for membership in group ``id``."""
    return f'in_group_{id}'
def name_selection_groups(ids):
    """Return the reified selection field name for the exclusive groups ``ids``."""
    suffix = '_'.join(str(it) for it in sorted(ids))
    return f'sel_groups_{suffix}'
def is_boolean_group(name):
    """Return True if ``name`` is a reified boolean group pseudo-field."""
    return name[:9] == 'in_group_'
def is_selection_groups(name):
    """Return True if ``name`` is a reified selection group pseudo-field."""
    return name[:11] == 'sel_groups_'
def is_reified_group(name):
    """Return True if ``name`` is any reified group pseudo-field."""
    return is_selection_groups(name) or is_boolean_group(name)
def get_boolean_group(name):
    """Extract the group id from a reified boolean group field name."""
    return int(name[len('in_group_'):])
def get_selection_groups(name):
    """Extract the group ids from a reified selection group field name."""
    return [int(part) for part in name[len('sel_groups_'):].split('_')]
def parse_m2m(commands):
    "return a list of ids corresponding to a many2many value"
    ids = []
    for command in commands:
        # bare ids are kept as-is
        if not isinstance(command, (tuple, list)):
            ids.append(command)
            continue
        if command[0] in (Command.UPDATE, Command.LINK):
            ids.append(command[1])
        elif command[0] == Command.CLEAR:
            ids = []
        elif command[0] == Command.SET:
            ids = list(command[2])
    return ids
def _jsonable(o):
try: json.dumps(o)
except TypeError: return False
else: return True
def check_identity(fn):
    """ Wrapped method should be an *action method* (called from a button
    type=object), and requires extra security to be executed. This decorator
    checks if the identity (password) has been checked in the last 10mn, and
    pops up an identity check wizard if not.

    Prevents access outside of interactive contexts (aka with a request)
    """
    @wraps(fn)
    def wrapped(self):
        # the freshness check relies on the HTTP session, so this only works
        # in interactive (request-bound) contexts
        if not request:
            raise UserError(_("This method can only be accessed over HTTP"))

        if request.session.get('identity-check-last', 0) > time.time() - 10 * 60:
            # update identity-check-last like github?
            return fn(self)

        # identity not confirmed recently: record the pending call (context,
        # model, ids, method name) on a wizard, and open that wizard instead
        w = self.sudo().env['res.users.identitycheck'].create({
            'request': json.dumps([
                { # strip non-jsonable keys (e.g. mapped to recordsets like binary_field_real_user)
                    k: v for k, v in self.env.context.items()
                    if _jsonable(v)
                },
                self._name,
                self.ids,
                fn.__name__
            ])
        })
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'res.users.identitycheck',
            'res_id': w.id,
            'name': _("Security Control"),
            'target': 'new',
            'views': [(False, 'form')],
        }
    # marker attribute so other code can detect identity-protected methods
    wrapped.__has_check_identity = True
    return wrapped
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class Groups(models.Model):
    """Access groups: bundle users together with the ACLs, record rules,
    menus and views they grant access to.
    """
    _name = "res.groups"
    _description = "Access Groups"
    _rec_name = 'full_name'
    _order = 'name'

    name = fields.Char(required=True, translate=True)
    users = fields.Many2many('res.users', 'res_groups_users_rel', 'gid', 'uid')
    model_access = fields.One2many('ir.model.access', 'group_id', string='Access Controls', copy=True)
    rule_groups = fields.Many2many('ir.rule', 'rule_group_rel',
        'group_id', 'rule_group_id', string='Rules', domain=[('global', '=', False)])
    menu_access = fields.Many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', string='Access Menu')
    view_access = fields.Many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', string='Views')
    comment = fields.Text(translate=True)
    category_id = fields.Many2one('ir.module.category', string='Application', index=True)
    color = fields.Integer(string='Color Index')
    full_name = fields.Char(compute='_compute_full_name', string='Group Name', search='_search_full_name')
    share = fields.Boolean(string='Share Group', help="Group created to set access rights for sharing data with some users.")

    _sql_constraints = [
        ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
    ]

    @api.constrains('users')
    def _check_one_user_type(self):
        # delegate the portal/public/internal exclusivity check to the users
        self.users._check_one_user_type()

    @api.ondelete(at_uninstall=False)
    def _unlink_except_settings_group(self):
        # groups bound to a res.config.settings boolean must not be deleted,
        # otherwise the settings field would dangle
        classified = self.env['res.config.settings']._get_classified_fields()
        for _name, _groups, implied_group in classified['group']:
            if implied_group.id in self.ids:
                raise ValidationError(_('You cannot delete a group linked with a settings field.'))

    @api.depends('category_id.name', 'name')
    def _compute_full_name(self):
        # Important: value must be stored in environment of group, not group1!
        for group, group1 in zip(self, self.sudo()):
            if group1.category_id:
                group.full_name = '%s / %s' % (group1.category_id.name, group1.name)
            else:
                group.full_name = group1.name

    def _search_full_name(self, operator, operand):
        """Search support for the computed ``full_name`` field.

        ``operand`` may be a bool, a single "Category / Group" string, or a
        list of such strings.
        """
        lst = True
        if isinstance(operand, bool):
            domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            # BUGFIX: this used to read
            #   if operator in expression.NEGATIVE_TERM_OPERATORS == (not operand):
            # which Python chains as
            #   (operator in NEG) and (NEG == (not operand))
            # and is therefore always False (a collection never equals a bool),
            # making the AND branch dead code. Parenthesize like the general
            # case below so negative-operator/False-operand pairs AND properly.
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
                return expression.AND(domains)
            else:
                return expression.OR(domains)
        if isinstance(operand, str):
            lst = False
            operand = [operand]
        where = []
        for group in operand:
            values = [v for v in group.split('/') if v]
            group_name = values.pop().strip()
            category_name = values and '/'.join(values).strip() or group_name
            group_domain = [('name', operator, lst and [group_name] or group_name)]
            category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
            if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
                # a negative match on the name alone must also accept groups
                # without any category
                category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
                sub_where = expression.AND([group_domain, category_domain])
            else:
                sub_where = expression.OR([group_domain, category_domain])
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                where = expression.AND([where, sub_where])
            else:
                where = expression.OR([where, sub_where])
        return where

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        # add explicit ordering if search is sorted on full_name: the computed
        # field cannot be sorted by SQL, so sort the records in Python
        if order and order.startswith('full_name'):
            groups = super(Groups, self).search(args)
            groups = groups.sorted('full_name', reverse=order.endswith('DESC'))
            groups = groups[offset:offset+limit] if limit else groups[offset:]
            return len(groups) if count else groups.ids
        return super(Groups, self)._search(args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)

    def copy(self, default=None):
        """Duplicate the group, defaulting the name to "<name> (copy)"."""
        self.ensure_one()
        chosen_name = default.get('name') if default else ''
        default_name = chosen_name or _('%s (copy)', self.name)
        default = dict(default or {}, name=default_name)
        return super(Groups, self).copy(default)

    def write(self, vals):
        if 'name' in vals:
            if vals['name'].startswith('-'):
                raise UserError(_('The name of the group can not start with "-"'))
        # invalidate caches before updating groups, since the recomputation of
        # field 'share' depends on method has_group()
        # DLE P139
        if self.ids:
            self.env['ir.model.access'].call_cache_clearing_methods()
        return super(Groups, self).write(vals)
class ResUsersLog(models.Model):
    _name = 'res.users.log'
    _order = 'id desc'
    _description = 'Users Log'
    # Currently only uses the magical fields: create_uid, create_date,
    # for recording logins. To be extended for other uses (chat presence, etc.)

    @api.autovacuum
    def _gc_user_logs(self):
        """Garbage-collect log rows, keeping only the latest one per user."""
        # delete every row for which the same user has a more recent row
        self._cr.execute("""
            DELETE FROM res_users_log log1 WHERE EXISTS (
                SELECT 1 FROM res_users_log log2
                WHERE log1.create_uid = log2.create_uid
                AND log1.create_date < log2.create_date
            )
        """)
        _logger.info("GC'd %d user log entries", self._cr.rowcount)
class Users(models.Model):
    """ User class. A res.users record models an OpenERP user and is different
    from an employee.

    res.users class now inherits from res.partner. The partner model is
    used to store the data related to the partner: lang, name, address,
    avatar, ... The user model is now dedicated to technical data.
    """
    _name = "res.users"
    _description = 'Users'
    # each user delegates its partner-level data to a res.partner record
    _inherits = {'res.partner': 'partner_id'}
    _order = 'name, login'
    @property
    def SELF_READABLE_FIELDS(self):
        """ The list of fields a user can read on their own user record.
        In order to add fields, please override this property on model extensions.
        """
        # consumed by read() below: when a user reads only these fields on
        # their own record, access rights are bypassed via sudo()
        return [
            'signature', 'company_id', 'login', 'email', 'name', 'image_1920',
            'image_1024', 'image_512', 'image_256', 'image_128', 'lang', 'tz',
            'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id',
            'avatar_1920', 'avatar_1024', 'avatar_512', 'avatar_256', 'avatar_128',
        ]
    @property
    def SELF_WRITEABLE_FIELDS(self):
        """ The list of fields a user can write on their own user record.
        In order to add fields, please override this property on model extensions.
        """
        # consumed by write() below: same sudo() bypass as SELF_READABLE_FIELDS
        return ['signature', 'action_id', 'company_id', 'email', 'name', 'image_1920', 'lang', 'tz']
def _default_groups(self):
default_user_id = self.env['ir.model.data']._xmlid_to_res_id('base.default_user', raise_if_not_found=False)
return self.env['res.users'].browse(default_user_id).sudo().groups_id if default_user_id else []
partner_id = fields.Many2one('res.partner', required=True, ondelete='restrict', auto_join=True, index=True,
string='Related Partner', help='Partner-related data of the user')
login = fields.Char(required=True, help="Used to log into the system")
password = fields.Char(
compute='_compute_password', inverse='_set_password',
invisible=True, copy=False,
help="Keep empty if you don't want the user to be able to connect on the system.")
new_password = fields.Char(string='Set Password',
compute='_compute_password', inverse='_set_new_password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again.")
signature = fields.Html(string="Email Signature", default="")
active = fields.Boolean(default=True)
active_partner = fields.Boolean(related='partner_id.active', readonly=True, string="Partner is Active")
action_id = fields.Many2one('ir.actions.actions', string='Home Action',
help="If specified, this action will be opened at log on for this user, in addition to the standard menu.")
groups_id = fields.Many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', string='Groups', default=_default_groups)
log_ids = fields.One2many('res.users.log', 'create_uid', string='User log entries')
login_date = fields.Datetime(related='log_ids.create_date', string='Latest authentication', readonly=False)
share = fields.Boolean(compute='_compute_share', compute_sudo=True, string='Share User', store=True,
help="External user with limited access, created only for the purpose of sharing data.")
companies_count = fields.Integer(compute='_compute_companies_count', string="Number of Companies")
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company.id,
help='The default company for this user.', context={'user_preference': True})
company_ids = fields.Many2many('res.company', 'res_company_users_rel', 'user_id', 'cid',
string='Companies', default=lambda self: self.env.company.ids)
# overridden inherited fields to bypass access rights, in case you have
# access to the user but not its corresponding partner
name = fields.Char(related='partner_id.name', inherited=True, readonly=False)
email = fields.Char(related='partner_id.email', inherited=True, readonly=False)
accesses_count = fields.Integer('# Access Rights', help='Number of access rights that apply to the current user',
compute='_compute_accesses_count', compute_sudo=True)
rules_count = fields.Integer('# Record Rules', help='Number of record rules that apply to the current user',
compute='_compute_accesses_count', compute_sudo=True)
groups_count = fields.Integer('# Groups', help='Number of groups that apply to the current user',
compute='_compute_accesses_count', compute_sudo=True)
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def init(self):
cr = self.env.cr
# allow setting plaintext passwords via SQL and have them
# automatically encrypted at startup: look for passwords which don't
# match the "extended" MCF and pass those through passlib.
# Alternative: iterate on *all* passwords and use CryptContext.identify
cr.execute("""
SELECT id, password FROM res_users
WHERE password IS NOT NULL
AND password !~ '^\$[^$]+\$[^$]+\$.'
""")
if self.env.cr.rowcount:
Users = self.sudo()
for uid, pw in cr.fetchall():
Users.browse(uid).password = pw
def _set_password(self):
ctx = self._crypt_context()
hash_password = ctx.hash if hasattr(ctx, 'hash') else ctx.encrypt
for user in self:
self._set_encrypted_password(user.id, hash_password(user.password))
    def _set_encrypted_password(self, uid, pw):
        """Store an already-hashed password for user ``uid`` via direct SQL."""
        # never store a recoverable (plaintext) password
        assert self._crypt_context().identify(pw) != 'plaintext'

        self.env.cr.execute(
            'UPDATE res_users SET password=%s WHERE id=%s',
            (pw, uid)
        )
        # the SQL write bypassed the ORM: drop the stale cached value
        self.invalidate_cache(['password'], [uid])
def _check_credentials(self, password, env):
""" Validates the current user's password.
Override this method to plug additional authentication methods.
Overrides should:
* call `super` to delegate to parents for credentials-checking
* catch AccessDenied and perform their own checking
* (re)raise AccessDenied if the credentials are still invalid
according to their own validation method
When trying to check for credentials validity, call _check_credentials
instead.
"""
""" Override this method to plug additional authentication methods"""
assert password
self.env.cr.execute(
"SELECT COALESCE(password, '') FROM res_users WHERE id=%s",
[self.env.user.id]
)
[hashed] = self.env.cr.fetchone()
valid, replacement = self._crypt_context()\
.verify_and_update(password, hashed)
if replacement is not None:
self._set_encrypted_password(self.env.user.id, replacement)
if not valid:
raise AccessDenied()
    def _compute_password(self):
        """Computed getter for ``password``/``new_password``: hashes are never read back."""
        for user in self:
            user.password = ''
            user.new_password = ''
    def _set_new_password(self):
        """Inverse of ``new_password``: forward the value to ``password`` for other users."""
        for user in self:
            if not user.new_password:
                # Do not update the password if no value is provided, ignore silently.
                # For example web client submits False values for all empty fields.
                continue
            if user == self.env.user:
                # To change their own password, users must use the client-specific change password wizard,
                # so that the new password is immediately used for further RPC requests, otherwise the user
                # will face unexpected 'Access Denied' exceptions.
                raise UserError(_('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
            else:
                user.password = user.new_password
@api.depends('groups_id')
def _compute_share(self):
user_group_id = self.env['ir.model.data']._xmlid_to_res_id('base.group_user')
internal_users = self.filtered_domain([('groups_id', 'in', [user_group_id])])
internal_users.share = False
(self - internal_users).share = True
    def _compute_companies_count(self):
        """Total number of companies in the system (sudo: ignores record rules)."""
        self.companies_count = self.env['res.company'].sudo().search_count([])
@api.depends('tz')
def _compute_tz_offset(self):
for user in self:
user.tz_offset = datetime.datetime.now(pytz.timezone(user.tz or 'GMT')).strftime('%z')
    @api.depends('groups_id')
    def _compute_accesses_count(self):
        """Counters (ACLs, record rules, groups) for the user form's stat buttons."""
        for user in self:
            groups = user.groups_id
            user.accesses_count = len(groups.model_access)
            user.rules_count = len(groups.rule_groups)
            user.groups_count = len(groups)
@api.onchange('login')
def on_change_login(self):
if self.login and tools.single_email_re.match(self.login):
self.email = self.login
    @api.onchange('parent_id')
    def onchange_parent_id(self):
        # delegate to the related partner's own onchange handler
        return self.partner_id.onchange_parent_id()
    def _read(self, fields):
        """Mask USER_PRIVATE_FIELDS in cache for readers lacking write access."""
        super(Users, self)._read(fields)
        if set(USER_PRIVATE_FIELDS).intersection(fields):
            # users allowed to write the record may see the real values
            if self.check_access_rights('write', raise_exception=False):
                return
            for record in self:
                for f in USER_PRIVATE_FIELDS:
                    try:
                        # probe the cache first: only overwrite values that
                        # were actually fetched
                        record._cache[f]
                        record._cache[f] = '********'
                    except Exception:
                        # skip SpecialValue (e.g. for missing record or access right)
                        pass
    @api.constrains('company_id', 'company_ids')
    def _check_company(self):
        """The default company must be one of the user's allowed companies."""
        for user in self:
            if user.company_id not in user.company_ids:
                raise ValidationError(
                    _('Company %(company_name)s is not in the allowed companies for user %(user_name)s (%(company_allowed)s).',
                      company_name=user.company_id.name,
                      user_name=user.name,
                      company_allowed=', '.join(user.mapped('company_ids.name')))
                )
    @api.constrains('action_id')
    def _check_action_id(self):
        """Reject home actions that are unusable as a landing action."""
        action_open_website = self.env.ref('base.action_open_website', raise_if_not_found=False)
        if action_open_website and any(user.action_id.id == action_open_website.id for user in self):
            raise ValidationError(_('The "App Switcher" action cannot be selected as home action.'))
        # Prevent using reload actions.
        # We use sudo() because "Access rights" admins can't read action models
        for user in self.sudo():
            if user.action_id.type == "ir.actions.client":
                action = self.env["ir.actions.client"].browse(user.action_id.id)  # magic
                if action.tag == "reload":
                    raise ValidationError(_('The "%s" action cannot be selected as home action.', action.name))
    @api.constrains('groups_id')
    def _check_one_user_type(self):
        """We check that no users are both portal and users (same with public).
        This could typically happen because of implied groups.
        """
        user_types_category = self.env.ref('base.module_category_user_type', raise_if_not_found=False)
        user_types_groups = self.env['res.groups'].search(
            [('category_id', '=', user_types_category.id)]) if user_types_category else False
        if user_types_groups:  # needed at install
            if self._has_multiple_groups(user_types_groups.ids):
                raise ValidationError(_('The user cannot have more than one user types.'))
    def _has_multiple_groups(self, group_ids):
        """The method is not fast if the list of ids is very long;
        so we rather check all users than limit to the size of the group

        :param group_ids: list of group ids
        :return: boolean: is there at least a user in at least 2 of the provided groups
        """
        if group_ids:
            args = [tuple(group_ids)]
            if len(self.ids) == 1:
                # singleton: restrict the check to this user only
                where_clause = "AND r.uid = %s"
                args.append(self.id)
            else:
                where_clause = ""  # default; we check ALL users (actually pretty efficient)
            # a user matches when it has more than one gid among group_ids in
            # the users/groups relation table
            query = """
                    SELECT 1 FROM res_groups_users_rel WHERE EXISTS(
                        SELECT r.uid
                        FROM res_groups_users_rel r
                        WHERE r.gid IN %s""" + where_clause + """
                        GROUP BY r.uid HAVING COUNT(r.gid) > 1
                    )
            """
            self.env.cr.execute(query, args)
            return bool(self.env.cr.fetchall())
        else:
            return False
    def toggle_active(self):
        """Toggle users' active flag; re-activate the related partner along with the user."""
        for user in self:
            if not user.active and not user.partner_id.active:
                user.partner_id.toggle_active()
        super(Users, self).toggle_active()
    def read(self, fields=None, load='_classic_read'):
        """Let users read the safe subset of fields on their own record."""
        if fields and self == self.env.user:
            readable = self.SELF_READABLE_FIELDS
            # for/else: the else branch runs only when NO field fell outside
            # the readable whitelist
            for key in fields:
                if not (key in readable or key.startswith('context_')):
                    break
            else:
                # safe fields only, so we read as super-user to bypass access rights
                self = self.sudo()

        return super(Users, self).read(fields=fields, load=load)
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
groupby_fields = set([groupby] if isinstance(groupby, str) else groupby)
if groupby_fields.intersection(USER_PRIVATE_FIELDS):
raise AccessError(_("Invalid 'group by' parameter"))
return super(Users, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Forbid non-superuser searches on private fields."""
        if not self.env.su and args:
            domain_fields = {term[0] for term in args if isinstance(term, (tuple, list))}
            if domain_fields.intersection(USER_PRIVATE_FIELDS):
                raise AccessError(_('Invalid search criterion'))
        return super(Users, self)._search(args, offset=offset, limit=limit, order=order, count=count,
                                          access_rights_uid=access_rights_uid)
    @api.model_create_multi
    def create(self, vals_list):
        """Create users and align each new partner's company/active flag with its user."""
        users = super(Users, self).create(vals_list)
        for user in users:
            # if partner is global we keep it that way
            if user.partner_id.company_id:
                user.partner_id.company_id = user.company_id
            user.partner_id.active = user.active
        return users
    def write(self, values):
        """Write with user-specific safeguards: superuser/self (de)activation
        guards, self-service writes on safe fields, partner company sync and
        cache/session invalidation.
        """
        if values.get('active') and SUPERUSER_ID in self._ids:
            raise UserError(_("You cannot activate the superuser."))
        if values.get('active') == False and self._uid in self._ids:
            raise UserError(_("You cannot deactivate the user you're currently logged in as."))

        if values.get('active'):
            # re-activate the related partner together with the user
            for user in self:
                if not user.active and not user.partner_id.active:
                    user.partner_id.toggle_active()
        if self == self.env.user:
            writeable = self.SELF_WRITEABLE_FIELDS
            # for/else: else runs only when every written field is whitelisted
            for key in list(values):
                if not (key in writeable or key.startswith('context_')):
                    break
            else:
                if 'company_id' in values:
                    if values['company_id'] not in self.env.user.company_ids.ids:
                        del values['company_id']
                # safe fields only, so we write as super-user to bypass access rights
                self = self.sudo().with_context(binary_field_real_user=self.env.user)

        res = super(Users, self).write(values)
        if 'company_id' in values:
            for user in self:
                # if partner is global we keep it that way
                if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
                    user.partner_id.write({'company_id': user.company_id.id})

        if 'company_id' in values or 'company_ids' in values:
            # Reset lazy properties `company` & `companies` on all envs
            # This is unlikely in a business code to change the company of a user and then do business stuff
            # but in case it happens this is handled.
            # e.g. `account_test_savepoint.py` `setup_company_data`, triggered by `test_account_invoice_report.py`
            for env in list(self.env.transaction.envs):
                if env.user in self:
                    lazy_property.reset_all(env)

        # clear caches linked to the users
        if self.ids and 'groups_id' in values:
            # DLE P139: Calling invalidate_cache on a new, well you lost everything as you wont be able to take it back from the cache
            # `test_00_equipment_multicompany_user`
            self.env['ir.model.access'].call_cache_clearing_methods()

        # per-method / per-model caches have been removed so the various
        # clear_cache/clear_caches methods pretty much just end up calling
        # Registry._clear_cache
        invalidation_fields = {
            'groups_id', 'active', 'lang', 'tz', 'company_id',
            *USER_PRIVATE_FIELDS,
            *self._get_session_token_fields()
        }
        if (invalidation_fields & values.keys()) or any(key.startswith('context_') for key in values):
            self.clear_caches()

        return res
    @api.ondelete(at_uninstall=True)
    def _unlink_except_superuser(self):
        """The superuser record must never be deleted."""
        if SUPERUSER_ID in self.ids:
            raise UserError(_('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)'))
        # user deletion affects cached session tokens / group data
        self.clear_caches()
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Search by exact login first, then fall back to matching on name."""
        args = args or []
        user_ids = []
        if operator not in expression.NEGATIVE_TERM_OPERATORS:
            if operator == 'ilike' and not (name or '').strip():
                domain = []
            else:
                domain = [('login', '=', name)]
            user_ids = self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)
        if not user_ids:
            user_ids = self._search(expression.AND([[('name', operator, name)], args]), limit=limit, access_rights_uid=name_get_uid)
        return user_ids
    def copy(self, default=None):
        """Duplicate the user, suffixing name and login with "(copy)" unless overridden."""
        self.ensure_one()
        default = dict(default or {})
        if ('name' not in default) and ('partner_id' not in default):
            default['name'] = _("%s (copy)", self.name)
        if 'login' not in default:
            default['login'] = _("%s (copy)", self.login)
        return super(Users, self).copy(default)
    @api.model
    @tools.ormcache('self._uid')
    def context_get(self):
        """Frozen dict of the user's context values: lang, tz and any context_* field."""
        user = self.env.user
        # determine field names to read; 'context_foo' fields map to key 'foo'
        name_to_key = {
            name: name[8:] if name.startswith('context_') else name
            for name in self._fields
            if name.startswith('context_') or name in ('lang', 'tz')
        }
        # use read() to not read other fields: this must work while modifying
        # the schema of models res.users or res.partner
        values = user.read(list(name_to_key), load=False)[0]
        return frozendict({
            key: values[name]
            for name, key in name_to_key.items()
        })
    @api.model
    def action_get(self):
        """Return the 'base.action_res_users_my' action description as a dict."""
        return self.sudo().env.ref('base.action_res_users_my').read()[0]
    def check_super(self, passwd):
        # delegate to odoo.service.db.check_super (imported at module level);
        # presumably validates the master password — confirm against that module
        return check_super(passwd)
    @api.model
    def _update_last_login(self):
        """Record a successful login as a fresh res.users.log row."""
        # only create new records to avoid any side-effect on concurrent transactions
        # extra records will be deleted by the periodical garbage collection
        self.env['res.users.log'].create({}) # populated by defaults
    @api.model
    def _get_login_domain(self, login):
        """Search domain used to locate the user record matching ``login``."""
        return [('login', '=', login)]
    @api.model
    def _get_login_order(self):
        """Ordering applied when several records match the login domain."""
        return self._order
    @classmethod
    def _login(cls, db, login, password, user_agent_env):
        """Authenticate ``login``/``password`` on ``db`` and return the user id.

        :raise AccessDenied: on empty password, unknown login or bad credentials
        """
        if not password:
            raise AccessDenied()
        ip = request.httprequest.environ['REMOTE_ADDR'] if request else 'n/a'
        try:
            with cls.pool.cursor() as cr:
                self = api.Environment(cr, SUPERUSER_ID, {})[cls._name]
                # rate-limiting / cooldown guard around the whole attempt
                with self._assert_can_auth():
                    user = self.search(self._get_login_domain(login), order=self._get_login_order(), limit=1)
                    if not user:
                        raise AccessDenied()
                    user = user.with_user(user)
                    user._check_credentials(password, user_agent_env)
                    tz = request.httprequest.cookies.get('tz') if request else None
                    if tz in pytz.all_timezones and (not user.tz or not user.login_date):
                        # first login or missing tz -> set tz to browser tz
                        user.tz = tz
                    user._update_last_login()
        except AccessDenied:
            _logger.info("Login failed for db:%s login:%s from %s", db, login, ip)
            raise

        _logger.info("Login successful for db:%s login:%s from %s", db, login, ip)
        return user.id
    @classmethod
    def authenticate(cls, db, login, password, user_agent_env):
        """Verifies and returns the user ID corresponding to the given
        ``login`` and ``password`` combination, or False if there was
        no matching user.

        :param str db: the database on which user is trying to authenticate
        :param str login: username
        :param str password: user password
        :param dict user_agent_env: environment dictionary describing any
            relevant environment attributes
        """
        uid = cls._login(db, login, password, user_agent_env=user_agent_env)
        if user_agent_env and user_agent_env.get('base_location'):
            with cls.pool.cursor() as cr:
                env = api.Environment(cr, uid, {})
                if env.user.has_group('base.group_system'):
                    # Successfully logged in as system user!
                    # Attempt to guess the web base url...
                    try:
                        base = user_agent_env['base_location']
                        ICP = env['ir.config_parameter']
                        if not ICP.get_param('web.base.url.freeze'):
                            ICP.set_param('web.base.url', base)
                    except Exception:
                        # best effort only: a failed url update must not block login
                        _logger.exception("Failed to update web.base.url configuration parameter")
        return uid
    @classmethod
    @tools.ormcache('uid', 'passwd')
    def check(cls, db, uid, passwd):
        """Verifies that the given (uid, password) is authorized for the database ``db`` and
        raise an exception if it is not."""
        if not passwd:
            # empty passwords disallowed for obvious security reasons
            raise AccessDenied()
        with contextlib.closing(cls.pool.cursor()) as cr:
            self = api.Environment(cr, uid, {})[cls._name]
            with self._assert_can_auth():
                if not self.env.user.active:
                    raise AccessDenied()
                self._check_credentials(passwd, {'interactive': False})
    def _get_session_token_fields(self):
        """Fields folded into the session token; changing any of them invalidates sessions."""
        return {'id', 'login', 'password', 'active'}
    @tools.ormcache('sid')
    def _compute_session_token(self, sid):
        """ Compute a session token given a session id and a user id """
        # retrieve the fields used to generate the session token
        session_fields = ', '.join(sorted(self._get_session_token_fields()))
        # NOTE: session_fields comes from code, not user input, so the %-interpolation
        # into the query text is safe; the id is passed as a bound parameter
        self.env.cr.execute("""SELECT %s, (SELECT value FROM ir_config_parameter WHERE key='database.secret')
                                FROM res_users
                                WHERE id=%%s""" % (session_fields), (self.id,))
        if self.env.cr.rowcount != 1:
            # user vanished: flush cached tokens and fail
            self.clear_caches()
            return False
        data_fields = self.env.cr.fetchone()
        # generate hmac key
        key = (u'%s' % (data_fields,)).encode('utf-8')
        # hmac the session id
        data = sid.encode('utf-8')
        h = hmac.new(key, data, sha256)
        # keep in the cache the token
        return h.hexdigest()
    @api.model
    def change_password(self, old_passwd, new_passwd):
        """Change current user password. Old password must be provided explicitly
        to prevent hijacking an existing user session, or for cases where the cleartext
        password is not used to authenticate requests.

        :return: True
        :raise: odoo.exceptions.AccessDenied when old password is wrong
        :raise: odoo.exceptions.UserError when new password is not set or empty
        """
        if not old_passwd:
            raise AccessDenied()
        if not new_passwd:
            raise UserError(_("Setting empty passwords is not allowed for security reasons!"))

        # alternatively: use identitycheck wizard?
        self._check_credentials(old_passwd, {'interactive': True})

        ip = request.httprequest.environ['REMOTE_ADDR'] if request else 'n/a'
        _logger.info("Password change for '%s' (#%s) from %s", self.env.user.login, self.env.uid, ip)

        # use self.env.user here, because it has uid=SUPERUSER_ID
        return self.env.user.write({'password': new_passwd})
    def preference_save(self):
        """Return the 'reload_context' client action (handled client-side)."""
        return {
            'type': 'ir.actions.client',
            'tag': 'reload_context',
        }
    def preference_change_password(self):
        """Open the client-side 'change_password' dialog."""
        return {
            'type': 'ir.actions.client',
            'tag': 'change_password',
            'target': 'new',
        }
    @api.model
    def has_group(self, group_ext_id):
        """True if the user (the singleton, else the context user) belongs to the group."""
        # use singleton's id if called on a non-empty recordset, otherwise
        # context uid
        uid = self.id
        if uid and uid != self._uid:
            self = self.with_user(uid)
        return self._has_group(group_ext_id)
    @api.model
    @tools.ormcache('self._uid', 'group_ext_id')
    def _has_group(self, group_ext_id):
        """Checks whether user belongs to given group.

        :param str group_ext_id: external ID (XML ID) of the group.
           Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use..
        :return: True if the current user is a member of the group with the
           given external ID (XML ID), else False.
        """
        assert group_ext_id and '.' in group_ext_id, "External ID '%s' must be fully qualified" % group_ext_id
        module, ext_id = group_ext_id.split('.')
        # direct SQL: resolve the XML id and probe the m2m relation in one query
        self._cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
                            (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s AND model='res.groups')""",
                         (self._uid, module, ext_id))
        return bool(self._cr.fetchone())
def _action_show(self):
"""If self is a singleton, directly access the form view. If it is a recordset, open a tree view"""
view_id = self.env.ref('base.view_users_form').id
action = {
'type': 'ir.actions.act_window',
'res_model': 'res.users',
'context': {'create': False},
}
if len(self) > 1:
action.update({
'name': _('Users'),
'view_mode': 'list,form',
'views': [[None, 'list'], [view_id, 'form']],
'domain': [('id', 'in', self.ids)],
})
else:
action.update({
'view_mode': 'form',
'views': [[view_id, 'form']],
'res_id': self.id,
})
return action
    def action_show_groups(self):
        """Open the (read-only) list of groups this user belongs to."""
        self.ensure_one()
        return {
            'name': _('Groups'),
            'view_mode': 'tree,form',
            'res_model': 'res.groups',
            'type': 'ir.actions.act_window',
            'context': {'create': False, 'delete': False},
            'domain': [('id', 'in', self.groups_id.ids)],
            'target': 'current',
        }
    def action_show_accesses(self):
        """Open the (read-only) list of ACLs that apply to this user."""
        self.ensure_one()
        return {
            'name': _('Access Rights'),
            'view_mode': 'tree,form',
            'res_model': 'ir.model.access',
            'type': 'ir.actions.act_window',
            'context': {'create': False, 'delete': False},
            'domain': [('id', 'in', self.groups_id.model_access.ids)],
            'target': 'current',
        }
    def action_show_rules(self):
        """Open the (read-only) list of record rules that apply to this user."""
        self.ensure_one()
        return {
            'name': _('Record Rules'),
            'view_mode': 'tree,form',
            'res_model': 'ir.rule',
            'type': 'ir.actions.act_window',
            'context': {'create': False, 'delete': False},
            'domain': [('id', 'in', self.groups_id.rule_groups.ids)],
            'target': 'current',
        }
    def _is_internal(self):
        """True for internal (i.e. non-share) users."""
        self.ensure_one()
        return not self.share
    def _is_public(self):
        """True if this user belongs to base.group_public."""
        self.ensure_one()
        return self.has_group('base.group_public')
    def _is_system(self):
        """True if this user belongs to base.group_system."""
        self.ensure_one()
        return self.has_group('base.group_system')
    def _is_admin(self):
        """True for the superuser or members of base.group_erp_manager."""
        self.ensure_one()
        return self._is_superuser() or self.has_group('base.group_erp_manager')
    def _is_superuser(self):
        """True only for the dedicated superuser record (SUPERUSER_ID)."""
        self.ensure_one()
        return self.id == SUPERUSER_ID
    @api.model
    def get_company_currency_id(self):
        """Id of the current company's currency."""
        return self.env.company.currency_id.id
    def _crypt_context(self):
        """ Passlib CryptContext instance used to encrypt and verify
        passwords. Can be overridden if technical, legal or political matters
        require different kdfs than the provided default.

        Requires a CryptContext as deprecation and upgrade notices are used
        internally
        """
        return DEFAULT_CRYPT_CONTEXT
@contextlib.contextmanager
def _assert_can_auth(self):
    """ Checks that the current environment even allows the current auth
    request to happen.

    The baseline implementation is a simple linear login cooldown: after
    a number of failures trying to log-in, the user (by login) is put on
    cooldown. During the cooldown period, login *attempts* are ignored
    and logged.

    .. warning::

        The login counter is not shared between workers and not
        specifically thread-safe, the feature exists mostly for
        rate-limiting on large number of login attempts (brute-forcing
        passwords) so that should not be much of an issue.

        For a more complex strategy (e.g. database or distribute storage)
        override this method. To simply change the cooldown criteria
        (configuration, ...) override _on_login_cooldown instead.

    .. note::

        This is a *context manager* so it can be called around the login
        procedure without having to call it itself.
    """
    # needs request for remote address
    if not request:
        yield
        return

    # failure counters live on the registry: per-process, per-database state
    reg = self.env.registry
    failures_map = getattr(reg, '_login_failures', None)
    if failures_map is None:
        # map: remote address -> (failure count, datetime of last failure)
        failures_map = reg._login_failures = collections.defaultdict(lambda : (0, datetime.datetime.min))

    source = request.httprequest.remote_addr
    (failures, previous) = failures_map[source]
    if self._on_login_cooldown(failures, previous):
        _logger.warning(
            "Login attempt ignored for %s on %s: "
            "%d failures since last success, last failure at %s. "
            "You can configure the number of login failures before a "
            "user is put on cooldown as well as the duration in the "
            "System Parameters. Disable this feature by setting "
            "\"base.login_cooldown_after\" to 0.",
            source, self.env.cr.dbname, failures, previous)
        if ipaddress.ip_address(source).is_private:
            # a private rate-limited address suggests all clients appear as
            # one source, i.e. a misconfigured reverse proxy
            _logger.warning(
                "The rate-limited IP address %s is classified as private "
                "and *might* be a proxy. If your Odoo is behind a proxy, "
                "it may be mis-configured. Check that you are running "
                "Odoo in Proxy Mode and that the proxy is properly configured, see "
                "https://www.odoo.com/documentation/15.0/administration/install/deploy.html#https for details.",
                source
            )
        raise AccessDenied(_("Too many login failures, please wait a bit before trying again."))

    try:
        yield
    except AccessDenied:
        # record the failure (count + timestamp) for this address, re-raise
        (failures, __) = reg._login_failures[source]
        reg._login_failures[source] = (failures + 1, datetime.datetime.now())
        raise
    else:
        # successful auth resets the counter for this address
        reg._login_failures.pop(source, None)
def _on_login_cooldown(self, failures, previous):
    """ Decides whether the user trying to log in is currently
    "on cooldown" and not even allowed to attempt logging in.

    The default cooldown function simply puts the user on cooldown for
    <login_cooldown_duration> seconds after each failure following the
    <login_cooldown_after>th (0 to disable).

    Can be overridden to implement more complex backoff strategies, or
    e.g. wind down or reset the cooldown period as the previous failure
    recedes into the far past.

    :param int failures: number of recorded failures (since last success)
    :param previous: timestamp of previous failure
    :type previous: datetime.datetime
    :returns: whether the user is currently in cooldown phase (true if cooldown, false if no cooldown and login can continue)
    :rtype: bool
    """
    config = self.env['ir.config_parameter'].sudo()
    threshold = int(config.get_param('base.login_cooldown_after', 5))
    # a threshold of 0 disables the cooldown feature entirely
    if threshold == 0:
        return False
    duration = int(config.get_param('base.login_cooldown_duration', 60))
    if failures < threshold:
        return False
    return datetime.datetime.now() - previous < datetime.timedelta(seconds=duration)
def _register_hook(self):
    """Warn integrators about obsolete ``check_credentials`` overrides
    (renamed to ``_check_credentials``)."""
    if not hasattr(self, 'check_credentials'):
        return
    _logger.warning("The check_credentials method of res.users has been renamed _check_credentials. One of your installed modules defines one, but it will not be called anymore.")
def _mfa_type(self):
    """ If an MFA method is enabled, returns its type as a string.

    The base implementation has no MFA method, hence ``None``.
    """
    return None
def _mfa_url(self):
    """ If an MFA method is enabled, returns the URL for its second step.

    The base implementation has no MFA method, hence ``None``.
    """
    return None
#
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied" or
# "inherited" groups. Once a user belongs to a group, it automatically belongs
# to the implied groups (transitively).
#
class GroupsImplied(models.Model):
    """Extension of ``res.groups`` with "implied" (inherited) groups.

    Once a user belongs to a group, it automatically belongs to the implied
    groups, transitively.
    """
    _inherit = 'res.groups'

    # direct implications: members of this group also get these groups
    implied_ids = fields.Many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
        string='Inherits', help='Users of this group automatically inherit those groups')
    # transitive closure of implied_ids
    trans_implied_ids = fields.Many2many('res.groups', string='Transitively inherits',
        compute='_compute_trans_implied', recursive=True)

    @api.depends('implied_ids.trans_implied_ids')
    def _compute_trans_implied(self):
        # Compute the transitive closure recursively. Note that the performance
        # is good, because the record cache behaves as a memo (the field is
        # never computed twice on a given group.)
        for g in self:
            g.trans_implied_ids = g.implied_ids | g.implied_ids.trans_implied_ids

    @api.model_create_multi
    def create(self, vals_list):
        # pop 'users' so membership is set through write() below, which also
        # propagates the implied groups to those users
        user_ids_list = [vals.pop('users', None) for vals in vals_list]
        groups = super(GroupsImplied, self).create(vals_list)
        for group, user_ids in zip(groups, user_ids_list):
            if user_ids:
                # delegate addition of users to add implied groups
                group.write({'users': user_ids})
        return groups

    def write(self, values):
        res = super(GroupsImplied, self).write(values)
        if values.get('users') or values.get('implied_ids'):
            # add all implied groups (to all users of each group)
            for group in self:
                # compute the transitive closure of the implication relation
                # in SQL and insert the missing (group, user) membership rows
                # in a single query
                self._cr.execute("""
                    WITH RECURSIVE group_imply(gid, hid) AS (
                            SELECT gid, hid
                              FROM res_groups_implied_rel
                             UNION
                            SELECT i.gid, r.hid
                              FROM res_groups_implied_rel r
                              JOIN group_imply i ON (i.hid = r.gid)
                        )
                    INSERT INTO res_groups_users_rel (gid, uid)
                        SELECT i.hid, r.uid
                          FROM group_imply i, res_groups_users_rel r
                         WHERE r.gid = i.gid
                           AND i.gid = %(gid)s
                        EXCEPT
                        SELECT r.gid, r.uid
                          FROM res_groups_users_rel r
                          JOIN group_imply i ON (r.gid = i.hid)
                         WHERE i.gid = %(gid)s
                """, dict(gid=group.id))
            self._check_one_user_type()
        return res

    def _apply_group(self, implied_group):
        """ Add the given group to the groups implied by the current group
        :param implied_group: the implied group to add
        """
        # only touch the groups that do not imply it yet
        groups = self.filtered(lambda g: implied_group not in g.implied_ids)
        groups.write({'implied_ids': [Command.link(implied_group.id)]})

    def _remove_group(self, implied_group):
        """ Remove the given group from the implied groups of the current group
        :param implied_group: the implied group to remove
        """
        groups = self.filtered(lambda g: implied_group in g.implied_ids)
        if groups:
            groups.write({'implied_ids': [Command.unlink(implied_group.id)]})
            if groups.users:
                # also withdraw the implied group from the users of these groups
                implied_group.write({'users': [Command.unlink(user.id) for user in groups.users]})
class UsersImplied(models.Model):
    """Extension of ``res.users`` to complete ``groups_id`` with implied groups."""
    _inherit = 'res.users'

    @api.model_create_multi
    def create(self, vals_list):
        for values in vals_list:
            if 'groups_id' in values:
                # complete 'groups_id' with implied groups
                # a cache-only record is used to resolve the commands into
                # an actual recordset of groups
                user = self.new(values)
                gs = user.groups_id._origin
                gs = gs | gs.trans_implied_ids
                values['groups_id'] = type(self).groups_id.convert_to_write(gs, user)
        return super(UsersImplied, self).create(vals_list)

    def write(self, values):
        if not values.get('groups_id'):
            return super(UsersImplied, self).write(values)
        # snapshot which users are internal before the write, to detect demotions
        users_before = self.filtered(lambda u: u.has_group('base.group_user'))
        res = super(UsersImplied, self).write(values)
        demoted_users = users_before.filtered(lambda u: not u.has_group('base.group_user'))
        if demoted_users:
            # demoted users are restricted to the assigned groups only
            vals = {'groups_id': [Command.clear()] + values['groups_id']}
            super(UsersImplied, demoted_users).write(vals)
        # add implied groups for all users (in batches)
        # users sharing the same groups_id get a single write call
        users_batch = defaultdict(self.browse)
        for user in self:
            users_batch[user.groups_id] += user
        for groups, users in users_batch.items():
            gs = set(concat(g.trans_implied_ids for g in groups))
            vals = {'groups_id': [Command.link(g.id) for g in gs]}
            super(UsersImplied, users).write(vals)
        return res
#
# Virtual checkbox and selection for res.user form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#
class GroupsView(models.Model):
    """Extension of ``res.groups`` that regenerates the reified "user groups
    view" (``base.user_groups_view``) whenever groups change."""
    _inherit = 'res.groups'

    @api.model
    def create(self, values):
        # NOTE: old-style single-record create; `user` is actually the created
        # group record (historical variable name)
        user = super(GroupsView, self).create(values)
        self._update_user_groups_view()
        # actions.get_bindings() depends on action records
        self.env['ir.actions.actions'].clear_caches()
        return user

    def write(self, values):
        # determine which values the "user groups view" depends on
        VIEW_DEPS = ('category_id', 'implied_ids')
        view_values0 = [g[name] for name in VIEW_DEPS if name in values for g in self]
        res = super(GroupsView, self).write(values)
        # update the "user groups view" only if necessary
        view_values1 = [g[name] for name in VIEW_DEPS if name in values for g in self]
        if view_values0 != view_values1:
            self._update_user_groups_view()
        # actions.get_bindings() depends on action records
        self.env['ir.actions.actions'].clear_caches()
        return res

    def unlink(self):
        res = super(GroupsView, self).unlink()
        self._update_user_groups_view()
        # actions.get_bindings() depends on action records
        self.env['ir.actions.actions'].clear_caches()
        return res

    def _get_hidden_extra_categories(self):
        # categories whose groups are only shown to group_no_one members
        return ['base.module_category_hidden', 'base.module_category_extra', 'base.module_category_usability']

    @api.model
    def _update_user_groups_view(self):
        """ Modify the view with xmlid ``base.user_groups_view``, which inherits
        the user form view, and introduces the reified group fields.
        """
        # remove the language to avoid translations, it will be handled at the view level
        self = self.with_context(lang=None)

        # We have to try-catch this, because at first init the view does not
        # exist but we are already creating some basic groups.
        view = self.env.ref('base.user_groups_view', raise_if_not_found=False)
        if not (view and view.exists() and view._name == 'ir.ui.view'):
            return

        if self._context.get('install_filename') or self._context.get(MODULE_UNINSTALL_FLAG):
            # use a dummy view during install/upgrade/uninstall
            xml = E.field(name="groups_id", position="after")
        else:
            group_no_one = view.env.ref('base.group_no_one')
            group_employee = view.env.ref('base.group_user')
            # xml1: user-type radio, xml2: selection fields grouped by
            # category, xml3: boolean checkboxes
            xml1, xml2, xml3 = [], [], []
            xml_by_category = {}
            xml1.append(E.separator(string='User Type', colspan="2", groups='base.group_no_one'))

            user_type_field_name = ''
            user_type_readonly = str({})
            sorted_tuples = sorted(self.get_groups_by_application(),
                                   key=lambda t: t[0].xml_id != 'base.module_category_user_type')
            for app, kind, gs, category_name in sorted_tuples:  # we process the user type first
                attrs = {}
                # hide groups in categories 'Hidden' and 'Extra' (except for group_no_one)
                if app.xml_id in self._get_hidden_extra_categories():
                    attrs['groups'] = 'base.group_no_one'

                # User type (employee, portal or public) is a separated group. This is the only 'selection'
                # group of res.groups without implied groups (with each other).
                if app.xml_id == 'base.module_category_user_type':
                    # application name with a selection field
                    field_name = name_selection_groups(gs.ids)
                    user_type_field_name = field_name
                    # all other group fields become readonly for non-internal users
                    user_type_readonly = str({'readonly': [(user_type_field_name, '!=', group_employee.id)]})
                    attrs['widget'] = 'radio'
                    attrs['groups'] = 'base.group_no_one'
                    xml1.append(E.field(name=field_name, **attrs))
                    xml1.append(E.newline())

                elif kind == 'selection':
                    # application name with a selection field
                    field_name = name_selection_groups(gs.ids)
                    attrs['attrs'] = user_type_readonly
                    if category_name not in xml_by_category:
                        xml_by_category[category_name] = []
                        xml_by_category[category_name].append(E.newline())
                    xml_by_category[category_name].append(E.field(name=field_name, **attrs))
                    xml_by_category[category_name].append(E.newline())

                else:
                    # application separator with boolean fields
                    app_name = app.name or 'Other'
                    xml3.append(E.separator(string=app_name, colspan="4", **attrs))
                    attrs['attrs'] = user_type_readonly
                    for g in gs:
                        field_name = name_boolean_group(g.id)
                        if g == group_no_one:
                            # make the group_no_one invisible in the form view
                            xml3.append(E.field(name=field_name, invisible="1", **attrs))
                        else:
                            xml3.append(E.field(name=field_name, **attrs))

            # a dict argument to the E-maker sets attributes on the element
            xml3.append({'class': "o_label_nowrap"})
            if user_type_field_name:
                user_type_attrs = {'invisible': [(user_type_field_name, '!=', group_employee.id)]}
            else:
                user_type_attrs = {}

            for xml_cat in sorted(xml_by_category.keys(), key=lambda it: it[0]):
                master_category_name = xml_cat[1]
                xml2.append(E.group(*(xml_by_category[xml_cat]), col="2", string=master_category_name))

            xml = E.field(
                E.group(*(xml1), col="2"),
                E.group(*(xml2), col="2", attrs=str(user_type_attrs)),
                E.group(*(xml3), col="4", attrs=str(user_type_attrs)), name="groups_id", position="replace")
            xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))

        # serialize and update the view
        xml_content = etree.tostring(xml, pretty_print=True, encoding="unicode")
        if xml_content != view.arch:  # avoid useless xml validation if no change
            new_context = dict(view._context)
            new_context.pop('install_filename', None)  # don't set arch_fs for this computed view
            new_context['lang'] = None
            view.with_context(new_context).write({'arch': xml_content})

    def get_application_groups(self, domain):
        """ Return the non-share groups that satisfy ``domain``. """
        return self.search(domain + [('share', '=', False)])

    @api.model
    def get_groups_by_application(self):
        """ Return all groups classified by application (module category), as a list::

                [(app, kind, groups), ...],

            where ``app`` and ``groups`` are recordsets, and ``kind`` is either
            ``'boolean'`` or ``'selection'``. Applications are given in sequence
            order. If ``kind`` is ``'selection'``, ``groups`` are given in
            reverse implication order.
        """
        def linearize(app, gs, category_name):
            # 'User Type' is an exception
            if app.xml_id == 'base.module_category_user_type':
                return (app, 'selection', gs.sorted('id'), category_name)
            # determine sequence order: a group appears after its implied groups
            order = {g: len(g.trans_implied_ids & gs) for g in gs}
            # We want a selection for Accounting too. Auditor and Invoice are both
            # children of Accountant, but the two of them make a full accountant
            # so it makes no sense to have checkboxes.
            if app.xml_id == 'base.module_category_accounting_accounting':
                return (app, 'selection', gs.sorted(key=order.get), category_name)
            # check whether order is total, i.e., sequence orders are distinct
            if len(set(order.values())) == len(gs):
                return (app, 'selection', gs.sorted(key=order.get), category_name)
            else:
                return (app, 'boolean', gs, (100, 'Other'))

        # classify all groups by application
        by_app, others = defaultdict(self.browse), self.browse()
        for g in self.get_application_groups([]):
            if g.category_id:
                by_app[g.category_id] += g
            else:
                others += g
        # build the result
        res = []
        for app, gs in sorted(by_app.items(), key=lambda it: it[0].sequence or 0):
            if app.parent_id:
                res.append(linearize(app, gs, (app.parent_id.sequence, app.parent_id.name)))
            else:
                res.append(linearize(app, gs, (100, 'Other')))
        if others:
            res.append((self.env['ir.module.category'], 'boolean', others, (100,'Other')))
        return res
class ModuleCategory(models.Model):
    """Keep the reified "user groups view" in sync with category changes."""
    _inherit = "ir.module.category"

    def write(self, values):
        result = super().write(values)
        # the category name appears in the generated groups form view
        if "name" in values:
            self.env["res.groups"]._update_user_groups_view()
        return result

    def unlink(self):
        result = super().unlink()
        # removed categories must disappear from the generated view
        self.env["res.groups"]._update_user_groups_view()
        return result
class UsersView(models.Model):
    """Extension of ``res.users`` handling the reified group fields
    (``in_group_ID`` booleans and ``sel_groups_ID1_..._IDk`` selections) used
    by the generated user form view."""
    _inherit = 'res.users'

    @api.model_create_multi
    def create(self, vals_list):
        # translate reified group fields back into 'groups_id' commands
        new_vals_list = []
        for values in vals_list:
            new_vals_list.append(self._remove_reified_groups(values))
        users = super(UsersView, self).create(new_vals_list)
        # keep the multi-company group consistent with the number of companies
        group_multi_company_id = self.env['ir.model.data']._xmlid_to_res_id(
            'base.group_multi_company', raise_if_not_found=False)
        if group_multi_company_id:
            for user in users:
                if len(user.company_ids) <= 1 and group_multi_company_id in user.groups_id.ids:
                    user.write({'groups_id': [Command.unlink(group_multi_company_id)]})
                elif len(user.company_ids) > 1 and group_multi_company_id not in user.groups_id.ids:
                    user.write({'groups_id': [Command.link(group_multi_company_id)]})
        return users

    def write(self, values):
        values = self._remove_reified_groups(values)
        res = super(UsersView, self).write(values)
        if 'company_ids' not in values:
            return res
        # keep the multi-company group consistent with the number of companies
        group_multi_company = self.env.ref('base.group_multi_company', False)
        if group_multi_company:
            for user in self:
                if len(user.company_ids) <= 1 and user.id in group_multi_company.users.ids:
                    user.write({'groups_id': [Command.unlink(group_multi_company.id)]})
                elif len(user.company_ids) > 1 and user.id not in group_multi_company.users.ids:
                    user.write({'groups_id': [Command.link(group_multi_company.id)]})
        return res

    @api.model
    def new(self, values=None, origin=None, ref=None):
        # FIX: the signature used a mutable default argument (``values={}``);
        # a ``None`` sentinel avoids the shared-mutable-default pitfall while
        # remaining call-compatible.
        if values is None:
            values = {}
        values = self._remove_reified_groups(values)
        user = super().new(values=values, origin=origin, ref=ref)
        group_multi_company = self.env.ref('base.group_multi_company', False)
        if group_multi_company and 'company_ids' in values:
            if len(user.company_ids) <= 1 and user.id in group_multi_company.users.ids:
                user.update({'groups_id': [Command.unlink(group_multi_company.id)]})
            elif len(user.company_ids) > 1 and user.id not in group_multi_company.users.ids:
                user.update({'groups_id': [Command.link(group_multi_company.id)]})
        return user

    def _remove_reified_groups(self, values):
        """ return `values` without reified group fields """
        add, rem = [], []
        values1 = {}

        for key, val in values.items():
            if is_boolean_group(key):
                (add if val else rem).append(get_boolean_group(key))
            elif is_selection_groups(key):
                # a selection resets all its candidate groups, then adds the
                # selected one (if any)
                rem += get_selection_groups(key)
                if val:
                    add.append(val)
            else:
                values1[key] = val

        if 'groups_id' not in values and (add or rem):
            # remove group ids in `rem` and add group ids in `add`
            values1['groups_id'] = list(itertools.chain(
                zip(repeat(3), rem),
                zip(repeat(4), add)
            ))
        return values1

    @api.model
    def default_get(self, fields):
        group_fields, fields = partition(is_reified_group, fields)
        # reified defaults are derived from the default 'groups_id'
        fields1 = (fields + ['groups_id']) if group_fields else fields
        values = super(UsersView, self).default_get(fields1)
        self._add_reified_groups(group_fields, values)
        return values

    def onchange(self, values, field_name, field_onchange):
        # the web client asks for the reified fields; make sure 'groups_id'
        # is part of the onchange spec so they can be recomputed
        field_onchange['groups_id'] = ''
        result = super().onchange(values, field_name, field_onchange)
        if not field_name:  # merged default_get
            self._add_reified_groups(
                filter(is_reified_group, field_onchange),
                result.setdefault('value', {})
            )
        return result

    def read(self, fields=None, load='_classic_read'):
        # determine whether reified groups fields are required, and which ones
        fields1 = fields or list(self.fields_get())
        group_fields, other_fields = partition(is_reified_group, fields1)

        # read regular fields (other_fields); add 'groups_id' if necessary
        drop_groups_id = False
        if group_fields and fields:
            if 'groups_id' not in other_fields:
                other_fields.append('groups_id')
                drop_groups_id = True
        else:
            other_fields = fields

        res = super(UsersView, self).read(other_fields, load=load)

        # post-process result to add reified group fields
        if group_fields:
            for values in res:
                self._add_reified_groups(group_fields, values)
                if drop_groups_id:
                    values.pop('groups_id', None)
        return res

    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        if fields:
            # ignore reified fields: they cannot be aggregated or grouped on
            fields = [fname for fname in fields if not is_reified_group(fname)]
        return super().read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)

    def _add_reified_groups(self, fields, values):
        """ add the given reified group fields into `values` """
        gids = set(parse_m2m(values.get('groups_id') or []))
        for f in fields:
            if is_boolean_group(f):
                values[f] = get_boolean_group(f) in gids
            elif is_selection_groups(f):
                # determine selection groups, in order
                sel_groups = self.env['res.groups'].sudo().browse(get_selection_groups(f))
                sel_order = {g: len(g.trans_implied_ids & sel_groups) for g in sel_groups}
                sel_groups = sel_groups.sorted(key=sel_order.get)
                # determine which ones are in gids
                selected = [gid for gid in sel_groups.ids if gid in gids]
                # if 'Internal User' is in the group, this is the "User Type" group
                # and we need to show 'Internal User' selected, not Public/Portal.
                if self.env.ref('base.group_user').id in selected:
                    values[f] = self.env.ref('base.group_user').id
                else:
                    values[f] = selected and selected[-1] or False

    @api.model
    def fields_get(self, allfields=None, attributes=None):
        res = super(UsersView, self).fields_get(allfields, attributes=attributes)
        # add reified groups fields
        for app, kind, gs, category_name in self.env['res.groups'].sudo().get_groups_by_application():
            if kind == 'selection':
                # 'User Type' should not be 'False'. A user is either 'employee', 'portal' or 'public' (required).
                selection_vals = [(False, '')]
                if app.xml_id == 'base.module_category_user_type':
                    selection_vals = []
                field_name = name_selection_groups(gs.ids)
                if allfields and field_name not in allfields:
                    continue
                # selection group field
                tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
                res[field_name] = {
                    'type': 'selection',
                    'string': app.name or _('Other'),
                    'selection': selection_vals + [(g.id, g.name) for g in gs],
                    'help': '\n'.join(tips),
                    'exportable': False,
                    'selectable': False,
                }
            else:
                # boolean group fields
                for g in gs:
                    field_name = name_boolean_group(g.id)
                    if allfields and field_name not in allfields:
                        continue
                    res[field_name] = {
                        'type': 'boolean',
                        'string': g.name,
                        'help': g.comment,
                        'exportable': False,
                        'selectable': False,
                    }
        return res
class CheckIdentity(models.TransientModel):
    """ Wizard used to re-check the user's credentials (password)

    Might be useful before the more security-sensitive operations, users might be
    leaving their computer unlocked & unattended. Re-checking credentials mitigates
    some of the risk of a third party using such an unattended device to manipulate
    the account.
    """
    _name = 'res.users.identitycheck'
    _description = "Password Check Wizard"

    # serialized (ctx, model, ids, method) of the call to resume after the
    # check; NO_ACCESS hides it from everyone, it is read back via sudo()
    request = fields.Char(readonly=True, groups=fields.NO_ACCESS)
    password = fields.Char()

    def run_check(self):
        # re-authenticate the wizard's creator, then replay the pending call
        assert request, "This method can only be accessed over HTTP"
        try:
            self.create_uid._check_credentials(self.password, {'interactive': True})
        except AccessDenied:
            raise UserError(_("Incorrect Password, try again or click on Forgot Password to reset your password."))

        # discard the password and record when identity was last confirmed
        self.password = False
        request.session['identity-check-last'] = time.time()

        ctx, model, ids, method = json.loads(self.sudo().request)
        method = getattr(self.env(context=ctx)[model].browse(ids), method)
        # only methods explicitly wrapped by @check_identity may be resumed
        assert getattr(method, '__has_check_identity', False)
        return method()
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class ChangePasswordWizard(models.TransientModel):
    """ A wizard to manage the change of users' passwords. """
    _name = "change.password.wizard"
    _description = "Change Password Wizard"

    def _default_user_ids(self):
        # one wizard line per res.users record selected in the context
        if self._context.get('active_model') != 'res.users':
            return []
        user_ids = self._context.get('active_ids') or []
        users = self.env['res.users'].browse(user_ids)
        return [
            Command.create({'user_id': user.id, 'user_login': user.login})
            for user in users
        ]

    user_ids = fields.One2many('change.password.user', 'wizard_id', string='Users', default=_default_user_ids)

    def change_password_button(self):
        """Apply the new passwords; reload the UI when the current user's own
        password was changed."""
        self.ensure_one()
        self.user_ids.change_password_button()
        changed_own_password = self.env.user in self.user_ids.user_id
        if changed_own_password:
            return {'type': 'ir.actions.client', 'tag': 'reload'}
        return {'type': 'ir.actions.act_window_close'}
class ChangePasswordUser(models.TransientModel):
    """ A model to configure users in the change password wizard. """
    _name = 'change.password.user'
    _description = 'User, Change Password Wizard'

    wizard_id = fields.Many2one('change.password.wizard', string='Wizard', required=True, ondelete='cascade')
    user_id = fields.Many2one('res.users', string='User', required=True, ondelete='cascade')
    user_login = fields.Char(string='User Login', readonly=True)
    new_passwd = fields.Char(string='New Password', default='')

    def change_password_button(self):
        """Set each line's new password on its user, then wipe the wizard lines."""
        for line in self:
            if line.new_passwd:
                line.user_id.write({'password': line.new_passwd})
            else:
                raise UserError(_("Before clicking on 'Change Password', you have to write a new password."))
        # don't keep temporary passwords in the database longer than necessary
        self.write({'new_passwd': False})
# API keys support
API_KEY_SIZE = 20  # in bytes
INDEX_SIZE = 8  # in hex digits, so 4 bytes, or 20% of the key
KEY_CRYPT_CONTEXT = passlib.context.CryptContext(
    # default is 29000 rounds which is 25~50ms, which is probably unnecessary
    # given in this case all the keys are completely random data: dictionary
    # attacks on API keys isn't much of a concern
    ['pbkdf2_sha512'], pbkdf2_sha512__rounds=6000,
)
# CryptContext.hash with a fallback on .encrypt — presumably for older passlib
# versions that only expose `encrypt`; TODO confirm minimum supported passlib
hash_api_key = getattr(KEY_CRYPT_CONTEXT, 'hash', None) or KEY_CRYPT_CONTEXT.encrypt
class APIKeysUser(models.Model):
    """Extension of ``res.users`` adding API-key based authentication."""
    _inherit = 'res.users'

    api_key_ids = fields.One2many('res.users.apikeys', 'user_id', string="API Keys")

    @property
    def SELF_READABLE_FIELDS(self):
        # users may read their own API keys
        return super().SELF_READABLE_FIELDS + ['api_key_ids']

    @property
    def SELF_WRITEABLE_FIELDS(self):
        # users may manage their own API keys
        return super().SELF_WRITEABLE_FIELDS + ['api_key_ids']

    def _rpc_api_keys_only(self):
        """ To be overridden if RPC access needs to be restricted to API keys, e.g. for 2FA """
        return False

    def _check_credentials(self, password, user_agent_env):
        user_agent_env = user_agent_env or {}
        if user_agent_env.get('interactive', True):
            # interactive login: only the regular password check applies
            if 'interactive' not in user_agent_env:
                _logger.warning(
                    "_check_credentials without 'interactive' env key, assuming interactive login. \
                    Check calls and overrides to ensure the 'interactive' key is properly set in \
                    all _check_credentials environments"
                )
            return super()._check_credentials(password, user_agent_env)

        if not self.env.user._rpc_api_keys_only():
            # non-interactive (RPC): first try the regular password check
            try:
                return super()._check_credentials(password, user_agent_env)
            except AccessDenied:
                pass

        # then fall back on API keys
        # 'rpc' scope does not really exist, we basically require a global key (scope NULL)
        if self.env['res.users.apikeys']._check_credentials(scope='rpc', key=password) == self.env.uid:
            return

        raise AccessDenied()

    @check_identity
    def api_key_wizard(self):
        # open the key-description wizard (identity re-checked first)
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'res.users.apikeys.description',
            'name': 'New API Key',
            'target': 'new',
            'views': [(False, 'form')],
        }
class APIKeys(models.Model):
    """Storage and verification of per-user API keys.

    The table is created by hand (``_auto = False``) so the hashed ``key``
    and its lookup ``index`` columns are not exposed as ORM fields.
    """
    _name = _description = 'res.users.apikeys'
    _auto = False  # so we can have a secret column

    name = fields.Char("Description", required=True, readonly=True)
    user_id = fields.Many2one('res.users', index=True, required=True, readonly=True, ondelete="cascade")
    scope = fields.Char("Scope", readonly=True)
    create_date = fields.Datetime("Creation Date", readonly=True)

    def init(self):
        table = sql.Identifier(self._table)
        # `index` holds the first INDEX_SIZE hex digits of the key, used to
        # preselect candidate rows; `key` holds the pbkdf2 hash of the key
        self.env.cr.execute(sql.SQL("""
        CREATE TABLE IF NOT EXISTS {table} (
            id serial primary key,
            name varchar not null,
            user_id integer not null REFERENCES res_users(id),
            scope varchar,
            index varchar({index_size}) not null CHECK (char_length(index) = {index_size}),
            key varchar not null,
            create_date timestamp without time zone DEFAULT (now() at time zone 'utc')
        )
        """).format(table=table, index_size=sql.Placeholder('index_size')), {
            'index_size': INDEX_SIZE
        })

        index_name = self._table + "_user_id_index_idx"
        if len(index_name) > 63:
            # unique determinist index name (63 is the PostgreSQL identifier limit)
            index_name = self._table[:50] + "_idx_" + sha256(self._table.encode()).hexdigest()[:8]
        self.env.cr.execute(sql.SQL("""
        CREATE INDEX IF NOT EXISTS {index_name} ON {table} (user_id, index);
        """).format(
            table=table,
            index_name=sql.Identifier(index_name)
        ))

    @check_identity
    def remove(self):
        return self._remove()

    def _remove(self):
        """Use the remove() method to remove an API Key. This method implement logic,
        but won't check the identity (mainly used to remove trusted devices)"""
        if not self:
            return {'type': 'ir.actions.act_window_close'}
        # only system users may remove other users' keys
        if self.env.is_system() or self.mapped('user_id') == self.env.user:
            ip = request.httprequest.environ['REMOTE_ADDR'] if request else 'n/a'
            _logger.info("API key(s) removed: scope: <%s> for '%s' (#%s) from %s",
                self.mapped('scope'), self.env.user.login, self.env.uid, ip)
            self.sudo().unlink()
            return {'type': 'ir.actions.act_window_close'}
        raise AccessError(_("You can not remove API keys unless they're yours or you are a system user"))

    def _check_credentials(self, *, scope, key):
        """Return the user id owning a key matching ``key`` within ``scope``,
        or ``None`` when no key matches."""
        assert scope, "scope is required"
        # preselect candidate rows by the plaintext prefix, then verify hashes
        index = key[:INDEX_SIZE]
        self.env.cr.execute('''
            SELECT user_id, key
            FROM {} INNER JOIN res_users u ON (u.id = user_id)
            WHERE u.active and index = %s AND (scope IS NULL OR scope = %s)
        '''.format(self._table),
        [index, scope])
        for user_id, current_key in self.env.cr.fetchall():
            if KEY_CRYPT_CONTEXT.verify(key, current_key):
                return user_id

    def _generate(self, scope, name):
        """Generates an api key.
        :param str scope: the scope of the key. If None, the key will give access to any rpc.
        :param str name: the name of the key, mainly intended to be displayed in the UI.
        :return: str: the key.
        """
        # no need to clear the LRU when *adding* a key, only when removing
        k = binascii.hexlify(os.urandom(API_KEY_SIZE)).decode()
        # only the hash is stored; the plaintext key is returned once
        self.env.cr.execute("""
        INSERT INTO {table} (name, user_id, scope, key, index)
        VALUES (%s, %s, %s, %s, %s)
        RETURNING id
        """.format(table=self._table),
        [name, self.env.user.id, scope, hash_api_key(k), k[:INDEX_SIZE]])

        ip = request.httprequest.environ['REMOTE_ADDR'] if request else 'n/a'
        _logger.info("%s generated: scope: <%s> for '%s' (#%s) from %s",
            self._description, scope, self.env.user.login, self.env.uid, ip)

        return k
class APIKeyDescription(models.TransientModel):
    """Wizard asking for a description before generating a new API key."""
    _name = _description = 'res.users.apikeys.description'

    name = fields.Char("Description", required=True)

    @check_identity
    def make_key(self):
        """Generate the key, drop the wizard record and show the key once.

        :return: an ``ir.actions.act_window`` opening the key-display dialog
        :raise AccessError: when the current user is not an internal user
        """
        # only create keys for users who can delete their keys
        self.check_access_make_key()

        # FIX: reuse the sudo'd record instead of calling self.sudo() a
        # second time to read the name
        description = self.sudo()
        k = self.env['res.users.apikeys']._generate(None, description.name)
        description.unlink()

        return {
            'type': 'ir.actions.act_window',
            'res_model': 'res.users.apikeys.show',
            'name': 'API Key Ready',
            'views': [(False, 'form')],
            'target': 'new',
            'context': {
                'default_key': k,
            }
        }

    def check_access_make_key(self):
        """Raise unless the current user belongs to the internal-user group."""
        if not self.user_has_groups('base.group_user'):
            raise AccessError(_("Only internal users can create API keys"))
class APIKeyShow(models.AbstractModel):
    """Abstract model backing the dialog that displays a freshly generated
    API key (opened by ``res.users.apikeys.description.make_key``)."""
    _name = _description = 'res.users.apikeys.show'

    # the field 'id' is necessary for the onchange that returns the value of 'key'
    id = fields.Id()
    key = fields.Char(readonly=True)
| 44.048928 | 80,125 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
# see http://doc.qt.io/archives/qt-4.8/qprinter.html#PaperSize-enum
# Paper sizes supported by QPrinter/wkhtmltopdf. Each entry maps a QPrinter
# PaperSize 'key' to its portrait dimensions in millimetres ('height' and
# 'width'); the number embedded in 'description' is the QPrinter enum value.
# The final 'custom' sentinel has no dimensions: the user supplies explicit
# page_width/page_height on the paperformat record instead.
PAPER_SIZES = [
    {
        'description': 'A0 5 841 x 1189 mm',
        'key': 'A0',
        'height': 1189.0,
        'width': 841.0,
    }, {
        'key': 'A1',
        'description': 'A1 6 594 x 841 mm',
        'height': 841.0,
        'width': 594.0,
    }, {
        'key': 'A2',
        'description': 'A2 7 420 x 594 mm',
        'height': 594.0,
        'width': 420.0,
    }, {
        'key': 'A3',
        'description': 'A3 8 297 x 420 mm',
        'height': 420.0,
        'width': 297.0,
    }, {
        'key': 'A4',
        'description': 'A4 0 210 x 297 mm, 8.26 x 11.69 inches',
        'height': 297.0,
        'width': 210.0,
    }, {
        'key': 'A5',
        'description': 'A5 9 148 x 210 mm',
        'height': 210.0,
        'width': 148.0,
    }, {
        'key': 'A6',
        'description': 'A6 10 105 x 148 mm',
        'height': 148.0,
        'width': 105.0,
    }, {
        'key': 'A7',
        'description': 'A7 11 74 x 105 mm',
        'height': 105.0,
        'width': 74.0,
    }, {
        'key': 'A8',
        'description': 'A8 12 52 x 74 mm',
        'height': 74.0,
        'width': 52.0,
    }, {
        'key': 'A9',
        'description': 'A9 13 37 x 52 mm',
        'height': 52.0,
        'width': 37.0,
    }, {
        'key': 'B0',
        'description': 'B0 14 1000 x 1414 mm',
        'height': 1414.0,
        'width': 1000.0,
    }, {
        'key': 'B1',
        'description': 'B1 15 707 x 1000 mm',
        'height': 1000.0,
        'width': 707.0,
    }, {
        'key': 'B2',
        'description': 'B2 17 500 x 707 mm',
        'height': 707.0,
        'width': 500.0,
    }, {
        'key': 'B3',
        'description': 'B3 18 353 x 500 mm',
        'height': 500.0,
        'width': 353.0,
    }, {
        'key': 'B4',
        'description': 'B4 19 250 x 353 mm',
        'height': 353.0,
        'width': 250.0,
    }, {
        'key': 'B5',
        'description': 'B5 1 176 x 250 mm, 6.93 x 9.84 inches',
        'height': 250.0,
        'width': 176.0,
    }, {
        'key': 'B6',
        'description': 'B6 20 125 x 176 mm',
        'height': 176.0,
        'width': 125.0,
    }, {
        'key': 'B7',
        'description': 'B7 21 88 x 125 mm',
        'height': 125.0,
        'width': 88.0,
    }, {
        'key': 'B8',
        'description': 'B8 22 62 x 88 mm',
        'height': 88.0,
        'width': 62.0,
    }, {
        'key': 'B9',
        'description': 'B9 23 33 x 62 mm',
        'height': 62.0,
        'width': 33.0,
    }, {
        'key': 'B10',
        'description': 'B10 16 31 x 44 mm',
        'height': 44.0,
        'width': 31.0,
    }, {
        'key': 'C5E',
        'description': 'C5E 24 163 x 229 mm',
        'height': 229.0,
        'width': 163.0,
    }, {
        'key': 'Comm10E',
        'description': 'Comm10E 25 105 x 241 mm, U.S. Common 10 Envelope',
        'height': 241.0,
        'width': 105.0,
    }, {
        'key': 'DLE',
        'description': 'DLE 26 110 x 220 mm',
        'height': 220.0,
        'width': 110.0,
    }, {
        'key': 'Executive',
        'description': 'Executive 4 7.5 x 10 inches, 190.5 x 254 mm',
        'height': 254.0,
        'width': 190.5,
    }, {
        'key': 'Folio',
        'description': 'Folio 27 210 x 330 mm',
        'height': 330.0,
        'width': 210.0,
    }, {
        'key': 'Ledger',
        'description': 'Ledger 28 431.8 x 279.4 mm',
        'height': 279.4,
        'width': 431.8,
    }, {
        'key': 'Legal',
        'description': 'Legal 3 8.5 x 14 inches, 215.9 x 355.6 mm',
        'height': 355.6,
        'width': 215.9,
    }, {
        'key': 'Letter',
        'description': 'Letter 2 8.5 x 11 inches, 215.9 x 279.4 mm',
        'height': 279.4,
        'width': 215.9,
    }, {
        'key': 'Tabloid',
        'description': 'Tabloid 29 279.4 x 431.8 mm',
        'height': 431.8,
        'width': 279.4,
    }, {
        'key': 'custom',
        'description': 'Custom',
    },
]
class report_paperformat(models.Model):
    """Paper format configuration for wkhtmltopdf-rendered reports.

    Either a predefined ``format`` (A4, Letter, ...) is selected, or
    ``format`` is set to 'custom' and explicit ``page_width`` /
    ``page_height`` values are given — never both at once.
    """
    _name = "report.paperformat"
    _description = "Paper Format Config"
    name = fields.Char('Name', required=True)
    default = fields.Boolean('Default paper format ?')
    format = fields.Selection([(ps['key'], ps['description']) for ps in PAPER_SIZES], 'Paper size', default='A4', help="Select Proper Paper size")
    margin_top = fields.Float('Top Margin (mm)', default=40)
    margin_bottom = fields.Float('Bottom Margin (mm)', default=20)
    margin_left = fields.Float('Left Margin (mm)', default=7)
    margin_right = fields.Float('Right Margin (mm)', default=7)
    page_height = fields.Integer('Page height (mm)', default=False)
    page_width = fields.Integer('Page width (mm)', default=False)
    orientation = fields.Selection([
        ('Landscape', 'Landscape'),
        ('Portrait', 'Portrait')
    ], 'Orientation', default='Landscape')
    header_line = fields.Boolean('Display a header line', default=False)
    header_spacing = fields.Integer('Header spacing', default=35)
    disable_shrinking = fields.Boolean('Disable smart shrinking')
    dpi = fields.Integer('Output DPI', required=True, default=90)
    report_ids = fields.One2many('ir.actions.report', 'paperformat_id', 'Associated reports', help="Explicitly associated reports")
    print_page_width = fields.Float('Print page width (mm)', compute='_compute_print_page_size')
    print_page_height = fields.Float('Print page height (mm)', compute='_compute_print_page_size')
    # Fix: also trigger on page_height/page_width. With only 'format' in the
    # decorator, writing a page size on a record that keeps a predefined
    # format never re-ran the check and slipped past the validation.
    @api.constrains('format', 'page_height', 'page_width')
    def _check_format_or_page(self):
        """Forbid mixing a predefined format with explicit page dimensions."""
        if self.filtered(lambda x: x.format != 'custom' and (x.page_width or x.page_height)):
            raise ValidationError(_('You can select either a format or a specific page width/height, but not both.'))
    def _compute_print_page_size(self):
        """Compute the effective printed page size in millimetres, swapping
        width and height when the orientation is Landscape."""
        for record in self:
            width = height = 0.0
            if record.format:
                if record.format == 'custom':
                    width = record.page_width
                    height = record.page_height
                else:
                    # predefined formats store portrait dimensions
                    paper_size = next(ps for ps in PAPER_SIZES if ps['key'] == record.format)
                    width = paper_size['width']
                    height = paper_size['height']
            if record.orientation == 'Landscape':
                # swap sizes
                width, height = height, width
            record.print_page_width = width
            record.print_page_height = height
| 31.877934 | 6,790 |
21,958 | py | PYTHON | 15.0 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import threading
import time
import os
import psycopg2
import pytz
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import odoo
from odoo import api, fields, models, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
# version of the 'base' module shipped on disk; cron processing refuses to
# run when it differs from the version recorded in the database (_check_version)
BASE_VERSION = odoo.modules.load_information_from_description_file('base')['version']
MAX_FAIL_TIME = timedelta(hours=5) # chosen with a fair roll of the dice
class BadVersion(Exception):
    """Raised when the 'base' code version on disk does not match the database."""
    pass
class BadModuleState(Exception):
    """Raised when modules are being installed/upgraded/removed, so crons must wait."""
    pass
# Map an ir.cron interval_type value to a factory building the matching
# relativedelta from an integer interval_number.
_intervalTypes = {
    'days': lambda interval: relativedelta(days=interval),
    'hours': lambda interval: relativedelta(hours=interval),
    'weeks': lambda interval: relativedelta(days=7*interval),
    'months': lambda interval: relativedelta(months=interval),
    'minutes': lambda interval: relativedelta(minutes=interval),
}
class ir_cron(models.Model):
    """ Model describing cron jobs (also called actions or tasks).
    """
    # TODO: perhaps in the future we could consider a flag on ir.cron jobs
    # that would cause database wake-up even if the database has not been
    # loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
    # See also odoo.cron
    _name = "ir.cron"
    _order = 'cron_name'
    _description = 'Scheduled Actions'
    # delegate=True: every ir.cron record owns (and inherits the fields of)
    # an ir.actions.server record that holds the code to execute
    ir_actions_server_id = fields.Many2one(
        'ir.actions.server', 'Server action',
        delegate=True, ondelete='restrict', required=True)
    cron_name = fields.Char('Name', related='ir_actions_server_id.name', store=True, readonly=False)
    user_id = fields.Many2one('res.users', string='Scheduler User', default=lambda self: self.env.user, required=True)
    active = fields.Boolean(default=True)
    interval_number = fields.Integer(default=1, help="Repeat every x.")
    interval_type = fields.Selection([('minutes', 'Minutes'),
                                      ('hours', 'Hours'),
                                      ('days', 'Days'),
                                      ('weeks', 'Weeks'),
                                      ('months', 'Months')], string='Interval Unit', default='months')
    numbercall = fields.Integer(string='Number of Calls', default=1, help='How many times the method is called,\na negative number indicates no limit.')
    doall = fields.Boolean(string='Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts.")
    nextcall = fields.Datetime(string='Next Execution Date', required=True, default=fields.Datetime.now, help="Next planned execution date for this job.")
    lastcall = fields.Datetime(string='Last Execution Date', help="Previous time the cron ran successfully, provided to the job through the context on the `lastcall` key")
    priority = fields.Integer(default=5, help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.')
    @api.model
    def create(self, values):
        """Create a job; its delegated server action is flagged with cron usage."""
        values['usage'] = 'ir_cron'
        # optionally wake the cron workers once the creation is committed
        if os.getenv('ODOO_NOTIFY_CRON_CHANGES'):
            self._cr.postcommit.add(self._notifydb)
        return super(ir_cron, self).create(values)
    @api.model
    def default_get(self, fields_list):
        # only 'code' state is supported for cron job so set it as default
        if not self._context.get('default_state'):
            self = self.with_context(default_state='code')
        return super(ir_cron, self).default_get(fields_list)
    def method_direct_trigger(self):
        """Run the jobs right now, bypassing the scheduler (UI button).

        Each job runs as its configured user, with the previous ``lastcall``
        exposed through the context.
        """
        self.check_access_rights('write')
        for cron in self:
            cron.with_user(cron.user_id).with_context(lastcall=cron.lastcall).ir_actions_server_id.run()
            cron.lastcall = fields.Datetime.now()
        return True
    @classmethod
    def _process_jobs(cls, db_name):
        """ Execute every job ready to be run on this database. """
        try:
            db = odoo.sql_db.db_connect(db_name)
            threading.current_thread().dbname = db_name
            with db.cursor() as cron_cr:
                cls._check_version(cron_cr)
                jobs = cls._get_all_ready_jobs(cron_cr)
                if not jobs:
                    return
                cls._check_modules_state(cron_cr, jobs)
                for job_id in (job['id'] for job in jobs):
                    try:
                        job = cls._acquire_one_job(cron_cr, (job_id,))
                    except psycopg2.extensions.TransactionRollbackError:
                        cron_cr.rollback()
                        _logger.debug("job %s has been processed by another worker, skip", job_id)
                        continue
                    if not job:
                        _logger.debug("another worker is processing job %s, skip", job_id)
                        continue
                    _logger.debug("job %s acquired", job_id)
                    # take into account overridings of _process_job() on that database
                    registry = odoo.registry(db_name)
                    registry[cls._name]._process_job(db, cron_cr, job)
                    _logger.debug("job %s updated and released", job_id)
        except BadVersion:
            _logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
        except BadModuleState:
            _logger.warning('Skipping database %s because of modules to install/upgrade/remove.', db_name)
        except psycopg2.ProgrammingError as e:
            if e.pgcode == '42P01':
                # Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table
                # The table ir_cron does not exist; this is probably not an OpenERP database.
                _logger.warning('Tried to poll an undefined table on database %s.', db_name)
            else:
                raise
        except Exception:
            _logger.warning('Exception in cron:', exc_info=True)
        finally:
            if hasattr(threading.current_thread(), 'dbname'):
                del threading.current_thread().dbname
    @classmethod
    def _check_version(cls, cron_cr):
        """ Ensure the code version matches the database version """
        cron_cr.execute("""
            SELECT latest_version
            FROM ir_module_module
            WHERE name='base'
        """)
        (version,) = cron_cr.fetchone()
        if version is None:
            raise BadModuleState()
        if version != BASE_VERSION:
            raise BadVersion()
    @classmethod
    def _check_modules_state(cls, cr, jobs):
        """ Ensure no module is installing or upgrading """
        cr.execute("""
            SELECT COUNT(*)
            FROM ir_module_module
            WHERE state LIKE %s
        """, ['to %'])
        (changes,) = cr.fetchone()
        if not changes:
            return
        if not jobs:
            raise BadModuleState()
        oldest = min([
            fields.Datetime.from_string(job['nextcall'])
            for job in jobs
        ])
        if datetime.now() - oldest < MAX_FAIL_TIME:
            raise BadModuleState()
        # the cron execution failed around MAX_FAIL_TIME * 60 times (1 failure
        # per minute for 5h) in which case we assume that the crons are stuck
        # because the db has zombie states and we force a call to
        # reset_module_states.
        odoo.modules.reset_modules_state(cr.dbname)
    @classmethod
    def _get_all_ready_jobs(cls, cr):
        """ Return a list of all jobs that are ready to be executed """
        cr.execute("""
            SELECT *
            FROM ir_cron
            WHERE active = true
              AND numbercall != 0
              AND (nextcall <= (now() at time zone 'UTC')
                OR id in (
                    SELECT cron_id
                    FROM ir_cron_trigger
                    WHERE call_at <= (now() at time zone 'UTC')
                )
              )
            ORDER BY priority
        """)
        return cr.dictfetchall()
    @classmethod
    def _acquire_one_job(cls, cr, job_ids):
        """
        Acquire for update one job that is ready from the job_ids tuple.
        The jobs that have already been processed in this worker should
        be excluded from the tuple.
        This function raises a ``psycopg2.errors.SerializationFailure``
        when the ``nextcall`` of one of the job_ids is modified in
        another transaction. You should rollback the transaction and try
        again later.
        """
        # We have to make sure ALL jobs are executed ONLY ONCE no matter
        # how many cron workers may process them. The exlusion mechanism
        # is twofold: (i) prevent parallel processing of the same job,
        # and (ii) prevent re-processing jobs that have been processed
        # already.
        #
        # (i) is implemented via `LIMIT 1 FOR UPDATE SKIP LOCKED`, each
        # worker just acquire one available job at a time and lock it so
        # the other workers don't select it too.
        # (ii) is implemented via the `WHERE` statement, when a job has
        # been processed, its nextcall is updated to a date in the
        # future and the optional triggers are removed.
        #
        # Note about (ii): it is possible that a job becomes available
        # again quickly (e.g. high frequency or self-triggering cron).
        # This function doesn't prevent from acquiring that job multiple
        # times at different moments. This can block a worker on
        # executing a same job in loop. To prevent this problem, the
        # callee is responsible of providing a `job_ids` tuple without
        # the jobs it has executed already.
        #
        # An `UPDATE` lock type is the strongest row lock, it conflicts
        # with ALL other lock types. Among them the `KEY SHARE` row lock
        # which is implicitely aquired by foreign keys to prevent the
        # referenced record from being removed while in use. Because we
        # never delete acquired cron jobs, foreign keys are safe to
        # concurrently reference cron jobs. Hence, the `NO KEY UPDATE`
        # row lock is used, it is a weaker lock that does conflict with
        # everything BUT `KEY SHARE`.
        #
        # Learn more: https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-ROWS
        query = """
            SELECT *
            FROM ir_cron
            WHERE active = true
              AND numbercall != 0
              AND (nextcall <= (now() at time zone 'UTC')
                OR EXISTS (
                    SELECT cron_id
                    FROM ir_cron_trigger
                    WHERE call_at <= (now() at time zone 'UTC')
                      AND cron_id = ir_cron.id
                )
              )
              AND id in %s
            ORDER BY priority
            LIMIT 1 FOR NO KEY UPDATE SKIP LOCKED
        """
        try:
            cr.execute(query, [job_ids], log_exceptions=False)
        except psycopg2.extensions.TransactionRollbackError:
            # A serialization error can occur when another cron worker
            # commits the new `nextcall` value of a cron it just ran and
            # that commit occured just before this query. The error is
            # genuine and the job should be skipped in this cron worker.
            raise
        except Exception as exc:
            _logger.error("bad query: %s\nERROR: %s", query, exc)
            raise
        return cr.dictfetchone()
    @classmethod
    def _process_job(cls, db, cron_cr, job):
        """ Execute a cron job and re-schedule a call for later. """
        # Compute how many calls were missed and at what time we should
        # recall the cron next. In the example bellow, we fake a cron
        # with an interval of 30 (starting at 0) that was last executed
        # at 15 and that is executed again at 135.
        #
        #   0      30     60     90     120    150    180
        # --|-----|-----|-----|-----|-----|-----|----> time
        #     1      2*     *      *      * 3   4
        #
        # 1: lastcall, the last time the cron was executed
        # 2: past_nextcall, the cron nextcall as seen from lastcall
        # *: missed_call, a total of 4 calls are missing
        # 3: now
        # 4: future_nextcall, the cron nextcall as seen from now
        with cls.pool.cursor() as job_cr:
            lastcall = fields.Datetime.to_datetime(job['lastcall'])
            interval = _intervalTypes[job['interval_type']](job['interval_number'])
            env = api.Environment(job_cr, job['user_id'], {'lastcall': lastcall})
            ir_cron = env[cls._name]
            # Use the user's timezone to compare and compute datetimes,
            # otherwise unexpected results may appear. For instance, adding
            # 1 month in UTC to July 1st at midnight in GMT+2 gives July 30
            # instead of August 1st!
            now = fields.Datetime.context_timestamp(ir_cron, datetime.utcnow())
            past_nextcall = fields.Datetime.context_timestamp(
                ir_cron, fields.Datetime.to_datetime(job['nextcall']))
            # Compute how many call were missed
            missed_call = past_nextcall
            missed_call_count = 0
            while missed_call <= now:
                missed_call += interval
                missed_call_count += 1
            future_nextcall = missed_call
            # Compute how many time we should run the cron
            effective_call_count = (
                1 if not missed_call_count  # run at least once
                else 1 if not job['doall']  # run once for all
                else missed_call_count if job['numbercall'] == -1  # run them all
                else min(missed_call_count, job['numbercall'])  # run maximum numbercall times
            )
            call_count_left = max(job['numbercall'] - effective_call_count, -1)
            # The actual cron execution
            for call in range(effective_call_count):
                ir_cron._callback(job['cron_name'], job['ir_actions_server_id'], job['id'])
            # Update the cron with the information computed above
            cron_cr.execute("""
                UPDATE ir_cron
                SET nextcall=%s,
                    numbercall=%s,
                    lastcall=%s,
                    active=%s
                WHERE id=%s
            """, [
                fields.Datetime.to_string(future_nextcall.astimezone(pytz.UTC)),
                call_count_left,
                fields.Datetime.to_string(now.astimezone(pytz.UTC)),
                job['active'] and bool(call_count_left),
                job['id'],
            ])
            # consumed triggers are single-use: drop the ones now in the past
            cron_cr.execute("""
                DELETE FROM ir_cron_trigger
                WHERE cron_id = %s
                  AND call_at < (now() at time zone 'UTC')
            """, [job['id']])
            cron_cr.commit()
    @api.model
    def _callback(self, cron_name, server_action_id, job_id):
        """ Run the method associated to a given job. It takes care of logging
        and exception handling. Note that the user running the server action
        is the user calling this method. """
        try:
            if self.pool != self.pool.check_signaling():
                # the registry has changed, reload self in the new registry
                self.env.reset()
                self = self.env()[self._name]
            log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
            odoo.netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (self._cr.dbname, self._uid, '*', cron_name, server_action_id), depth=log_depth)
            start_time = False
            _logger.info('Starting job `%s`.', cron_name)
            if _logger.isEnabledFor(logging.DEBUG):
                start_time = time.time()
            self.env['ir.actions.server'].browse(server_action_id).run()
            _logger.info('Job `%s` done.', cron_name)
            if start_time and _logger.isEnabledFor(logging.DEBUG):
                end_time = time.time()
                _logger.debug('%.3fs (cron %s, server action %d with uid %d)', end_time - start_time, cron_name, server_action_id, self.env.uid)
            self.pool.signal_changes()
        except Exception as e:
            self.pool.reset_changes()
            _logger.exception("Call from cron %s for server action #%s failed in Job #%s",
                              cron_name, server_action_id, job_id)
            self._handle_callback_exception(cron_name, server_action_id, job_id, e)
    @api.model
    def _handle_callback_exception(self, cron_name, server_action_id, job_id, job_exception):
        """ Method called when an exception is raised by a job.
        Simply logs the exception and rollback the transaction. """
        self._cr.rollback()
    def _try_lock(self, lockfk=False):
        """Try to grab a dummy exclusive write-lock to the rows with the given ids,
        to make sure a following write() or unlink() will not block due
        to a process currently executing those cron tasks.
        :param lockfk: acquire a strong row lock which conflicts with
                       the lock aquired by foreign keys when they
                       reference this row.
        """
        row_level_lock = "UPDATE" if lockfk else "NO KEY UPDATE"
        try:
            self._cr.execute(f"""
                SELECT id
                FROM "{self._table}"
                WHERE id IN %s
                FOR {row_level_lock} NOWAIT
            """, [tuple(self.ids)], log_exceptions=False)
        except psycopg2.OperationalError:
            self._cr.rollback() # early rollback to allow translations to work for the user feedback
            raise UserError(_("Record cannot be modified right now: "
                              "This cron task is currently being executed and may not be modified "
                              "Please try again in a few minutes"))
    def write(self, vals):
        """Write after ensuring no worker currently holds the jobs' row locks."""
        self._try_lock()
        # rescheduling or (re)activating a job may require waking the workers
        if ('nextcall' in vals or vals.get('active')) and os.getenv('ODOO_NOTIFY_CRON_CHANGES'):
            self._cr.postcommit.add(self._notifydb)
        return super(ir_cron, self).write(vals)
    def unlink(self):
        """Delete after taking the strong (FK-conflicting) row lock."""
        self._try_lock(lockfk=True)
        return super(ir_cron, self).unlink()
    def try_write(self, values):
        """Best-effort write: silently skip if a worker holds the row locks.

        :return: the result of write(), or False when the lock was unavailable.
        """
        try:
            with self._cr.savepoint():
                self._cr.execute(f"""
                    SELECT id
                    FROM "{self._table}"
                    WHERE id IN %s
                    FOR NO KEY UPDATE NOWAIT
                """, [tuple(self.ids)], log_exceptions=False)
        except psycopg2.OperationalError:
            pass
        else:
            return super(ir_cron, self).write(values)
        return False
    @api.model
    def toggle(self, model, domain):
        """(De)activate the job depending on whether `model` has records matching `domain`."""
        # Prevent deactivated cron jobs from being re-enabled through side effects on
        # neutralized databases.
        if self.env['ir.config_parameter'].sudo().get_param('database.is_neutralized'):
            return True
        active = bool(self.env[model].search_count(domain))
        return self.try_write({'active': active})
    @api.model
    def _trigger(self, at=None):
        """
        Schedule a cron job to be executed soon independently of its
        ``nextcall`` field value.
        By default the cron is scheduled to be executed in the next batch but
        the optional `at` argument may be given to delay the execution later
        with a precision down to 1 minute.
        The method may be called with a datetime or an iterable of datetime.
        The actual implementation is in :meth:`~._trigger_list`, which is the
        recommended method for overrides.
        :param Optional[Union[datetime.datetime, list[datetime.datetime]]] at:
            When to execute the cron, at one or several moments in time instead
            of as soon as possible.
        """
        if at is None:
            at_list = [fields.Datetime.now()]
        elif isinstance(at, datetime):
            at_list = [at]
        else:
            at_list = list(at)
            assert all(isinstance(at, datetime) for at in at_list)
        self._trigger_list(at_list)
    @api.model
    def _trigger_list(self, at_list):
        """
        Implementation of :meth:`~._trigger`.
        :param list[datetime.datetime] at_list:
            Execute the cron later, at precise moments in time.
        """
        self.ensure_one()
        now = fields.Datetime.now()
        if not self.sudo().active:
            # skip triggers that would be ignored
            at_list = [at for at in at_list if at > now]
        if not at_list:
            return
        self.env['ir.cron.trigger'].sudo().create([
            {'cron_id': self.id, 'call_at': at}
            for at in at_list
        ])
        if _logger.isEnabledFor(logging.DEBUG):
            ats = ', '.join(map(str, at_list))
            _logger.debug("will execute '%s' at %s", self.sudo().name, ats)
        if min(at_list) <= now or os.getenv('ODOO_NOTIFY_CRON_CHANGES'):
            self._cr.postcommit.add(self._notifydb)
    def _notifydb(self):
        """ Wake up the cron workers
        The ODOO_NOTIFY_CRON_CHANGES environment variable allows to force the notifydb on both
        ir_cron modification and on trigger creation (regardless of call_at)
        """
        with odoo.sql_db.db_connect('postgres').cursor() as cr:
            cr.execute('NOTIFY cron_trigger, %s', [self.env.cr.dbname])
        _logger.debug("cron workers notified")
class ir_cron_trigger(models.Model):
    """One-shot wake-up requests for cron jobs, created by ir.cron._trigger()."""
    _name = 'ir.cron.trigger'
    _description = 'Triggered actions'
    cron_id = fields.Many2one("ir.cron", index=True)
    call_at = fields.Datetime()
    @api.autovacuum
    def _gc_cron_triggers(self):
        # triggers are single-use; anything older than a week is stale leftovers
        self.search([('call_at', '<', datetime.now() + relativedelta(weeks=-1))]).unlink()
| 42.223077 | 21,956 |
5,796 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import datetime
import json
import logging
from dateutil.relativedelta import relativedelta
from odoo import fields, models, api, _
from odoo.exceptions import UserError
from odoo.http import request
from odoo.tools.profiler import make_session
from odoo.tools.speedscope import Speedscope
_logger = logging.getLogger(__name__)
class IrProfile(models.Model):
    """Storage for code-profiling results, viewable as speedscope flamegraphs."""
    _name = 'ir.profile'
    _description = 'Profiling results'
    _log_access = False # avoid useless foreign key on res_user
    _order = 'session desc, id desc'
    create_date = fields.Datetime('Creation Date')
    session = fields.Char('Session', index=True)
    name = fields.Char('Description')
    duration = fields.Float('Duration')
    init_stack_trace = fields.Text('Initial stack trace', prefetch=False)
    sql = fields.Text('Sql', prefetch=False)
    traces_async = fields.Text('Traces Async', prefetch=False)
    traces_sync = fields.Text('Traces Sync', prefetch=False)
    qweb = fields.Text('Qweb', prefetch=False)
    entry_count = fields.Integer('Entry count')
    speedscope = fields.Binary('Speedscope', compute='_compute_speedscope')
    speedscope_url = fields.Text('Open', compute='_compute_speedscope_url')
    @api.autovacuum
    def _gc_profile(self):
        # remove profiles older than 30 days
        domain = [('create_date', '<', fields.Datetime.now() - datetime.timedelta(days=30))]
        return self.sudo().search(domain).unlink()
    def _compute_speedscope(self):
        """Assemble the stored JSON collector outputs into a single
        base64-encoded speedscope document."""
        for execution in self:
            sp = Speedscope(init_stack_trace=json.loads(execution.init_stack_trace))
            # each collector output is optional; add only what was recorded
            if execution.sql:
                sp.add('sql', json.loads(execution.sql))
            if execution.traces_async:
                sp.add('frames', json.loads(execution.traces_async))
            if execution.traces_sync:
                sp.add('settrace', json.loads(execution.traces_sync))
            result = json.dumps(sp.add_default().make())
            execution.speedscope = base64.b64encode(result.encode('utf-8'))
    def _compute_speedscope_url(self):
        # route serving the speedscope viewer for this profile record
        for profile in self:
            profile.speedscope_url = f'/web/speedscope/{profile.id}'
    def _enabled_until(self):
        """
        If the profiling is enabled, return until when it is enabled.
        Otherwise return ``None``.
        """
        # string comparison works because fields.Datetime uses a sortable format
        limit = self.env['ir.config_parameter'].sudo().get_param('base.profiling_enabled_until', '')
        return limit if str(fields.Datetime.now()) < limit else None
    @api.model
    def set_profiling(self, profile=None, collectors=None, params=None):
        """
        Enable or disable profiling for the current user.
        :param profile: ``True`` to enable profiling, ``False`` to disable it.
        :param list collectors: optional list of collectors to use (string)
        :param dict params: optional parameters set on the profiler object
        """
        # Note: parameters are coming from a rpc calls or route param (public user),
        # meaning that corresponding session variables are client-defined.
        # This allows to activate any profiler, but can be
        # dangerous handling request.session.profile_collectors/profile_params.
        if profile:
            limit = self._enabled_until()
            _logger.info("User %s started profiling", self.env.user.name)
            if not limit:
                request.session.profile_session = None
                # system users get the wizard to enable profiling; others get an error
                if self.env.user._is_system():
                    return {
                        'type': 'ir.actions.act_window',
                        'view_mode': 'form',
                        'res_model': 'base.enable.profiling.wizard',
                        'target': 'new',
                        'views': [[False, 'form']],
                    }
                raise UserError(_('Profiling is not enabled on this database. Please contact an administrator.'))
            if not request.session.profile_session:
                request.session.profile_session = make_session(self.env.user.name)
                request.session.profile_expiration = limit
                if request.session.profile_collectors is None:
                    request.session.profile_collectors = []
                if request.session.profile_params is None:
                    request.session.profile_params = {}
        elif profile is not None:
            # profile is an explicit falsy value: disable profiling
            request.session.profile_session = None
        if collectors is not None:
            request.session.profile_collectors = collectors
        if params is not None:
            request.session.profile_params = params
        return {
            'session': request.session.profile_session,
            'collectors': request.session.profile_collectors,
            'params': request.session.profile_params,
        }
class EnableProfilingWizard(models.TransientModel):
    """Wizard letting an administrator enable profiling for a limited time."""
    _name = 'base.enable.profiling.wizard'
    _description = "Enable profiling for some time"
    duration = fields.Selection([
        ('minutes_5', "5 Minutes"),
        ('hours_1', "1 Hour"),
        ('days_1', "1 Day"),
        ('months_1', "1 Month"),
    ], string="Enable profiling for")
    expiration = fields.Datetime("Enable profiling until", compute='_compute_expiration', store=True, readonly=False)
    @api.depends('duration')
    def _compute_expiration(self):
        """Derive the expiration datetime from the selected duration.

        Selection keys encode ``<unit>_<amount>``; no selection falls back to
        'days_0', i.e. an expiration of right now (profiling stays disabled).
        """
        for wizard in self:
            unit, amount = (wizard.duration or 'days_0').split('_')
            wizard.expiration = fields.Datetime.now() + relativedelta(**{unit: int(amount)})
    def submit(self):
        """Persist the expiration as the profiling enablement limit."""
        self.env['ir.config_parameter'].set_param('base.profiling_enabled_until', self.expiration)
        return False
| 40.25 | 5,796 |
33,683 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
import re
from lxml import etree
from odoo import api, models, _, Command
from odoo.exceptions import AccessError, RedirectWarning, UserError
from odoo.tools import ustr
_logger = logging.getLogger(__name__)
class ResConfigModuleInstallationMixin(object):
    """Mixin providing batch installation of addons by name/record pairs."""
    __slots__ = ()
    @api.model
    def _install_modules(self, modules):
        """ Install the requested modules.
        :param modules: a list of tuples (module_name, module_record)
        :return: the next action to execute
        """
        # names without a matching local record must go through the Apps client
        missing_names = [name for name, module in modules if not module]
        to_install = self.env['ir.module.module']
        for _name, module in modules:
            if module and module.state == 'uninstalled':
                to_install += module
        result = None
        if to_install:
            result = to_install.button_immediate_install()
        #FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
        if missing_names:
            return {
                'type': 'ir.actions.client',
                'tag': 'apps',
                'params': {'modules': missing_names},
            }
        return result
class ResConfigConfigurable(models.TransientModel):
    '''Base class for new-style configuration items.

    Concrete configuration items inherit from this model, implement
    :meth:`execute` (and optionally :meth:`cancel`) and have their view
    inherit from the related ``res_config_view_base`` view.
    '''
    _name = 'res.config'
    _description = 'Config'
    def start(self):
        """Entry point of the configuration item: simply display it."""
        # pylint: disable=next-method-called
        return self.next()
    def next(self):
        """Reload the settings page."""
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
    def execute(self):
        """Handler for the ``Next`` button; must be overridden.

        If the override returns an action dictionary, that action is executed
        instead of moving on to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')
    def cancel(self):
        """Handler for the ``Skip``/``Cancel`` buttons; override as needed.

        As with :meth:`execute`, a returned action dictionary is executed
        in place of the default behaviour. The default implementation is a
        no-op.
        """
    def action_next(self):
        """``next`` event handler: run :meth:`execute`, then proceed unless
        it returned an action to run instead."""
        action = self.execute()
        if action:
            return action
        # pylint: disable=next-method-called
        return self.next()
    def action_skip(self):
        """``skip`` event handler: run :meth:`cancel`, then proceed unless
        it returned an action to run instead."""
        action = self.cancel()
        if action:
            return action
        # pylint: disable=next-method-called
        return self.next()
    def action_cancel(self):
        """``cancel`` event handler; behaves exactly like :meth:`action_skip`.

        The event is not generated by the inheritable res.config.view.base
        view itself: an inherited view has to overload one of the buttons
        (or add one more).
        """
        action = self.cancel()
        if action:
            return action
        # pylint: disable=next-method-called
        return self.next()
class ResConfigInstaller(models.TransientModel, ResConfigModuleInstallationMixin):
    """ New-style configuration base specialized for addons selection
    and installation.

    Basic usage
    -----------
    Subclasses can simply define a number of boolean fields. The field names
    should be the names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be interpreted as
    addons to install, and batch-installed.

    Additional addons
    -----------------
    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).

    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }

    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.

    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['sale_service'],
        }

    will install both ``sale_crm`` and ``sale_service`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.

    Hook methods
    ------------
    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.

    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).

    Complete control
    ----------------
    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``ResConfigInstaller.modules_to_install`` to add or
    remove addons.

    Skipping the installer
    ----------------------
    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.

    Again, setup your hooks accordingly.

    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set

    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'
    _description = 'Config Installer'

    # mapping {tuple of addon names: iterable of addon names} — see the
    # "Additional addons" section of the class docstring
    _install_if = {}
def already_installed(self):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return [m.name for m in self._already_installed()]
def _already_installed(self):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's record
:returns: a list of all installed modules in this installer
:rtype: recordset (collection of Record)
"""
selectable = [name for name, field in self._fields.items()
if field.type == 'boolean']
return self.env['ir.module.module'].search([('name', 'in', selectable),
('state', 'in', ['to install', 'installed', 'to upgrade'])])
def modules_to_install(self):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read()
for module_name, to_install in installer.items()
if self._fields[module_name].type == 'boolean' and to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook() or set())
additionals = set(module
for requirements, consequences in self._install_if.items()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals) - set(self.already_installed())
@api.model
def default_get(self, fields_list):
''' If an addon is already installed, check it by default
'''
defaults = super(ResConfigInstaller, self).default_get(fields_list)
return dict(defaults, **dict.fromkeys(self.already_installed(), True))
@api.model
def fields_get(self, fields=None, attributes=None):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(ResConfigInstaller, self).fields_get(fields, attributes=attributes)
for name in self.already_installed():
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self):
to_install = list(self.modules_to_install())
_logger.info('Selecting addons %s to install', to_install)
IrModule = self.env['ir.module.module']
modules = []
for name in to_install:
module = IrModule.search([('name', '=', name)], limit=1)
modules.append((name, module))
return self._install_modules(modules)
class ResConfigSettings(models.TransientModel, ResConfigModuleInstallationMixin):
    """ Base configuration wizard for application settings. It provides support for setting
        default values, assigning groups to employee users, and installing modules.

        To make such a 'settings' wizard, define a model like::

            class MyConfigWizard(models.TransientModel):
                _name = 'my.settings'
                _inherit = 'res.config.settings'

                default_foo = fields.type(..., default_model='my.model'),
                group_bar = fields.Boolean(..., group='base.group_user', implied_group='my.group'),
                module_baz = fields.Boolean(...),
                config_qux = fields.Char(..., config_parameter='my.parameter')
                other_field = fields.type(...),

        The method ``execute`` provides some support based on a naming convention:

        *   For a field like 'default_XXX', ``execute`` sets the (global) default value of
            the field 'XXX' in the model named by ``default_model`` to the field's value.

        *   For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
            to/from the implied groups of 'group', depending on the field's value.
            By default 'group' is the group Employee. Groups are given by their xml id.
            The attribute 'group' may contain several xml ids, separated by commas.

        *   For a selection field like 'group_XXX' composed of 2 string values ('0' and '1'),
            ``execute`` adds/removes 'implied_group' to/from the implied groups of 'group',
            depending on the field's value.
            By default 'group' is the group Employee. Groups are given by their xml id.
            The attribute 'group' may contain several xml ids, separated by commas.

        *   For a boolean field like 'module_XXX', ``execute`` triggers the immediate
            installation of the module named 'XXX' if the field has value ``True``.

        *   For a selection field like 'module_XXX' composed of 2 string values ('0' and '1'),
            ``execute`` triggers the immediate installation of the module named 'XXX'
            if the field has the value ``'1'``.

        *   For a field with no specific prefix BUT an attribute 'config_parameter',
            ``execute`` will save its value in an ir.config.parameter (global setting for the
            database).

        *   For the other fields, the method ``execute`` invokes `set_values`.
            Override it to implement the effect of those fields.

        The method ``default_get`` retrieves values that reflect the current status of the
        fields like 'default_XXX', 'group_XXX', 'module_XXX' and config_XXX.
        It also invokes all methods with a name that starts with 'get_default_';
        such methods can be defined to provide current values for other fields.
    """
    _name = 'res.config.settings'
    _description = 'Config Settings'
def _valid_field_parameter(self, field, name):
return (
name in ('default_model', 'config_parameter')
or field.type in ('boolean', 'selection') and name in ('group', 'implied_group')
or super()._valid_field_parameter(field, name)
)
def copy(self, values):
raise UserError(_("Cannot duplicate configuration!"), "")
    @api.model
    def fields_view_get(self, view_id=None, view_type='form',
                        toolbar=False, submenu=False):
        """ Force every ``module_*`` field to readonly in the returned view
        when the current user is not allowed to install modules. """
        ret_val = super(ResConfigSettings, self).fields_view_get(
            view_id=view_id, view_type=view_type,
            toolbar=toolbar, submenu=submenu)
        can_install_modules = self.env['ir.module.module'].check_access_rights(
            'write', raise_exception=False)
        doc = etree.XML(ret_val['arch'])
        for field in ret_val['fields']:
            if not field.startswith("module_"):
                continue
            for node in doc.xpath("//field[@name='%s']" % field):
                if not can_install_modules:
                    # both the attribute and the json "modifiers" must be set
                    # for the web client to honour the readonly state
                    node.set("readonly", "1")
                    modifiers = json.loads(node.get("modifiers"))
                    modifiers['readonly'] = True
                    node.set("modifiers", json.dumps(modifiers))
        ret_val['arch'] = etree.tostring(doc, encoding='unicode')
        return ret_val
def onchange_module(self, field_value, module_name):
ModuleSudo = self.env['ir.module.module'].sudo()
modules = ModuleSudo.search(
[('name', '=', module_name.replace("module_", '')),
('state', 'in', ['to install', 'installed', 'to upgrade'])])
if modules and not int(field_value):
deps = modules.sudo().downstream_dependencies()
dep_names = (deps | modules).mapped('shortdesc')
message = '\n'.join(dep_names)
return {
'warning': {
'title': _('Warning!'),
'message': _('Disabling this option will also uninstall the following modules \n%s', message),
}
}
return {}
def _register_hook(self):
""" Add an onchange method for each module field. """
def make_method(name):
return lambda self: self.onchange_module(self[name], name)
for name in self._fields:
if name.startswith('module_'):
method = make_method(name)
self._onchange_methods[name].append(method)
    @api.model
    def _get_classified_fields(self):
        """ return a dictionary with the fields classified by category::

            { 'default': [('default_foo', 'model', 'foo'), ...],
              'group':   [('group_bar', groups, implied_group), ...],
              'module':  [('module_baz', module), ...],
              'config':  [('config_qux', 'my.parameter'), ...],
              'other':   ['other_field', ...],
            }

        :raises Exception: when a prefixed field misses its required
            attribute or has an unsupported field type
        """
        IrModule = self.env['ir.module.module'].sudo()
        Groups = self.env['res.groups']
        ref = self.env.ref
        defaults, groups, module_names, configs, others = [], [], [], [], []
        for name, field in self._fields.items():
            if name.startswith('default_'):
                if not hasattr(field, 'default_model'):
                    raise Exception("Field %s without attribute 'default_model'" % field)
                defaults.append((name, field.default_model, name[8:]))
            elif name.startswith('group_'):
                if field.type not in ('boolean', 'selection'):
                    raise Exception("Field %s must have type 'boolean' or 'selection'" % field)
                if not hasattr(field, 'implied_group'):
                    raise Exception("Field %s without attribute 'implied_group'" % field)
                # 'group' may hold several comma-separated xml ids; defaults to employees
                field_group_xmlids = getattr(field, 'group', 'base.group_user').split(',')
                field_groups = Groups.concat(*(ref(it) for it in field_group_xmlids))
                groups.append((name, field_groups, ref(field.implied_group)))
            elif name.startswith('module_'):
                if field.type not in ('boolean', 'selection'):
                    raise Exception("Field %s must have type 'boolean' or 'selection'" % field)
                module_names.append(name[7:])
            elif hasattr(field, 'config_parameter'):
                if field.type not in ('boolean', 'integer', 'float', 'char', 'selection', 'many2one', 'datetime'):
                    raise Exception("Field %s must have type 'boolean', 'integer', 'float', 'char', 'selection', 'many2one' or 'datetime'" % field)
                configs.append((name, field.config_parameter))
            else:
                others.append(name)
        # retrieve all modules at once, and build the list 'modules' from it
        name2module = {module.name: module for module in IrModule.search([('name', 'in', module_names)])}
        modules = [('module_' + name, name2module.get(name, IrModule)) for name in module_names]
        return {'default': defaults, 'group': groups, 'module': modules, 'config': configs, 'other': others}
def get_values(self):
"""
Return values for the fields other that `default`, `group` and `module`
"""
return {}
    @api.model
    def default_get(self, fields):
        """ Compute the current value of every settings field: ir.default
        records for ``default_``, implied-group membership for ``group_``,
        module state for ``module_``, ir.config_parameter for ``config_``,
        and :meth:`get_values` for the remaining fields. """
        IrDefault = self.env['ir.default']
        IrConfigParameter = self.env['ir.config_parameter'].sudo()
        classified = self._get_classified_fields()
        res = super(ResConfigSettings, self).default_get(fields)
        # defaults: take the corresponding default value they set
        for name, model, field in classified['default']:
            value = IrDefault.get(model, field)
            if value is not None:
                res[name] = value
        # groups: which groups are implied by the group Employee
        for name, groups, implied_group in classified['group']:
            res[name] = all(implied_group in group.implied_ids for group in groups)
            if self._fields[name].type == 'selection':
                res[name] = str(int(res[name]))  # True, False -> '1', '0'
        # modules: which modules are installed/to install
        for name, module in classified['module']:
            res[name] = module.state in ('installed', 'to install', 'to upgrade')
            if self._fields[name].type == 'selection':
                res[name] = str(int(res[name]))  # True, False -> '1', '0'
        # config: get & convert stored ir.config_parameter (or default)
        WARNING_MESSAGE = "Error when converting value %r of field %s for ir.config.parameter %r"
        for name, icp in classified['config']:
            field = self._fields[name]
            value = IrConfigParameter.get_param(icp, field.default(self) if field.default else False)
            if value is not False:
                # in all these conversions a bad stored value is logged and
                # replaced rather than crashing the settings screen
                if field.type == 'many2one':
                    try:
                        # Special case when value is the id of a deleted record, we do not want to
                        # block the settings screen
                        value = self.env[field.comodel_name].browse(int(value)).exists().id
                    except (ValueError, TypeError):
                        _logger.warning(WARNING_MESSAGE, value, field, icp)
                        value = False
                elif field.type == 'integer':
                    try:
                        value = int(value)
                    except (ValueError, TypeError):
                        _logger.warning(WARNING_MESSAGE, value, field, icp)
                        value = 0
                elif field.type == 'float':
                    try:
                        value = float(value)
                    except (ValueError, TypeError):
                        _logger.warning(WARNING_MESSAGE, value, field, icp)
                        value = 0.0
                elif field.type == 'boolean':
                    value = bool(value)
                res[name] = value
        res.update(self.get_values())
        return res
    def set_values(self):
        """
        Set values for the fields other that `default`, `group` and `module`:
        write ir.default records, (un)apply implied groups, and store
        ``config_parameter`` fields as ir.config_parameter records.
        """
        self = self.with_context(active_test=False)
        classified = self._get_classified_fields()
        # default values fields
        IrDefault = self.env['ir.default'].sudo()
        for name, model, field in classified['default']:
            if isinstance(self[name], models.BaseModel):
                if self._fields[name].type == 'many2one':
                    value = self[name].id
                else:
                    value = self[name].ids
            else:
                value = self[name]
            IrDefault.set(model, field, value)
        # group fields: modify group / implied groups
        current_settings = self.default_get(list(self.fields_get()))
        with self.env.norecompute():
            # sort so removals (falsy values) are processed before additions
            for name, groups, implied_group in sorted(classified['group'], key=lambda k: self[k[0]]):
                groups = groups.sudo()
                implied_group = implied_group.sudo()
                if self[name] == current_settings[name]:
                    continue
                if int(self[name]):
                    groups._apply_group(implied_group)
                else:
                    groups._remove_group(implied_group)
        # config fields: store ir.config_parameters
        IrConfigParameter = self.env['ir.config_parameter'].sudo()
        for name, icp in classified['config']:
            field = self._fields[name]
            value = self[name]
            if field.type == 'char':
                # storing developer keys as ir.config_parameter may lead to nasty
                # bugs when users leave spaces around them
                value = (value or "").strip() or False
            elif field.type in ('integer', 'float'):
                value = repr(value) if value else False
            elif field.type == 'many2one':
                # value is a (possibly empty) recordset
                value = value.id
            IrConfigParameter.set_param(icp, value)
    def execute(self):
        """
        Called when settings are saved.

        This method will call `set_values` and will install/uninstall any modules defined by
        `module_` Boolean fields and then trigger a web client reload.

        .. warning::

            This method **SHOULD NOT** be overridden, in most cases what you want to override is
            `~set_values()` since `~execute()` does little more than simply call `~set_values()`.

            The part that installs/uninstalls modules **MUST ALWAYS** be at the end of the
            transaction, otherwise there's a big risk of registry <-> database desynchronisation.
        """
        self.ensure_one()
        if not self.env.is_admin():
            raise AccessError(_("Only administrators can change the settings"))
        self = self.with_context(active_test=False)
        classified = self._get_classified_fields()
        self.set_values()
        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_modules = self.env['ir.module.module']
        lm = len('module_')
        for name, module in classified['module']:
            if int(self[name]):
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_modules += module
        # flush pending updates before the registry is rebuilt below
        if to_install or to_uninstall_modules:
            self.flush()
        if to_uninstall_modules:
            to_uninstall_modules.button_immediate_uninstall()
        installation_status = self._install_modules(to_install)
        if installation_status or to_uninstall_modules:
            # After the uninstall/install calls, the registry and environments
            # are no longer valid. So we reset the environment.
            self.env.reset()
            self = self.env()[self._name]
        # pylint: disable=next-method-called
        config = self.env['res.config'].next() or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
def cancel(self):
# ignore the current record, and send the action to reopen the view
actions = self.env['ir.actions.act_window'].search([('res_model', '=', self._name)], limit=1)
if actions:
return actions.read()[0]
return {}
def name_get(self):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
action = self.env['ir.actions.act_window'].search([('res_model', '=', self._name)], limit=1)
name = action.name or self._name
return [(record.id, name) for record in self]
@api.model
def get_option_path(self, menu_xml_id):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "sales_team.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
ir_ui_menu = self.env.ref(menu_xml_id)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
@api.model
def get_option_name(self, full_field_name):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.env[model_name].fields_get([field_name])[field_name]['string']
    @api.model
    def get_config_warning(self, msg):
        """
        Helper: return a Warning exception with the given message where the %(field:xxx)s
        and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
        full path.

        Usage:
        ------
        Just include in your error message %(field:model_name.field_name)s to obtain the human
        readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
        full path.

        Example of use:
        ---------------
        from odoo.addons.base.models.res_config import get_warning_config
        raise get_warning_config(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:sales_team.menu_sale_config)s."), context=context)

        This will return an exception containing the following message:
            Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.

        What if there is another substitution in the message already?
        -------------------------------------------------------------
        You could have a situation where the error message you want to upgrade already contains a substitution. Example:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %menu:account.menu_account_config)s, and leave the rest alone.
        In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
        """
        self = self.sudo()
        # Process the message
        # 1/ find the menu and/or field references, put them in a list
        regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
        references = re.findall(regex_path, msg, flags=re.I)
        # 2/ fetch the menu and/or field replacement values (full path and
        #    human readable field's name) and the action_id if any
        values = {}
        action_id = None
        for item in references:
            ref_type, ref = item.split(':')
            if ref_type == 'menu':
                values[item], action_id = self.get_option_path(ref)
            elif ref_type == 'field':
                values[item] = self.get_option_name(ref)
        # 3/ substitute and return the result; redirect to the menu's action
        # when one was referenced
        if (action_id):
            return RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
        return UserError(msg % values)
    @api.model
    def create(self, values):
        """ Create the settings record, dropping unchanged related values
        from ``values`` first (see the optimisation note below). """
        # Optimisation: saving a res.config.settings even without changing any
        # values will trigger the write of all related values. This in turn may
        # trigger chain of further recomputation. To avoid it, delete values
        # that were not changed.
        for field in self._fields.values():
            if not (field.name in values and field.related and not field.readonly):
                continue
            # we write on a related field like
            # qr_code = fields.Boolean(related='company_id.qr_code', readonly=False)
            fname0, *fnames = field.related.split(".")
            if fname0 not in values:
                continue
            # determine the current value
            field0 = self._fields[fname0]
            old_value = field0.convert_to_record(
                field0.convert_to_cache(values[fname0], self), self)
            for fname in fnames:
                old_value = next(iter(old_value), old_value)[fname]
            # determine the new value
            new_value = field.convert_to_record(
                field.convert_to_cache(values[field.name], self), self)
            # drop if the value is the same
            if old_value == new_value:
                values.pop(field.name)
        return super(ResConfigSettings, self).create(values)
| 42.96301 | 33,683 |
44,927 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
from contextlib import closing
from datetime import datetime
from subprocess import Popen, PIPE
import base64
import hashlib
import itertools
import json
import logging
import os
import re
import textwrap
import uuid
import psycopg2
try:
import sass as libsass
except ImportError:
# If the `sass` python library isn't found, we fallback on the
# `sassc` executable in the path.
libsass = None
from odoo import release, SUPERUSER_ID
from odoo.http import request
from odoo.modules.module import get_resource_path
from odoo.tools import func, misc, transpile_javascript, is_odoo_module, SourceMapGenerator, profiler
from odoo.tools.misc import file_open, html_escape as escape
from odoo.tools.pycompat import to_text
# module-level logger
_logger = logging.getLogger(__name__)
# source-file extensions relevant to asset bundling (presumably used by the
# bundle machinery further down this file — usage site not visible here)
EXTENSIONS = (".js", ".css", ".scss", ".sass", ".less")
class CompileError(RuntimeError): pass
def rjsmin(script):
    """ Minify js with a clever regex.
        Taken from http://opensource.perlig.de/rjsmin (version 1.1.0)
        Apache License, Version 2.0 """
    def subber(match):
        """ Substitution callback: emit the replacement for one match by
        picking the first meaningful captured group (same precedence as
        the original or-chain). """
        g = match.groups()
        if g[0]:
            return g[0]
        if g[1]:
            return g[1]
        if g[3]:
            return g[2] + '\n'
        if g[2]:
            return g[2]
        if g[5]:
            head = '\n' if g[4] else ''
            tail = '\n' if g[6] else ''
            return "%s%s%s" % (head, g[5], tail)
        if g[7]:
            return '\n'
        if g[8] or g[9] or g[10]:
            return ' '
        return ''
    result = re.sub(
        r'([^\047"\140/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^'
        r'\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^'
        r'\r\n]|\r?\n|\r)[^"\\\r\n]*)*")|(?:\140[^\140\\]*(?:\\(?:[^\r\n'
        r']|\r?\n|\r)[^\140\\]*)*\140))[^\047"\140/\000-\040]*)|(?<=[(,='
        r':\[!&|?{};\r\n+*-])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*'
        r'\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-'
        r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*('
        r'(?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
        r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/))((?:[\000-\011'
        r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:('
        r'?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
        r']*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040&)+,.:;=?\]|}-]))?|'
        r'(?<=[\000-#%-,./:-@\[-^\140{-~-]return)(?:[\000-\011\013\014\0'
        r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r'
        r'\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?'
        r':[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^'
        r'\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r'
        r'\n]*)*/))((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
        r'*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013'
        r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000'
        r'-\040&)+,.:;=?\]|}-]))?|(?<=[^\000-!#%&(*,./:-@\[\\^{|~])(?:['
        r'\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
        r')*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040'
        r']|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047'
        r')*,./:-@\\-^\140|-~])|(?<=[^\000-#%-,./:-@\[-^\140{-~-])((?:['
        r'\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
        r'))+(?=[^\000-#%-,./:-@\[-^\140{-~-])|(?<=\+)((?:[\000-\011\013'
        r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<'
        r'=-)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
        r'*\*+)*/)))+(?=-)|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*'
        r'+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-'
        r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
    ).strip()
    return result
class AssetError(Exception):
    """Base exception for asset-bundle processing errors."""
class AssetNotFound(AssetError):
    """Raised when a referenced asset cannot be found."""
class AssetsBundle(object):
    # NOTE: patterns are raw strings.  The previous literals relied on
    # invalid escape sequences (e.g. "\s", "\/") being passed through
    # verbatim, which triggers DeprecationWarning/SyntaxWarning on modern
    # Python; the compiled patterns are byte-for-byte identical.
    rx_css_import = re.compile(r"(@import[^;{]+;?)", re.M)
    rx_preprocess_imports = re.compile(r"""(@import\s?['"]([^'"]+)['"](;?))""")
    rx_css_split = re.compile(r"\/\*\! ([a-f0-9-]+) \*\/")

    TRACKED_BUNDLES = ['web.assets_common', 'web.assets_backend']
    def __init__(self, name, files, env=None, css=True, js=True):
        """
        :param name: bundle name
        :param files: files to be added to the bundle (list of dicts with at
            least 'atype', 'url', 'filename', 'content' and — for
            stylesheets — 'media' keys)
        :param env: Odoo environment to use; defaults to the current
            request's environment
        :param css: if css is True, the stylesheets files are added to the bundle
        :param js: if js is True, the javascript files are added to the bundle
        """
        self.name = name
        self.env = request.env if env is None else env
        self.javascripts = []
        self.stylesheets = []
        self.css_errors = []
        self.files = files
        # direction ('ltr'/'rtl') of the user's language, used for rtl asset urls
        self.user_direction = self.env['res.lang']._lang_get(
            self.env.context.get('lang') or self.env.user.lang
        ).direction
        # asset-wide html "media" attribute
        for f in files:
            if css:
                # dispatch each stylesheet to the asset class of its mimetype
                if f['atype'] == 'text/sass':
                    self.stylesheets.append(SassStylesheetAsset(self, url=f['url'], filename=f['filename'], inline=f['content'], media=f['media'], direction=self.user_direction))
                elif f['atype'] == 'text/scss':
                    self.stylesheets.append(ScssStylesheetAsset(self, url=f['url'], filename=f['filename'], inline=f['content'], media=f['media'], direction=self.user_direction))
                elif f['atype'] == 'text/less':
                    self.stylesheets.append(LessStylesheetAsset(self, url=f['url'], filename=f['filename'], inline=f['content'], media=f['media'], direction=self.user_direction))
                elif f['atype'] == 'text/css':
                    self.stylesheets.append(StylesheetAsset(self, url=f['url'], filename=f['filename'], inline=f['content'], media=f['media'], direction=self.user_direction))
            if js and f['atype'] == 'text/javascript':
                self.javascripts.append(JavascriptAsset(self, url=f['url'], filename=f['filename'], inline=f['content']))
    def to_node(self, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False):
        """ Build the `<link>`/`<script>` node descriptors for this bundle.

        :returns [(tagName, attributes, content)] if the tag is auto close
        """
        response = []
        is_debug_assets = debug and 'assets' in debug
        if css and self.stylesheets:
            css_attachments = self.css(is_minified=not is_debug_assets) or []
            for attachment in css_attachments:
                if is_debug_assets:
                    # NOTE(review): uses css_attachments.name (whole recordset)
                    # rather than attachment.name — looks suspicious when
                    # several attachments exist; confirm intended behaviour
                    href = self.get_debug_asset_url(extra='rtl/' if self.user_direction == 'rtl' else '',
                                                    name=css_attachments.name,
                                                    extension='')
                else:
                    href = attachment.url
                attr = dict([
                    ["type", "text/css"],
                    ["rel", "stylesheet"],
                    ["href", href],
                    ['data-asset-bundle', self.name],
                    ['data-asset-version', self.version],
                ])
                response.append(("link", attr, None))
            if self.css_errors:
                # surface compilation errors in the client and fall back on
                # plain bootstrap so the page stays usable
                msg = '\n'.join(self.css_errors)
                response.append(JavascriptAsset(self, inline=self.dialog_message(msg)).to_node())
                response.append(StylesheetAsset(self, url="/web/static/lib/bootstrap/css/bootstrap.css").to_node())
        if js and self.javascripts:
            js_attachment = self.js(is_minified=not is_debug_assets)
            # NOTE(review): mixes js_attachment.name and js_attachment[0].url —
            # presumably self.js() returns a single-record set; confirm
            src = self.get_debug_asset_url(name=js_attachment.name, extension='') if is_debug_assets else js_attachment[0].url
            attr = dict([
                ["async", "async" if async_load else None],
                ["defer", "defer" if defer_load or lazy_load else None],
                ["type", "text/javascript"],
                ["data-src" if lazy_load else "src", src],
                ['data-asset-bundle', self.name],
                ['data-asset-version', self.version],
            ])
            response.append(("script", attr, None))
        return response
@func.lazy_property
def last_modified(self):
"""Returns last modified date of linked files"""
assets = [WebAsset(self, url=f['url'], filename=f['filename'], inline=f['content'])
for f in self.files
if f['atype'] in ['text/sass', "text/scss", "text/less", "text/css", "text/javascript"]]
return max(itertools.chain(
(asset.last_modified for asset in assets),
))
@func.lazy_property
def version(self):
return self.checksum[0:7]
@func.lazy_property
def checksum(self):
"""
Not really a full checksum.
We compute a SHA512/256 on the rendered bundle + max linked files last_modified date
"""
check = u"%s%s" % (json.dumps(self.files, sort_keys=True), self.last_modified)
return hashlib.sha512(check.encode('utf-8')).hexdigest()[:64]
def _get_asset_template_url(self):
return "/web/assets/{id}-{unique}/{extra}{name}{sep}{extension}"
def _get_asset_url_values(self, id, unique, extra, name, sep, extension): # extra can contain direction or/and website
return {
'id': id,
'unique': unique,
'extra': extra,
'name': name,
'sep': sep,
'extension': extension,
}
def get_asset_url(self, id='%', unique='%', extra='', name='%', sep="%", extension='%'):
return self._get_asset_template_url().format(
**self._get_asset_url_values(id=id, unique=unique, extra=extra, name=name, sep=sep, extension=extension)
)
def get_debug_asset_url(self, extra='', name='%', extension='%'):
return f"/web/assets/debug/{extra}{name}{extension}"
    def _unlink_attachments(self, attachments):
        """ Unlinks attachments without actually calling unlink, so that the ORM cache is not cleared.

        Specifically, if an attachment is generated while a view is rendered, clearing the ORM cache
        could unload fields loaded with a sudo(), and expected to be readable by the view.
        Such a view would be website.layout when main_object is an ir.ui.view.
        """
        # Collect filestore paths first: rows must be gone from the DB before
        # the physical files are removed (_file_delete checks for references).
        to_delete = set(attach.store_fname for attach in attachments if attach.store_fname)
        # Table name comes from the ORM, ids are passed as a bound parameter:
        # no injection risk despite the f-string.
        self.env.cr.execute(f"DELETE FROM {attachments._table} WHERE id IN %s", [tuple(attachments.ids)])
        for file_path in to_delete:
            attachments._file_delete(file_path)
    def clean_attachments(self, extension):
        """ Takes care of deleting any outdated ir.attachment records associated to a bundle before
        saving a fresh one.

        When `extension` is js we need to check that we are deleting a different version (and not *any*
        version) because, as one of the creates in `save_attachment` can trigger a rollback, the
        call to `clean_attachments ` is made at the end of the method in order to avoid the rollback
        of an ir.attachment unlink (because we cannot rollback a removal on the filestore), thus we
        must exclude the current bundle.

        :param extension: file extension of the attachments to clean (js, min.js, css, ...)
        """
        ira = self.env['ir.attachment']
        # LIKE pattern matching any version of this bundle/extension
        # (rtl css bundles live under an 'rtl/' prefix).
        url = self.get_asset_url(
            extra='%s' % ('rtl/' if extension in ['css', 'min.css'] and self.user_direction == 'rtl' else ''),
            name=self.name,
            sep='',
            extension='.%s' % extension
        )
        # Match any version of the bundle EXCEPT the current one (see docstring).
        domain = [
            ('url', '=like', url),
            '!', ('url', '=like', self.get_asset_url(unique=self.version))
        ]
        attachments = ira.sudo().search(domain)
        # avoid to invalidate cache if it's already empty (mainly useful for test)
        if attachments:
            self._unlink_attachments(attachments)
            # force bundle invalidation on other workers
            self.env['ir.qweb'].clear_caches()
        return True
    def get_attachments(self, extension, ignore_version=False):
        """ Return the ir.attachment records for a given bundle. This method takes care of mitigating
        an issue happening when parallel transactions generate the same bundle: while the file is not
        duplicated on the filestore (as it is stored according to its hash), there are multiple
        ir.attachment records referencing the same version of a bundle. As we don't want to source
        multiple time the same bundle in our `to_html` function, we group our ir.attachment records
        by file name and only return the one with the max id for each group.

        :param extension: file extension (js, min.js, css)
        :param ignore_version: if ignore_version, the url contains a version => web/assets/%-%/name.extension
                               (the second '%' corresponds to the version),
                               else: the url contains a version equal to that of the self.version
                               => web/assets/%-self.version/name.extension.
        """
        unique = "%" if ignore_version else self.version
        # '%' placeholders make this a SQL LIKE pattern.
        url_pattern = self.get_asset_url(
            unique=unique,
            extra='%s' % ('rtl/' if extension in ['css', 'min.css'] and self.user_direction == 'rtl' else ''),
            name=self.name,
            sep='',
            extension='.%s' % extension
        )
        # Raw SQL: pick one record per file name (max id) — see docstring.
        # Bundle attachments are always created as SUPERUSER (see save_attachment).
        self.env.cr.execute("""
             SELECT max(id)
               FROM ir_attachment
              WHERE create_uid = %s
                AND url like %s
           GROUP BY name
           ORDER BY name
         """, [SUPERUSER_ID, url_pattern])
        attachment_ids = [r[0] for r in self.env.cr.fetchall()]
        return self.env['ir.attachment'].sudo().browse(attachment_ids)
    def save_attachment(self, extension, content):
        """Record the given bundle in an ir.attachment and delete
        all other ir.attachments referring to this bundle (with the same name and extension).

        :param extension: extension of the bundle to be recorded
        :param content: bundle content to be recorded
        :return the ir.attachment records for a given bundle.
        """
        assert extension in ('js', 'min.js', 'js.map', 'css', 'min.css', 'css.map')
        ira = self.env['ir.attachment']

        # Set user direction in name to store two bundles
        # 1 for ltr and 1 for rtl, this will help during cleaning of assets bundle
        # and allow to only clear the current direction bundle
        # (this applies to css bundles only)
        fname = '%s.%s' % (self.name, extension)
        mimetype = (
            'text/css' if extension in ['css', 'min.css'] else
            'application/json' if extension in ['js.map', 'css.map'] else
            'application/javascript'
        )
        values = {
            'name': fname,
            'mimetype': mimetype,
            'res_model': 'ir.ui.view',
            'res_id': False,
            'type': 'binary',
            'public': True,
            'raw': content.encode('utf8'),
        }
        # Created as superuser: get_attachments() filters on create_uid = SUPERUSER_ID.
        attachment = ira.with_user(SUPERUSER_ID).create(values)
        # The URL embeds the attachment id and the bundle version, so it can only
        # be computed (and written) after the create.
        url = self.get_asset_url(
            id=attachment.id,
            unique=self.version,
            extra='%s' % ('rtl/' if extension in ['css', 'min.css'] and self.user_direction == 'rtl' else ''),
            name=fname,
            sep='',  # included in fname
            extension=''
        )
        values = {
            'url': url,
        }
        attachment.write(values)

        if self.env.context.get('commit_assetsbundle') is True:
            self.env.cr.commit()

        # Clean old versions last: filestore deletions cannot be rolled back
        # (see clean_attachments docstring).
        self.clean_attachments(extension)

        # For end-user assets (common and backend), send a message on the bus
        # to invite the user to refresh their browser
        if self.env and 'bus.bus' in self.env and self.name in self.TRACKED_BUNDLES:
            self.env['bus.bus']._sendone('broadcast', 'bundle_changed', {
                'server_version': release.version  # Needs to be dynamically imported
            })
            _logger.debug('Asset Changed: bundle: %s -- version: %s', self.name, self.version)

        return attachment
def js(self, is_minified=True):
extension = 'min.js' if is_minified else 'js'
attachments = self.get_attachments(extension)
if not attachments:
if is_minified:
content = ';\n'.join(asset.minify() for asset in self.javascripts)
return self.save_attachment(extension, content)
else:
return self.js_with_sourcemap()
return attachments[0]
    def js_with_sourcemap(self):
        """Create the ir.attachment representing the not-minified content of the bundleJS
        and create/modify the ir.attachment representing the linked sourcemap.

        :return ir.attachment representing the un-minified content of the bundleJS
        """
        # The sourcemap attachment must exist first so its URL can be appended
        # to the bundle content; its actual content is written at the end.
        sourcemap_attachment = self.get_attachments('js.map') \
                                or self.save_attachment('js.map', '')
        # sourceRoot must climb back to the server root from the debug URL depth.
        generator = SourceMapGenerator(
            source_root="/".join(
                [".." for i in range(0, len(self.get_debug_asset_url(name=self.name).split("/")) - 2)]
            ) + "/",
        )

        content_bundle_list = []
        content_line_count = 0
        line_header = 6  # number of lines added by with_header()
        for asset in self.javascripts:
            if asset.is_transpiled:
                # '+ 3' corresponds to the 3 lines added at the beginning of the file during transpilation.
                generator.add_source(
                    asset.url, asset._content, content_line_count, start_offset=line_header + 3)
            else:
                generator.add_source(
                    asset.url, asset.content, content_line_count, start_offset=line_header)

            content_bundle_list.append(asset.with_header(asset.content, minimal=False))
            # Keep the running line count in sync with what was appended,
            # so subsequent sources map to the correct bundle lines.
            content_line_count += len(asset.content.split("\n")) + line_header

        content_bundle = ';\n'.join(content_bundle_list) + "\n//# sourceMappingURL=" + sourcemap_attachment.url
        js_attachment = self.save_attachment('js', content_bundle)

        generator._file = js_attachment.url
        sourcemap_attachment.write({
            "raw": generator.get_content()
        })

        return js_attachment
def css(self, is_minified=True):
extension = 'min.css' if is_minified else 'css'
attachments = self.get_attachments(extension)
if not attachments:
# get css content
css = self.preprocess_css()
if self.css_errors:
return self.get_attachments(extension, ignore_version=True)
matches = []
css = re.sub(self.rx_css_import, lambda matchobj: matches.append(matchobj.group(0)) and '', css)
if is_minified:
# move up all @import rules to the top
matches.append(css)
css = u'\n'.join(matches)
self.save_attachment(extension, css)
attachments = self.get_attachments(extension)
else:
return self.css_with_sourcemap(u'\n'.join(matches))
return attachments
    def css_with_sourcemap(self, content_import_rules):
        """Create the ir.attachment representing the not-minified content of the bundleCSS
        and create/modify the ir.attachment representing the linked sourcemap.

        :param content_import_rules: string containing all the @import rules to put at the beginning of the bundle
        :return ir.attachment representing the un-minified content of the bundleCSS
        """
        # The sourcemap attachment must exist first so its URL can be appended
        # to the bundle content; its actual content is written at the end.
        sourcemap_attachment = self.get_attachments('css.map') \
                                or self.save_attachment('css.map', '')
        debug_asset_url = self.get_debug_asset_url(name=self.name,
                                                   extra='rtl/' if self.user_direction == 'rtl' else '')
        # sourceRoot must climb back to the server root from the debug URL depth.
        generator = SourceMapGenerator(
            source_root="/".join(
                [".." for i in range(0, len(debug_asset_url.split("/")) - 2)]
            ) + "/",
        )

        # adds the @import rules at the beginning of the bundle
        content_bundle_list = [content_import_rules]
        content_line_count = len(content_import_rules.split("\n"))
        for asset in self.stylesheets:
            if asset.content:
                content = asset.with_header(asset.content)
                if asset.url:
                    generator.add_source(asset.url, content, content_line_count)
                # comments all @import rules that have been added at the beginning of the bundle
                content = re.sub(self.rx_css_import, lambda matchobj: f"/* {matchobj.group(0)} */", content)
                content_bundle_list.append(content)
                # Keep line count in sync so later sources map correctly.
                content_line_count += len(content.split("\n"))

        content_bundle = '\n'.join(content_bundle_list) + f"\n//*# sourceMappingURL={sourcemap_attachment.url} */"
        css_attachment = self.save_attachment('css', content_bundle)

        generator._file = css_attachment.url
        sourcemap_attachment.write({
            "raw": generator.get_content(),
        })

        return css_attachment
def dialog_message(self, message):
"""
Returns a JS script which shows a warning to the user on page load.
TODO: should be refactored to be a base js file whose code is extended
by related apps (web/website).
"""
return """
(function (message) {
'use strict';
if (window.__assetsBundleErrorSeen) {
return;
}
window.__assetsBundleErrorSeen = true;
if (document.readyState !== 'loading') {
onDOMContentLoaded();
} else {
window.addEventListener('DOMContentLoaded', () => onDOMContentLoaded());
}
async function onDOMContentLoaded() {
var odoo = window.top.odoo;
if (!odoo || !odoo.define) {
useAlert();
return;
}
// Wait for potential JS loading
await new Promise(resolve => {
const noLazyTimeout = setTimeout(() => resolve(), 10); // 10 since need to wait for promise resolutions of odoo.define
odoo.define('AssetsBundle.PotentialLazyLoading', function (require) {
'use strict';
const lazyloader = require('web.public.lazyloader');
clearTimeout(noLazyTimeout);
lazyloader.allScriptsLoaded.then(() => resolve());
});
});
var alertTimeout = setTimeout(useAlert, 10); // 10 since need to wait for promise resolutions of odoo.define
odoo.define('AssetsBundle.ErrorMessage', function (require) {
'use strict';
require('web.dom_ready');
var core = require('web.core');
var Dialog = require('web.Dialog');
var _t = core._t;
clearTimeout(alertTimeout);
new Dialog(null, {
title: _t("Style error"),
$content: $('<div/>')
.append($('<p/>', {text: _t("The style compilation failed, see the error below. Your recent actions may be the cause, please try reverting the changes you made.")}))
.append($('<pre/>', {html: message})),
}).open();
});
}
function useAlert() {
window.alert(message);
}
})("%s");
""" % message.replace('"', '\\"').replace('\n', '
')
def _get_assets_domain_for_already_processed_css(self, assets):
""" Method to compute the attachments' domain to search the already process assets (css).
This method was created to be overridden.
"""
return [('url', 'in', list(assets.keys()))]
    def is_css_preprocessed(self):
        """Check whether every preprocessed stylesheet of the bundle already has
        an up-to-date compiled ir.attachment.

        :return: (preprocessed, old_attachments) where ``preprocessed`` is False
                 when at least one asset must be (re)compiled, and
                 ``old_attachments`` collects the attachments found (to be
                 unlinked after a successful recompilation).
        """
        preprocessed = True
        old_attachments = self.env['ir.attachment'].sudo()
        asset_types = [SassStylesheetAsset, ScssStylesheetAsset, LessStylesheetAsset]
        if self.user_direction == 'rtl':
            # rtl variants of plain css are generated too, so they must be checked as well.
            asset_types.append(StylesheetAsset)

        for atype in asset_types:
            outdated = False
            assets = dict((asset.html_url, asset) for asset in self.stylesheets if isinstance(asset, atype))
            if assets:
                assets_domain = self._get_assets_domain_for_already_processed_css(assets)
                attachments = self.env['ir.attachment'].sudo().search(assets_domain)
                old_attachments += attachments
                for attachment in attachments:
                    asset = assets[attachment.url]
                    if asset.last_modified > attachment['__last_update']:
                        # source file is newer than its compiled attachment
                        outdated = True
                        break
                    if asset._content is None:
                        # reuse the compiled content instead of recompiling
                        asset._content = (attachment.raw or b'').decode('utf8')
                        if not asset._content and attachment.file_size > 0:
                            asset._content = None  # file missing, force recompile

                if any(asset._content is None for asset in assets.values()):
                    # at least one asset has no attachment at all
                    outdated = True

                if outdated:
                    preprocessed = False

        return preprocessed, old_attachments
    def preprocess_css(self, debug=False, old_attachments=None):
        """
        Checks if the bundle contains any sass/less content, then compiles it to css.
        If user language direction is Right to Left then consider css files to call run_rtlcss,
        css files are also stored in ir.attachment after processing done by rtlcss.
        Returns the bundle's flat css.

        :param debug: unused here; kept for interface compatibility
        :param old_attachments: previously compiled attachments, unlinked once
                                the new compilation succeeded
        """
        if self.stylesheets:
            compiled = ""
            for atype in (SassStylesheetAsset, ScssStylesheetAsset, LessStylesheetAsset):
                assets = [asset for asset in self.stylesheets if isinstance(asset, atype)]
                if assets:
                    # All assets of one preprocessor type are compiled in a single pass.
                    source = '\n'.join([asset.get_source() for asset in assets])
                    compiled += self.compile_css(assets[0].compile, source)

            # We want to run rtlcss on normal css, so merge it in compiled
            if self.user_direction == 'rtl':
                stylesheet_assets = [asset for asset in self.stylesheets if not isinstance(asset, (SassStylesheetAsset, ScssStylesheetAsset, LessStylesheetAsset))]
                compiled += '\n'.join([asset.get_source() for asset in stylesheet_assets])
                compiled = self.run_rtlcss(compiled)

            # Only discard the previous compilation once the new one succeeded
            # (filestore deletions cannot be rolled back).
            if not self.css_errors and old_attachments:
                self._unlink_attachments(old_attachments)
                old_attachments = None

            # Split the compiled blob back per-asset: get_source() prefixed each
            # asset with a '/*! <id> */' marker that rx_css_split matches.
            fragments = self.rx_css_split.split(compiled)
            at_rules = fragments.pop(0)
            if at_rules:
                # Sass and less moves @at-rules to the top in order to stay css 2.1 compatible
                self.stylesheets.insert(0, StylesheetAsset(self, inline=at_rules))
            while fragments:
                asset_id = fragments.pop(0)
                asset = next(asset for asset in self.stylesheets if asset.id == asset_id)
                asset._content = fragments.pop(0)

        return '\n'.join(asset.minify() for asset in self.stylesheets)
    def compile_css(self, compiler, source):
        """Sanitizes @import rules, remove duplicates @import rules, then compile

        :param compiler: callable(source) -> compiled css (may raise CompileError)
        :param source: concatenated sass/scss/less source
        :return: compiled css string ('' on compilation error; the error is
                 appended to self.css_errors)
        """
        imports = []

        def handle_compile_error(e, source):
            # Record the sanitized error and return empty css so rendering continues.
            error = self.get_preprocessor_error(e, source=source)
            _logger.warning(error)
            self.css_errors.append(error)
            return ''

        def sanitize(matchobj):
            ref = matchobj.group(2)
            line = '@import "%s"%s' % (ref, matchobj.group(3))
            if '.' not in ref and line not in imports and not ref.startswith(('.', '/', '~')):
                # library import (no extension, not a relative/absolute path): keep it once
                imports.append(line)
                return line
            msg = "Local import '%s' is forbidden for security reasons. Please remove all @import {your_file} imports in your custom files. In Odoo you have to import all files in the assets, and not through the @import statement." % ref
            _logger.warning(msg)
            self.css_errors.append(msg)
            return ''

        source = re.sub(self.rx_preprocess_imports, sanitize, source)

        compiled = ''
        try:
            compiled = compiler(source)
        except CompileError as e:
            return handle_compile_error(e, source=source)

        compiled = compiled.strip()

        # Post process the produced css to add required vendor prefixes here
        compiled = re.sub(r'(appearance: (\w+);)', r'-webkit-appearance: \2; -moz-appearance: \2; \1', compiled)

        # Most of those are only useful for wkhtmltopdf (some for old PhantomJS)
        compiled = re.sub(r'(display: ((?:inline-)?)flex((?: ?!important)?);)', r'display: -webkit-\2box\3; display: -webkit-\2flex\3; \1', compiled)
        compiled = re.sub(r'(justify-content: flex-(\w+)((?: ?!important)?);)', r'-webkit-box-pack: \2\3; \1', compiled)
        compiled = re.sub(r'(flex-flow: (\w+ \w+);)', r'-webkit-flex-flow: \2; \1', compiled)
        compiled = re.sub(r'(flex-direction: (column);)', r'-webkit-box-orient: vertical; -webkit-box-direction: normal; -webkit-flex-direction: \2; \1', compiled)
        compiled = re.sub(r'(flex-wrap: (\w+);)', r'-webkit-flex-wrap: \2; \1', compiled)
        compiled = re.sub(r'(flex: ((\d)+ \d+ (?:\d+|auto));)', r'-webkit-box-flex: \3; -webkit-flex: \2; \1', compiled)

        return compiled
    def run_rtlcss(self, source):
        """Convert compiled css to its right-to-left variant via the external
        `rtlcss` tool.

        :param source: css source string
        :return: converted css; the original ``source`` when rtlcss is not
                 installed; '' on conversion error (recorded in css_errors)
        """
        rtlcss = 'rtlcss'
        if os.name == 'nt':
            try:
                rtlcss = misc.find_in_path('rtlcss.cmd')
            except IOError:
                rtlcss = 'rtlcss'
        cmd = [rtlcss, '-']

        try:
            rtlcss = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except Exception:

            # Check the presence of rtlcss, if rtlcss not available then we should return normal less file
            try:
                # NOTE(review): `process` is never used nor waited on; this call
                # only probes whether the executable exists — confirm intent.
                process = Popen(
                    ['rtlcss', '--version'], stdout=PIPE, stderr=PIPE
                )
            except (OSError, IOError):
                _logger.warning('You need https://rtlcss.com/ to convert css file to right to left compatiblity. Use: npm install -g rtlcss')
                return source

            # rtlcss exists but the first Popen still failed: report the error.
            msg = "Could not execute command %r" % cmd[0]
            _logger.error(msg)
            self.css_errors.append(msg)
            return ''

        result = rtlcss.communicate(input=source.encode('utf-8'))
        if rtlcss.returncode:
            cmd_output = ''.join(misc.ustr(result))
            if not cmd_output:
                cmd_output = "Process exited with return code %d\n" % rtlcss.returncode
            error = self.get_rtlcss_error(cmd_output, source=source)
            _logger.warning(error)
            self.css_errors.append(error)
            return ''
        rtlcss_result = result[0].strip().decode('utf8')
        return rtlcss_result
def get_preprocessor_error(self, stderr, source=None):
"""Improve and remove sensitive information from sass/less compilator error messages"""
error = misc.ustr(stderr).split('Load paths')[0].replace(' Use --trace for backtrace.', '')
if 'Cannot load compass' in error:
error += "Maybe you should install the compass gem using this extra argument:\n\n" \
" $ sudo gem install compass --pre\n"
error += "This error occurred while compiling the bundle '%s' containing:" % self.name
for asset in self.stylesheets:
if isinstance(asset, PreprocessedCSS):
error += '\n - %s' % (asset.url if asset.url else '<inline sass>')
return error
def get_rtlcss_error(self, stderr, source=None):
"""Improve and remove sensitive information from sass/less compilator error messages"""
error = misc.ustr(stderr).split('Load paths')[0].replace(' Use --trace for backtrace.', '')
error += "This error occurred while compiling the bundle '%s' containing:" % self.name
return error
class WebAsset(object):
    """A single asset of a bundle: either an inline snippet or a file
    referenced by URL (resolved to a module file or an ir.attachment)."""
    html_url_format = '%s'  # pattern producing the URL served to the browser
    _content = None         # cached content string
    _filename = None        # resolved filesystem path, when the asset is a module file
    _ir_attach = None       # ir.attachment data, when the asset is served from the DB
    _id = None              # lazily generated uuid, see `id`

    def __init__(self, bundle, inline=None, url=None, filename=None):
        """:param bundle: owning AssetsBundle
        :param inline: literal content of an inline asset
        :param url: url of a linked asset
        :param filename: optional pre-resolved filesystem path
        :raise Exception: when neither ``inline`` nor ``url`` is given
        """
        self.bundle = bundle
        self.inline = inline
        self._filename = filename
        self.url = url
        self.html_url_args = url
        if not inline and not url:
            raise Exception("An asset should either be inlined or url linked, defined in bundle '%s'" % bundle.name)

    @func.lazy_property
    def id(self):
        # Random uuid used as a marker to split compiled css back per-asset.
        if self._id is None: self._id = str(uuid.uuid4())
        return self._id

    @func.lazy_property
    def name(self):
        # Human-readable identification, used in error messages and headers.
        name = '<inline asset>' if self.inline else self.url
        return "%s defined in bundle '%s'" % (name, self.bundle.name)

    @property
    def html_url(self):
        return self.html_url_format % self.html_url_args

    def stat(self):
        """Resolve the asset's url to a module file or an ir.attachment.

        :raise AssetNotFound: when the url matches neither
        """
        if not (self.inline or self._filename or self._ir_attach):
            path = (segment for segment in self.url.split('/') if segment)
            self._filename = get_resource_path(*path)
            if self._filename:
                return
            try:
                # Test url against ir.attachments
                attach = self.bundle.env['ir.attachment'].sudo().get_serve_attachment(self.url)
                self._ir_attach = attach[0]
            except Exception:
                raise AssetNotFound("Could not find %s" % self.name)

    def to_node(self):
        # Subclasses return the (tag, attributes, content) triple to render.
        raise NotImplementedError()

    @func.lazy_property
    def last_modified(self):
        """Last modification date of the underlying file/attachment;
        falls back to the epoch when the asset cannot be resolved."""
        try:
            self.stat()
            if self._filename:
                return datetime.fromtimestamp(os.path.getmtime(self._filename))
            elif self._ir_attach:
                return self._ir_attach['__last_update']
        except Exception:
            pass
        return datetime(1970, 1, 1)

    @property
    def content(self):
        # Lazily fetched and cached.
        if self._content is None:
            self._content = self.inline or self._fetch_content()
        return self._content

    def _fetch_content(self):
        """ Fetch content from file or database"""
        try:
            self.stat()
            if self._filename:
                with closing(file_open(self._filename, 'rb', filter_ext=EXTENSIONS)) as fp:
                    return fp.read().decode('utf-8')
            else:
                return base64.b64decode(self._ir_attach['datas']).decode('utf-8')
        except UnicodeDecodeError:
            raise AssetError('%s is not utf-8 encoded.' % self.name)
        except IOError:
            raise AssetNotFound('File %s does not exist.' % self.name)
        except:
            # any other failure (db, base64...) is reported as a generic asset error
            raise AssetError('Could not get content for %s.' % self.name)

    def minify(self):
        # Default: no minification.
        return self.content

    def with_header(self, content=None):
        # Prefix content with a comment identifying the asset in the bundle.
        if content is None:
            content = self.content
        return f'\n/* {self.name} */\n{content}'
class JavascriptAsset(WebAsset):
    """A javascript asset; odoo-module files are transpiled on access."""

    def __init__(self, bundle, inline=None, url=None, filename=None):
        super().__init__(bundle, inline, url, filename)
        # Use super().content on purpose: probe the *raw* source for the
        # odoo-module marker before any transpilation happens.
        self.is_transpiled = is_odoo_module(super().content)
        self._converted_content = None

    @property
    def content(self):
        content = super().content
        if self.is_transpiled:
            # Transpile once and cache the result.
            if not self._converted_content:
                self._converted_content = transpile_javascript(self.url, content)
            return self._converted_content
        return content

    def minify(self):
        return self.with_header(rjsmin(self.content))

    def _fetch_content(self):
        try:
            return super(JavascriptAsset, self)._fetch_content()
        except AssetError as e:
            # Surface the failure in the browser console instead of breaking the bundle.
            return u"console.error(%s);" % json.dumps(to_text(e))

    def to_node(self):
        """Return the (tag, attrs, content) triple rendering this asset:
        a <script src=...> for linked assets, an inline <script> otherwise."""
        if self.url:
            return ("script", dict([
                ["type", "text/javascript"],
                ["src", self.html_url],
                ['data-asset-bundle', self.bundle.name],
                ['data-asset-version', self.bundle.version],
            ]), None)
        else:
            return ("script", dict([
                ["type", "text/javascript"],
                ["charset", "utf-8"],
                ['data-asset-bundle', self.bundle.name],
                ['data-asset-version', self.bundle.version],
            ]), self.with_header())

    def with_header(self, content=None, minimal=True):
        """Prefix content with an identifying comment.

        :param minimal: when True, use the compact one-line header of WebAsset;
                        otherwise emit a boxed banner with file path, bundle
                        name and line count (used by the debug bundle, whose
                        sourcemap accounts for the added lines).
        """
        if minimal:
            return super().with_header(content)

        # format the header like
        #   /**************************
        #   *  Filepath: <asset_url>  *
        #   *  Bundle: <name>         *
        #   *  Lines: 42              *
        #   **************************/
        lines = [
            f"Filepath: {self.url}",
            f"Bundle: {self.bundle.name}",
            f"Lines: {len(content.splitlines())}",
        ]
        length = max(map(len, lines))
        return "\n".join([
            "",
            "/" + "*" * (length + 5),
            *(f"*  {line:<{length}}  *" for line in lines),
            "*" * (length + 5) + "/",
            content,
        ])
class StylesheetAsset(WebAsset):
    """A css asset; rewrites relative references and supports an rtl variant."""
    # relative @import targets (not absolute paths nor http(s) urls)
    rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
    # relative url() references (not absolute, http(s) or data: URIs)
    rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
    # existing sourcemap comments, stripped before minification
    rx_sourceMap = re.compile(r'(/\*# sourceMappingURL=.*)', re.U)
    # @charset declarations, removed since only utf-8 is supported
    rx_charset = re.compile(r'(@charset "[^"]+";)', re.U)

    def __init__(self, *args, **kw):
        self.media = kw.pop('media', None)
        self.direction = kw.pop('direction', None)
        super(StylesheetAsset, self).__init__(*args, **kw)
        if self.direction == 'rtl' and self.url:
            # rtl variants are served under an 'rtl/<bundle>' path segment
            self.html_url_args = self.url.rsplit('.', 1)
            self.html_url_format = '%%s/%s/%s.%%s' % ('rtl', self.bundle.name)
            self.html_url_args = tuple(self.html_url_args)

    @property
    def content(self):
        content = super(StylesheetAsset, self).content
        if self.media:
            # scope the whole sheet to its declared media query
            content = '@media %s { %s }' % (self.media, content)
        return content

    def _fetch_content(self):
        """Fetch the css and rebase relative @import/url() references on the
        asset's own directory; errors are recorded in the bundle and yield ''."""
        try:
            content = super(StylesheetAsset, self)._fetch_content()
            web_dir = os.path.dirname(self.url)

            if self.rx_import:
                content = self.rx_import.sub(
                    r"""@import \1%s/""" % (web_dir,),
                    content,
                )

            if self.rx_url:
                content = self.rx_url.sub(
                    r"url(\1%s/" % (web_dir,),
                    content,
                )

            if self.rx_charset:
                # remove charset declarations, we only support utf-8
                content = self.rx_charset.sub('', content)

            return content
        except AssetError as e:
            self.bundle.css_errors.append(str(e))
            return ''

    def get_source(self):
        # The '/*! <id> */' marker lets the bundle split compiled output per asset.
        content = self.inline or self._fetch_content()
        return "/*! %s */\n%s" % (self.id, content)

    def minify(self):
        # remove existing sourcemaps, make no sense after re-mini
        content = self.rx_sourceMap.sub('', self.content)
        # comments
        content = re.sub(r'/\*.*?\*/', '', content, flags=re.S)
        # space
        content = re.sub(r'\s+', ' ', content)
        content = re.sub(r' *([{}]) *', r'\1', content)
        return self.with_header(content)

    def to_node(self):
        """Return the (tag, attrs, content) triple rendering this asset:
        a <link rel="stylesheet"> for linked assets, an inline <style> otherwise."""
        if self.url:
            attr = dict([
                ["type", "text/css"],
                ["rel", "stylesheet"],
                ["href", self.html_url],
                ["media", escape(to_text(self.media)) if self.media else None],
                ['data-asset-bundle', self.bundle.name],
                ['data-asset-version', self.bundle.version],
            ])
            return ("link", attr, None)
        else:
            attr = dict([
                ["type", "text/css"],
                ["media", escape(to_text(self.media)) if self.media else None],
                ['data-asset-bundle', self.bundle.name],
                ['data-asset-version', self.bundle.version],
            ])
            return ("style", attr, self.with_header())
class PreprocessedCSS(StylesheetAsset):
    """Base class for stylesheets compiled by an external preprocessor
    (sass/scss/less); compiled output is served as css."""
    # sources are preprocessed, so relative @import rewriting does not apply
    rx_import = None

    def __init__(self, *args, **kw):
        super(PreprocessedCSS, self).__init__(*args, **kw)
        # compiled result is served as '<dir>/[rtl/]<bundle>/<file>.css'
        self.html_url_args = tuple(self.url.rsplit('/', 1))
        self.html_url_format = '%%s/%s%s/%%s.css' % ('rtl/' if self.direction == 'rtl' else '', self.bundle.name)

    def get_command(self):
        # Subclasses return the argv list of their compiler, reading stdin.
        raise NotImplementedError

    def compile(self, source):
        """Run the preprocessor over ``source`` and return the compiled css.

        :raise CompileError: when the compiler cannot be spawned or exits
                             with a non-zero status
        """
        command = self.get_command()
        try:
            compiler = Popen(command, stdin=PIPE, stdout=PIPE,
                             stderr=PIPE)
        except Exception:
            raise CompileError("Could not execute command %r" % command[0])

        (out, err) = compiler.communicate(input=source.encode('utf-8'))
        if compiler.returncode:
            cmd_output = misc.ustr(out) + misc.ustr(err)
            if not cmd_output:
                cmd_output = u"Process exited with return code %d\n" % compiler.returncode
            raise CompileError(cmd_output)
        return out.decode('utf8')
class SassStylesheetAsset(PreprocessedCSS):
    """Indented-syntax sass stylesheet compiled with the `sass` CLI."""
    rx_indent = re.compile(r'^( +|\t+)', re.M)
    indent = None       # indentation unit detected in the source
    reindent = '    '   # target indentation unit

    def minify(self):
        # compiler already outputs compressed css (see get_command)
        return self.with_header()

    def get_source(self):
        content = textwrap.dedent(self.inline or self._fetch_content())

        def fix_indent(m):
            # Indentation normalization
            ind = m.group()
            if self.indent is None:
                self.indent = ind
                if self.indent == self.reindent:
                    # Don't reindent the file if identation is the final one (reindent)
                    # StopIteration aborts re.sub early; caught just below.
                    raise StopIteration()
            return ind.replace(self.indent, self.reindent)

        try:
            content = self.rx_indent.sub(fix_indent, content)
        except StopIteration:
            pass
        return "/*! %s */\n%s" % (self.id, content)

    def get_command(self):
        try:
            sass = misc.find_in_path('sass')
        except IOError:
            sass = 'sass'
        return [sass, '--stdin', '-t', 'compressed', '--unix-newlines', '--compass',
                '-r', 'bootstrap-sass']
class ScssStylesheetAsset(PreprocessedCSS):
    """Scss stylesheet, compiled with libsass when available, else `sassc`."""
    @property
    def bootstrap_path(self):
        # bootstrap scss sources are always on the compiler include path
        return get_resource_path('web', 'static', 'lib', 'bootstrap', 'scss')

    precision = 8               # float precision passed to the compiler
    output_style = 'expanded'

    def compile(self, source):
        """Compile in-process with libsass when importable; otherwise fall
        back on the external `sassc` binary via the parent implementation."""
        if libsass is None:
            return super(ScssStylesheetAsset, self).compile(source)
        try:
            # NOTE(review): presumably re-arms the profiler around the long
            # native call — confirm against odoo.tools.profiler.
            profiler.force_hook()
            return libsass.compile(
                string=source,
                include_paths=[
                    self.bootstrap_path,
                ],
                output_style=self.output_style,
                precision=self.precision,
            )
        except libsass.CompileError as e:
            raise CompileError(e.args[0])

    def get_command(self):
        try:
            sassc = misc.find_in_path('sassc')
        except IOError:
            sassc = 'sassc'
        return [sassc, '--stdin', '--precision', str(self.precision), '--load-path', self.bootstrap_path, '-t', self.output_style]
class LessStylesheetAsset(PreprocessedCSS):
    """Less stylesheet compiled with the external `lessc` compiler."""

    def get_command(self):
        """Return the lessc argv, reading source from stdin ('-')."""
        # Windows installs expose the compiler as 'lessc.cmd'.
        candidate = 'lessc.cmd' if os.name == 'nt' else 'lessc'
        try:
            lessc = misc.find_in_path(candidate)
        except IOError:
            # not found in path: rely on the OS resolving the bare name
            lessc = 'lessc'
        lesspath = get_resource_path('web', 'static', 'lib', 'bootstrap', 'less')
        return [lessc, '-', '--no-js', '--no-color', '--include-path=%s' % lesspath]
| 42.145403 | 44,927 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import collections
import datetime
import hashlib
import pytz
import threading
import re
import requests
from collections import defaultdict
from lxml import etree
from random import randint
from werkzeug import urls
from odoo import api, fields, models, tools, SUPERUSER_ID, _, Command
from odoo.osv.expression import get_unaccent_wrapper
from odoo.exceptions import RedirectWarning, UserError, ValidationError
# Global variables used for the warning fields declared on the res.partner
# in the following modules : sale, purchase, account, stock
WARNING_MESSAGE = [
    ('no-message','No Message'),
    ('warning','Warning'),
    ('block','Blocking Message')
]
WARNING_HELP = 'Selecting the "Warning" option will notify user with the message, Selecting "Blocking Message" will throw an exception with the message and block the flow. The Message has to be written in the next field.'

# Fields copied between a partner and its contacts/addresses (see address sync).
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
@api.model
def _lang_get(self):
    """Selection provider: the languages installed on the database."""
    return self.env['res.lang'].get_installed()
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
# Computed once at import time: the timezone list is static for the process lifetime.
_tzs = [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
def _tz_get(self):
    """Selection provider: all known timezones, 'Etc/*' last."""
    return _tzs
class FormatAddressMixin(models.AbstractModel):
    """Mixin replacing the address block of a form view by the
    country-specific address form configured on the user's company."""
    _name = "format.address.mixin"
    _description = 'Address Format'

    def _fields_view_get_address(self, arch):
        """Inject the company country's address sub-view into ``arch``.

        :param arch: form view architecture (xml string)
        :return: the (possibly rewritten) architecture
        """
        # consider the country of the user, not the country of the partner we want to display
        address_view_id = self.env.company.country_id.address_view_id.sudo()
        if address_view_id and not self._context.get('no_address_format') and (not address_view_id.model or address_view_id.model == self._name):
            #render the partner address accordingly to address_view_id
            doc = etree.fromstring(arch)
            for address_node in doc.xpath("//div[hasclass('o_address_format')]"):
                # no_address_format avoids infinite recursion through this same method
                Partner = self.env['res.partner'].with_context(no_address_format=True)
                sub_view = Partner.fields_view_get(
                    view_id=address_view_id.id, view_type='form', toolbar=False, submenu=False)
                sub_view_node = etree.fromstring(sub_view['arch'])
                #if the model is different than res.partner, there are chances that the view won't work
                #(e.g fields not present on the model). In that case we just return arch
                if self._name != 'res.partner':
                    try:
                        self.env['ir.ui.view'].postprocess_and_fields(sub_view_node, model=self._name)
                    except ValueError:
                        return arch
                address_node.getparent().replace(address_node, sub_view_node)
            arch = etree.tostring(doc, encoding='unicode')
        return arch
class PartnerCategory(models.Model):
    """Hierarchical tags that can be set on partners."""
    _description = 'Partner Tags'
    _name = 'res.partner.category'
    _order = 'name'
    _parent_store = True

    def _get_default_color(self):
        # random color index for new tags
        return randint(1, 11)

    name = fields.Char(string='Tag Name', required=True, translate=True)
    color = fields.Integer(string='Color', default=_get_default_color)
    parent_id = fields.Many2one('res.partner.category', string='Parent Category', index=True, ondelete='cascade')
    child_ids = fields.One2many('res.partner.category', 'parent_id', string='Child Tags')
    active = fields.Boolean(default=True, help="The active field allows you to hide the category without removing it.")
    parent_path = fields.Char(index=True)
    partner_ids = fields.Many2many('res.partner', column1='category_id', column2='partner_id', string='Partners', copy=False)

    @api.constrains('parent_id')
    def _check_parent_id(self):
        """Forbid cycles in the tag hierarchy."""
        if not self._check_recursion():
            raise ValidationError(_('You can not create recursive tags.'))

    def name_get(self):
        """ Return the categories' display name, including their direct
            parent by default.

            If ``context['partner_category_display']`` is ``'short'``, the short
            version of the category name (without the direct parent) is used.
            The default is the long version.
        """
        if self._context.get('partner_category_display') == 'short':
            return super(PartnerCategory, self).name_get()

        res = []
        for category in self:
            # walk up the hierarchy to build 'grand-parent / parent / name'
            names = []
            current = category
            while current:
                names.append(current.name)
                current = current.parent_id
            res.append((category.id, ' / '.join(reversed(names))))
        return res

    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        args = args or []
        if name:
            # Be sure name_search is symetric to name_get
            name = name.split(' / ')[-1]
            args = [('name', operator, name)] + args
        return self._search(args, limit=limit, access_rights_uid=name_get_uid)
class PartnerTitle(models.Model):
    """Honorific titles (e.g. Doctor, Professor) assignable to partners."""
    _name = 'res.partner.title'
    _order = 'name'
    _description = 'Partner Title'

    name = fields.Char(string='Title', required=True, translate=True)
    shortcut = fields.Char(string='Abbreviation', translate=True)
class Partner(models.Model):
    """Contacts: persons and companies, possibly organized in a hierarchy
    (a company and its child contacts/addresses)."""
    _description = 'Contact'
    _inherit = ['format.address.mixin', 'avatar.mixin']
    _name = "res.partner"
    _order = "display_name"
    def _default_category(self):
        """Default tags: the category_id passed through the context, if any."""
        return self.env['res.partner.category'].browse(self._context.get('category_id'))
    @api.model
    def default_get(self, default_fields):
        """Add the company of the parent as default if we are creating a child partner.
        Also take the parent lang by default if any, otherwise, fallback to default DB lang.

        :param default_fields: list of field names to provide defaults for
        :return: dict of default values
        """
        values = super().default_get(default_fields)
        parent = self.env["res.partner"]
        if 'parent_id' in default_fields and values.get('parent_id'):
            parent = self.browse(values.get('parent_id'))
            values['company_id'] = parent.company_id.id
        if 'lang' in default_fields:
            values['lang'] = values.get('lang') or parent.lang or self.env.lang
        # protection for `default_type` values leaking from menu action context (e.g. for crm's email)
        if 'type' in default_fields and values.get('type'):
            if values['type'] not in self._fields['type'].get_values(self.env):
                values['type'] = None
        return values
name = fields.Char(index=True)
display_name = fields.Char(compute='_compute_display_name', recursive=True, store=True, index=True)
date = fields.Date(index=True)
title = fields.Many2one('res.partner.title')
parent_id = fields.Many2one('res.partner', string='Related Company', index=True)
parent_name = fields.Char(related='parent_id.name', readonly=True, string='Parent name')
child_ids = fields.One2many('res.partner', 'parent_id', string='Contact', domain=[('active', '=', True)]) # force "active_test" domain to bypass _search() override
ref = fields.Char(string='Reference', index=True)
lang = fields.Selection(_lang_get, string='Language',
help="All the emails and documents sent to this contact will be translated in this language.")
active_lang_count = fields.Integer(compute='_compute_active_lang_count')
tz = fields.Selection(_tz_get, string='Timezone', default=lambda self: self._context.get('tz'),
help="When printing documents and exporting/importing data, time values are computed according to this timezone.\n"
"If the timezone is not set, UTC (Coordinated Universal Time) is used.\n"
"Anywhere else, time values are computed according to the time offset of your web client.")
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
user_id = fields.Many2one('res.users', string='Salesperson',
help='The internal user in charge of this contact.')
vat = fields.Char(string='Tax ID', index=True, help="The Tax Identification Number. Complete it if the contact is subjected to government taxes. Used in some legal statements.")
same_vat_partner_id = fields.Many2one('res.partner', string='Partner with same Tax ID', compute='_compute_same_vat_partner_id', store=False)
bank_ids = fields.One2many('res.partner.bank', 'partner_id', string='Banks')
website = fields.Char('Website Link')
comment = fields.Html(string='Notes')
category_id = fields.Many2many('res.partner.category', column1='partner_id',
column2='category_id', string='Tags', default=_default_category)
credit_limit = fields.Float(string='Credit Limit')
active = fields.Boolean(default=True)
employee = fields.Boolean(help="Check this box if this contact is an Employee.")
function = fields.Char(string='Job Position')
type = fields.Selection(
[('contact', 'Contact'),
('invoice', 'Invoice Address'),
('delivery', 'Delivery Address'),
('other', 'Other Address'),
("private", "Private Address"),
], string='Address Type',
default='contact',
help="Invoice & Delivery addresses are used in sales orders. Private addresses are only visible by authorized users.")
# address fields
street = fields.Char()
street2 = fields.Char()
zip = fields.Char(change_default=True)
city = fields.Char()
state_id = fields.Many2one("res.country.state", string='State', ondelete='restrict', domain="[('country_id', '=?', country_id)]")
country_id = fields.Many2one('res.country', string='Country', ondelete='restrict')
country_code = fields.Char(related='country_id.code', string="Country Code")
partner_latitude = fields.Float(string='Geo Latitude', digits=(10, 7))
partner_longitude = fields.Float(string='Geo Longitude', digits=(10, 7))
email = fields.Char()
email_formatted = fields.Char(
'Formatted Email', compute='_compute_email_formatted',
help='Format email address "Name <email@domain>"')
phone = fields.Char()
mobile = fields.Char()
is_company = fields.Boolean(string='Is a Company', default=False,
help="Check if the contact is a company, otherwise it is a person")
industry_id = fields.Many2one('res.partner.industry', 'Industry')
# company_type is only an interface field, do not use it in business logic
company_type = fields.Selection(string='Company Type',
selection=[('person', 'Individual'), ('company', 'Company')],
compute='_compute_company_type', inverse='_write_company_type')
company_id = fields.Many2one('res.company', 'Company', index=True)
color = fields.Integer(string='Color Index', default=0)
user_ids = fields.One2many('res.users', 'partner_id', string='Users', auto_join=True)
partner_share = fields.Boolean(
'Share Partner', compute='_compute_partner_share', store=True,
help="Either customer (not a user), either shared user. Indicated the current partner is a customer without "
"access or with a limited access created for sharing data.")
contact_address = fields.Char(compute='_compute_contact_address', string='Complete Address')
# technical field used for managing commercial fields
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
compute='_compute_commercial_partner', recursive=True,
store=True, index=True)
commercial_company_name = fields.Char('Company Name Entity', compute='_compute_commercial_company_name',
store=True)
company_name = fields.Char('Company Name')
barcode = fields.Char(help="Use a barcode to identify this contact.", copy=False, company_dependent=True)
# hack to allow using plain browse record in qweb views, and used in ir.qweb.field.contact
self = fields.Many2one(comodel_name=_name, compute='_compute_get_ids')
_sql_constraints = [
('check_name', "CHECK( (type='contact' AND name IS NOT NULL) or (type!='contact') )", 'Contacts require a name'),
]
    # The avatar recomputation must also trigger on user share status, company
    # flag and address type, because _compute_avatar() below selects different
    # placeholder images based on those fields.
    @api.depends('name', 'user_ids.share', 'image_1920', 'is_company', 'type')
    def _compute_avatar_1920(self):
        super()._compute_avatar_1920()
    @api.depends('name', 'user_ids.share', 'image_1024', 'is_company', 'type')
    def _compute_avatar_1024(self):
        super()._compute_avatar_1024()
    @api.depends('name', 'user_ids.share', 'image_512', 'is_company', 'type')
    def _compute_avatar_512(self):
        super()._compute_avatar_512()
    @api.depends('name', 'user_ids.share', 'image_256', 'is_company', 'type')
    def _compute_avatar_256(self):
        super()._compute_avatar_256()
    @api.depends('name', 'user_ids.share', 'image_128', 'is_company', 'type')
    def _compute_avatar_128(self):
        super()._compute_avatar_128()
def _compute_avatar(self, avatar_field, image_field):
partners_with_internal_user = self.filtered(lambda partner: partner.user_ids - partner.user_ids.filtered('share'))
super(Partner, partners_with_internal_user)._compute_avatar(avatar_field, image_field)
partners_without_image = (self - partners_with_internal_user).filtered(lambda p: not p[image_field])
for _, group in tools.groupby(partners_without_image, key=lambda p: p._avatar_get_placeholder_path()):
group_partners = self.env['res.partner'].concat(*group)
group_partners[avatar_field] = group_partners[0]._avatar_get_placeholder()
for partner in self - partners_with_internal_user - partners_without_image:
partner[avatar_field] = partner[image_field]
def _avatar_get_placeholder_path(self):
if self.is_company:
return "base/static/img/company_image.png"
if self.type == 'delivery':
return "base/static/img/truck.png"
if self.type == 'invoice':
return "base/static/img/money.png"
return super()._avatar_get_placeholder_path()
    @api.depends('is_company', 'name', 'parent_id.display_name', 'type', 'company_name', 'commercial_company_name')
    def _compute_display_name(self):
        # neutralize display-related context keys so the stored value is the
        # canonical long form, independent of the caller's context
        diff = dict(show_address=None, show_address_only=None, show_email=None, html_format=None, show_vat=None)
        names = dict(self.with_context(**diff).name_get())
        for partner in self:
            partner.display_name = names.get(partner.id)
    @api.depends('lang')
    def _compute_active_lang_count(self):
        # number of installed languages; identical for every record
        lang_count = len(self.env['res.lang'].get_installed())
        for partner in self:
            partner.active_lang_count = lang_count
    @api.depends('tz')
    def _compute_tz_offset(self):
        for partner in self:
            # UTC offset string such as "+0200"; GMT when no timezone is set
            partner.tz_offset = datetime.datetime.now(pytz.timezone(partner.tz or 'GMT')).strftime('%z')
@api.depends('user_ids.share', 'user_ids.active')
def _compute_partner_share(self):
super_partner = self.env['res.users'].browse(SUPERUSER_ID).partner_id
if super_partner in self:
super_partner.partner_share = False
for partner in self - super_partner:
partner.partner_share = not partner.user_ids or not any(not user.share for user in partner.user_ids)
    @api.depends('vat', 'company_id')
    def _compute_same_vat_partner_id(self):
        """Find another partner sharing the same Tax ID, for a duplicate warning."""
        for partner in self:
            # use _origin to deal with onchange()
            partner_id = partner._origin.id
            # active_test = False because if a partner has been deactivated you still want to raise the error,
            # so that you can reactivate it instead of creating a new one, which would lose its history.
            Partner = self.with_context(active_test=False).sudo()
            domain = [
                ('vat', '=', partner.vat),
            ]
            if partner.company_id:
                domain += [('company_id', 'in', [False, partner.company_id.id])]
            if partner_id:
                # exclude the record itself and its whole sub-tree
                domain += [('id', '!=', partner_id), '!', ('id', 'child_of', partner_id)]
            partner.same_vat_partner_id = bool(partner.vat) and not partner.parent_id and Partner.search(domain, limit=1)
    @api.depends(lambda self: self._display_address_depends())
    def _compute_contact_address(self):
        # single-line formatted address, using the country's address layout
        for partner in self:
            partner.contact_address = partner._display_address()
    def _compute_get_ids(self):
        # `self` pseudo-field: lets qweb templates reach the record itself
        for partner in self:
            partner.self = partner.id
    @api.depends('is_company', 'parent_id.commercial_partner_id')
    def _compute_commercial_partner(self):
        """The commercial entity is the first company (or root contact) up the hierarchy."""
        for partner in self:
            if partner.is_company or not partner.parent_id:
                partner.commercial_partner_id = partner
            else:
                partner.commercial_partner_id = partner.parent_id.commercial_partner_id
    @api.depends('company_name', 'parent_id.is_company', 'commercial_partner_id.name')
    def _compute_commercial_company_name(self):
        for partner in self:
            p = partner.commercial_partner_id
            # prefer the real commercial entity's name over the free-text company_name
            partner.commercial_company_name = p.is_company and p.name or partner.company_name
    @api.model
    def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Serve the simplified form in 'force_email' contexts and localize the address layout."""
        if (not view_id) and (view_type == 'form') and self._context.get('force_email'):
            view_id = self.env.ref('base.view_partner_simple_form').id
        res = super(Partner, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type == 'form':
            # reorder address fields according to the country's convention
            res['arch'] = self._fields_view_get_address(res['arch'])
        return res
    @api.constrains('parent_id')
    def _check_parent_id(self):
        # a partner may not be (indirectly) its own parent
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive Partner hierarchies.'))
def copy(self, default=None):
self.ensure_one()
chosen_name = default.get('name') if default else ''
new_name = chosen_name or _('%s (copy)', self.name)
default = dict(default or {}, name=new_name)
return super(Partner, self).copy(default)
    @api.onchange('parent_id')
    def onchange_parent_id(self):
        """Warn when re-parenting an existing contact and propose the parent's
        address; returns an onchange-style dict (``warning``/``value`` keys)."""
        # return values in result, as this method is used by _fields_sync()
        if not self.parent_id:
            return
        result = {}
        partner = self._origin
        if partner.parent_id and partner.parent_id != self.parent_id:
            result['warning'] = {
                'title': _('Warning'),
                'message': _('Changing the company of a contact should only be done if it '
                             'was never correctly set. If an existing contact starts working for a new '
                             'company then a new contact should be created under that new '
                             'company. You can use the "Discard" button to abandon this change.')}
        if partner.type == 'contact' or self.type == 'contact':
            # for contacts: copy the parent address, if set (aka, at least one
            # value is set in the address: otherwise, keep the one from the
            # contact)
            address_fields = self._address_fields()
            if any(self.parent_id[key] for key in address_fields):
                def convert(value):
                    # onchange values must be ids, not recordsets
                    return value.id if isinstance(value, models.BaseModel) else value
                result['value'] = {key: convert(self.parent_id[key]) for key in address_fields}
        return result
    @api.onchange('parent_id')
    def _onchange_parent_id_for_lang(self):
        # While creating / updating child contact, take the parent lang by default if any
        # otherwise, fallback to default context / DB lang
        if self.parent_id:
            self.lang = self.parent_id.lang or self.env.context.get('default_lang') or self.env.lang
    @api.onchange('country_id')
    def _onchange_country_id(self):
        # clear the state when it no longer belongs to the selected country
        if self.country_id and self.country_id != self.state_id.country_id:
            self.state_id = False
    @api.onchange('state_id')
    def _onchange_state(self):
        # align the country with the selected state
        if self.state_id.country_id:
            self.country_id = self.state_id.country_id
    @api.onchange('email')
    def onchange_email(self):
        # optionally fetch an avatar from Gravatar (opt-in via context key)
        if not self.image_1920 and self._context.get('gravatar_image') and self.email:
            self.image_1920 = self._get_gravatar_image(self.email)
    @api.onchange('parent_id', 'company_id')
    def _onchange_company_id(self):
        # a child contact belongs to its parent's company
        if self.parent_id:
            self.company_id = self.parent_id.company_id.id
    @api.depends('name', 'email')
    def _compute_email_formatted(self):
        """RFC2822-style '"Name" <email>' string; empty when there is no email."""
        for partner in self:
            if partner.email:
                partner.email_formatted = tools.formataddr((partner.name or u"False", partner.email or u"False"))
            else:
                partner.email_formatted = ''
    @api.depends('is_company')
    def _compute_company_type(self):
        for partner in self:
            partner.company_type = 'company' if partner.is_company else 'person'
    def _write_company_type(self):
        # inverse of _compute_company_type
        for partner in self:
            partner.is_company = partner.company_type == 'company'
    @api.onchange('company_type')
    def onchange_company_type(self):
        # mirror the UI selection onto the stored boolean immediately
        self.is_company = (self.company_type == 'company')
@api.constrains('barcode')
def _check_barcode_unicity(self):
if self.barcode and self.env['res.partner'].search_count([('barcode', '=', self.barcode)]) > 1:
raise ValidationError('An other user already has this barcode')
    def _update_fields_values(self, fields):
        """ Returns dict of write() values for synchronizing ``fields`` """
        values = {}
        for fname in fields:
            field = self._fields[fname]
            if field.type == 'many2one':
                values[fname] = self[fname].id
            elif field.type == 'one2many':
                raise AssertionError(_('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`'))
            elif field.type == 'many2many':
                # replace the whole set of linked records on the target
                values[fname] = [Command.set(self[fname].ids)]
            else:
                values[fname] = self[fname]
        return values
    @api.model
    def _address_fields(self):
        """Returns the list of address fields that are synced from the parent."""
        return list(ADDRESS_FIELDS)
    @api.model
    def _formatting_address_fields(self):
        """Returns the list of address fields usable to format addresses."""
        return self._address_fields()
    def update_address(self, vals):
        # write only the address-related keys of ``vals``, if any
        addr_vals = {key: vals[key] for key in self._address_fields() if key in vals}
        if addr_vals:
            return super(Partner, self).write(addr_vals)
    @api.model
    def _commercial_fields(self):
        """ Returns the list of fields that are managed by the commercial entity
        to which a partner belongs. These fields are meant to be hidden on
        partners that aren't `commercial entities` themselves, and will be
        delegated to the parent `commercial entity`. The list is meant to be
        extended by inheriting classes. """
        return ['vat', 'credit_limit', 'industry_id']
    def _commercial_sync_from_company(self):
        """ Handle sync of commercial fields when a new parent commercial entity is set,
        as if they were related fields """
        commercial_partner = self.commercial_partner_id
        if commercial_partner != self:
            sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
            self.write(sync_vals)
            # propagate the freshly synced values further down the hierarchy
            self._commercial_sync_to_children()
    def _commercial_sync_to_children(self):
        """ Handle sync of commercial fields to descendants """
        commercial_partner = self.commercial_partner_id
        sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
        # only non-company children inherit commercial values; recurse first
        sync_children = self.child_ids.filtered(lambda c: not c.is_company)
        for child in sync_children:
            child._commercial_sync_to_children()
        res = sync_children.write(sync_vals)
        sync_children._compute_commercial_partner()
        return res
    def _fields_sync(self, values):
        """ Sync commercial fields and address fields from company and to children after create/update,
        just as if those were all modeled as fields.related to the parent """
        # 1. From UPSTREAM: sync from parent
        if values.get('parent_id') or values.get('type') == 'contact':
            # 1a. Commercial fields: sync if parent changed
            if values.get('parent_id'):
                self._commercial_sync_from_company()
            # 1b. Address fields: sync if parent or use_parent changed *and* both are now set
            if self.parent_id and self.type == 'contact':
                # reuse the onchange to get the parent's address values
                onchange_vals = self.onchange_parent_id().get('value', {})
                self.update_address(onchange_vals)
        # 2. To DOWNSTREAM: sync children
        self._children_sync(values)
    def _children_sync(self, values):
        """Push commercial/address changes in ``values`` down to child contacts."""
        if not self.child_ids:
            return
        # 2a. Commercial Fields: sync if commercial entity
        if self.commercial_partner_id == self:
            commercial_fields = self._commercial_fields()
            if any(field in values for field in commercial_fields):
                self._commercial_sync_to_children()
        # also resync when a child points to a stale commercial entity
        for child in self.child_ids.filtered(lambda c: not c.is_company):
            if child.commercial_partner_id != self.commercial_partner_id:
                self._commercial_sync_to_children()
                break
        # 2b. Address fields: sync if address changed
        address_fields = self._address_fields()
        if any(field in values for field in address_fields):
            contacts = self.child_ids.filtered(lambda c: c.type == 'contact')
            contacts.update_address(values)
    def _handle_first_contact_creation(self):
        """ On creation of first contact for a company (or root) that has no address, assume contact address
        was meant to be company address """
        parent = self.parent_id
        address_fields = self._address_fields()
        # only when this is the parent's single child, the child has an
        # address and the parent has none
        if (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
            any(self[f] for f in address_fields) and not any(parent[f] for f in address_fields):
            addr_vals = self._update_fields_values(address_fields)
            parent.update_address(addr_vals)
def _clean_website(self, website):
url = urls.url_parse(website)
if not url.scheme:
if not url.netloc:
url = url.replace(netloc=url.path, path='')
website = url.replace(scheme='http').to_url()
return website
    def write(self, vals):
        """Override: block archiving partners of active users, validate company
        changes against linked users, clean the website URL, then sync
        commercial/address fields to relatives."""
        if vals.get('active') is False:
            # DLE: It should not be necessary to modify this to make work the ORM. The problem was just the recompute
            # of partner.user_ids when you create a new user for this partner, see test test_70_archive_internal_partners
            # You modified it in a previous commit, see original commit of this:
            # https://github.com/odoo/odoo/commit/9d7226371730e73c296bcc68eb1f856f82b0b4ed
            #
            # RCO: when creating a user for partner, the user is automatically added in partner.user_ids.
            # This is wrong if the user is not active, as partner.user_ids only returns active users.
            # Hence this temporary hack until the ORM updates inverse fields correctly.
            self.invalidate_cache(['user_ids'], self._ids)
            users = self.env['res.users'].sudo().search([('partner_id', 'in', self.ids)])
            if users:
                if self.env['res.users'].sudo(False).check_access_rights('write', raise_exception=False):
                    error_msg = _('You cannot archive contacts linked to an active user.\n'
                                  'You first need to archive their associated user.\n\n'
                                  'Linked active users : %(names)s', names=", ".join([u.display_name for u in users]))
                    action_error = users._action_show()
                    raise RedirectWarning(error_msg, action_error, _('Go to users'))
                else:
                    raise ValidationError(_('You cannot archive contacts linked to an active user.\n'
                                            'Ask an administrator to archive their associated user first.\n\n'
                                            'Linked active users :\n%(names)s', names=", ".join([u.display_name for u in users])))
        # res.partner must only allow to set the company_id of a partner if it
        # is the same as the company of all users that inherit from this partner
        # (this is to allow the code from res_users to write to the partner!) or
        # if setting the company_id to False (this is compatible with any user
        # company)
        if vals.get('website'):
            vals['website'] = self._clean_website(vals['website'])
        if vals.get('parent_id'):
            # company_name is superseded by an actual parent company
            vals['company_name'] = False
        if 'company_id' in vals:
            company_id = vals['company_id']
            for partner in self:
                if company_id and partner.user_ids:
                    company = self.env['res.company'].browse(company_id)
                    companies = set(user.company_id for user in partner.user_ids)
                    if len(companies) > 1 or company not in companies:
                        raise UserError(
                            ("The selected company is not compatible with the companies of the related user(s)"))
                if partner.child_ids:
                    partner.child_ids.write({'company_id': company_id})
        result = True
        # To write in SUPERUSER on field is_company and avoid access rights problems.
        if 'is_company' in vals and self.user_has_groups('base.group_partner_manager') and not self.env.su:
            result = super(Partner, self.sudo()).write({'is_company': vals.get('is_company')})
            del vals['is_company']
        result = result and super(Partner, self).write(vals)
        for partner in self:
            # writing on a user's partner requires write access on res.users
            if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
                self.env['res.users'].check_access_rights('write')
            partner._fields_sync(vals)
        return result
    @api.model_create_multi
    def create(self, vals_list):
        """Override: clean websites, drop company_name when a parent is set,
        then sync fields and handle first-contact address promotion."""
        if self.env.context.get('import_file'):
            self._check_import_consistency(vals_list)
        for vals in vals_list:
            if vals.get('website'):
                vals['website'] = self._clean_website(vals['website'])
            if vals.get('parent_id'):
                # company_name is superseded by an actual parent company
                vals['company_name'] = False
        partners = super(Partner, self).create(vals_list)
        # _load_records_create() sets this key and batches the sync itself
        if self.env.context.get('_partners_skip_fields_sync'):
            return partners
        for partner, vals in zip(partners, vals_list):
            partner._fields_sync(vals)
            # Lang: propagate from parent if no value was given
            if 'lang' not in vals and partner.parent_id:
                partner._onchange_parent_id_for_lang()
            partner._handle_first_contact_creation()
        return partners
    @api.ondelete(at_uninstall=False)
    def _unlink_except_user(self):
        """Forbid deleting a partner that still backs a user account."""
        users = self.env['res.users'].sudo().search([('partner_id', 'in', self.ids)])
        if not users:
            return  # no linked user, operation is allowed
        # users with write access get a redirect to the users list; others a plain error
        if self.env['res.users'].sudo(False).check_access_rights('write', raise_exception=False):
            error_msg = _('You cannot delete contacts linked to an active user.\n'
                          'You should rather archive them after archiving their associated user.\n\n'
                          'Linked active users : %(names)s', names=", ".join([u.display_name for u in users]))
            action_error = users._action_show()
            raise RedirectWarning(error_msg, action_error, _('Go to users'))
        else:
            raise ValidationError(_('You cannot delete contacts linked to an active user.\n'
                                    'Ask an administrator to archive their associated user first.\n\n'
                                    'Linked active users :\n%(names)s', names=", ".join([u.display_name for u in users])))
    def _load_records_create(self, vals_list):
        """Batched variant of create() used by data loading: performs the
        parent->child field sync in grouped writes instead of per record."""
        partners = super(Partner, self.with_context(_partners_skip_fields_sync=True))._load_records_create(vals_list)
        # batch up first part of _fields_sync
        # group partners by commercial_partner_id (if not self) and parent_id (if type == contact)
        groups = collections.defaultdict(list)
        for partner, vals in zip(partners, vals_list):
            cp_id = None
            if vals.get('parent_id') and partner.commercial_partner_id != partner:
                cp_id = partner.commercial_partner_id.id
            add_id = None
            if partner.parent_id and partner.type == 'contact':
                add_id = partner.parent_id.id
            groups[(cp_id, add_id)].append(partner.id)
        for (cp_id, add_id), children in groups.items():
            # values from parents (commercial, regular) written to their common children
            to_write = {}
            # commercial fields from commercial partner
            if cp_id:
                to_write = self.browse(cp_id)._update_fields_values(self._commercial_fields())
            # address fields from parent
            if add_id:
                parent = self.browse(add_id)
                for f in self._address_fields():
                    v = parent[f]
                    if v:
                        to_write[f] = v.id if isinstance(v, models.BaseModel) else v
            if to_write:
                self.browse(children).write(to_write)
        # do the second half of _fields_sync the "normal" way
        for partner, vals in zip(partners, vals_list):
            partner._children_sync(vals)
            partner._handle_first_contact_creation()
        return partners
    def create_company(self):
        """Promote the free-text ``company_name`` into an actual parent company
        record, re-parenting this contact and its children under it."""
        self.ensure_one()
        if self.company_name:
            # Create parent company
            values = dict(name=self.company_name, is_company=True, vat=self.vat)
            # the new company inherits this contact's address
            values.update(self._update_fields_values(self._address_fields()))
            new_company = self.create(values)
            # Set new company as my parent
            self.write({
                'parent_id': new_company.id,
                'child_ids': [Command.update(partner_id, dict(parent_id=new_company.id)) for partner_id in self.child_ids.ids]
            })
        return True
def open_commercial_entity(self):
""" Utility method used to add an "Open Company" button in partner views """
self.ensure_one()
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'res_id': self.commercial_partner_id.id,
'target': 'current',
'flags': {'form': {'action_buttons': True}}}
def open_parent(self):
""" Utility method used to add an "Open Parent" button in partner views """
self.ensure_one()
address_form_id = self.env.ref('base.view_partner_address_form').id
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'views': [(address_form_id, 'form')],
'res_id': self.parent_id.id,
'target': 'new',
'flags': {'form': {'action_buttons': True}}}
def _get_contact_name(self, partner, name):
return "%s, %s" % (partner.commercial_company_name or partner.sudo().parent_id.name, name)
    def _get_name(self):
        """ Utility method to allow name_get to be overridden without re-browsing the partner """
        partner = self
        name = partner.name or ''
        if partner.company_name or partner.parent_id:
            if not name and partner.type in ['invoice', 'delivery', 'other']:
                # unnamed address: use the address-type label instead
                name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]
            if not partner.is_company:
                name = self._get_contact_name(partner, name)
        if self._context.get('show_address_only'):
            name = partner._display_address(without_company=True)
        if self._context.get('show_address'):
            name = name + "\n" + partner._display_address(without_company=True)
        # collapse whitespace-only lines produced by the address formatting
        name = re.sub(r'\s+\n', '\n', name)
        if self._context.get('partner_show_db_id'):
            name = "%s (%s)" % (name, partner.id)
        if self._context.get('address_inline'):
            splitted_names = name.split("\n")
            name = ", ".join([n for n in splitted_names if n.strip()])
        if self._context.get('show_email') and partner.email:
            name = "%s <%s>" % (name, partner.email)
        if self._context.get('html_format'):
            name = name.replace('\n', '<br/>')
        if self._context.get('show_vat') and partner.vat:
            name = "%s ‒ %s" % (name, partner.vat)
        return name.strip()
def name_get(self):
res = []
for partner in self:
name = partner._get_name()
res.append((partner.id, name))
return res
    def _parse_partner_name(self, text):
        """ Parse partner name (given by text) in order to find a name and an
        email. Supported syntax:

          * Raoul <[email protected]>
          * "Raoul le Grand" <[email protected]>
          * Raoul [email protected] (strange fault tolerant support from df40926d2a57c101a3e2d221ecfd08fbb4fea30e)

        Otherwise: default, everything is set as the name. Starting from 13.3
        returned email will be normalized to have a coherent encoding.
        """
        name, email = '', ''
        split_results = tools.email_split_tuples(text)
        if split_results:
            name, email = split_results[0]
        if email and not name:
            # fault-tolerant fallback: treat spaces as separators to isolate the email
            fallback_emails = tools.email_split(text.replace(' ', ','))
            if fallback_emails:
                email = fallback_emails[0]
                # whatever precedes the email, stripped of quotes/brackets, is the name
                name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
        if email:
            email = tools.email_normalize(email)
        else:
            name, email = text, ''
        return name, email
    @api.model
    def name_create(self, name):
        """ Override of orm's name_create method for partners. The purpose is
        to handle some basic formats to create partners using the
        name_create.
        If only an email address is received and that the regex cannot find
        a name, the name will have the email value.
        If 'force_email' key in context: must find the email address. """
        default_type = self._context.get('default_type')
        # drop a default_type leaked from a menu action if it is not a valid value
        if default_type and default_type not in self._fields['type'].get_values(self.env):
            context = dict(self._context)
            context.pop('default_type')
            self = self.with_context(context)
        name, email = self._parse_partner_name(name)
        if self._context.get('force_email') and not email:
            raise UserError(_("Couldn't create contact without email address!"))
        create_values = {self._rec_name: name or email}
        if email:  # keep default_email in context
            create_values['email'] = email
        partner = self.create(create_values)
        return partner.name_get()[0]
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """ Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
        always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
        # a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
        if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
                and args[0][2] != [False]:
            self = self.with_context(active_test=False)
        return super(Partner, self)._search(args, offset=offset, limit=limit, order=order,
                                            count=count, access_rights_uid=access_rights_uid)
def _get_name_search_order_by_fields(self):
return ''
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """Custom SQL name search matching email, display name, reference and
        VAT in one query, ordered by display-name match first."""
        self = self.with_user(name_get_uid) if name_get_uid else self
        # as the implementation is in SQL, we force the recompute of fields if necessary
        self.recompute(['display_name'])
        self.flush()
        if args is None:
            args = []
        order_by_rank = self.env.context.get('res_partner_search_mode')
        if (name or order_by_rank) and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
            self.check_access_rights('read')
            where_query = self._where_calc(args)
            self._apply_ir_rules(where_query, 'read')
            from_clause, where_clause, where_clause_params = where_query.get_sql()
            from_str = from_clause if from_clause else 'res_partner'
            where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
            # search on the name of the contacts and of its company
            search_name = name
            if operator in ('ilike', 'like'):
                search_name = '%%%s%%' % name
            if operator in ('=ilike', '=like'):
                # strip the leading '=' to obtain the plain SQL operator
                operator = operator[1:]
            unaccent = get_unaccent_wrapper(self.env.cr)
            fields = self._get_name_search_order_by_fields()
            query = """SELECT res_partner.id
                         FROM {from_str}
                      {where} ({email} {operator} {percent}
                           OR {display_name} {operator} {percent}
                           OR {reference} {operator} {percent}
                           OR {vat} {operator} {percent})
                           -- don't panic, trust postgres bitmap
                     ORDER BY {fields} {display_name} {operator} {percent} desc,
                              {display_name}
                    """.format(from_str=from_str,
                               fields=fields,
                               where=where_str,
                               operator=operator,
                               email=unaccent('res_partner.email'),
                               display_name=unaccent('res_partner.display_name'),
                               reference=unaccent('res_partner.ref'),
                               percent=unaccent('%s'),
                               vat=unaccent('res_partner.vat'),)
            where_clause_params += [search_name]*3  # for email / display_name, reference
            where_clause_params += [re.sub(r'[^a-zA-Z0-9\-\.]+', '', search_name) or None]  # for vat
            where_clause_params += [search_name]  # for order by
            if limit:
                query += ' limit %s'
                where_clause_params.append(limit)
            self.env.cr.execute(query, where_clause_params)
            return [row[0] for row in self.env.cr.fetchall()]
        return super(Partner, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
    @api.model
    @api.returns('self', lambda value: value.id)
    def find_or_create(self, email, assert_valid_email=False):
        """ Find a partner with the given ``email`` or use :py:method:`~.name_create`
        to create a new one.

        :param str email: email-like string, which should contain at least one email,
            e.g. ``"Raoul Grosbedon <[email protected]>"``
        :param boolean assert_valid_email: raise if no valid email is found
        :return: newly created record
        """
        if not email:
            raise ValueError(_('An email is required for find_or_create to work'))
        parsed_name, parsed_email = self._parse_partner_name(email)
        if not parsed_email and assert_valid_email:
            raise ValueError(_('A valid email is required for find_or_create to work properly.'))
        partners = self.search([('email', '=ilike', parsed_email)], limit=1)
        if partners:
            return partners
        create_values = {self._rec_name: parsed_name or parsed_email}
        if parsed_email:  # keep default_email in context
            create_values['email'] = parsed_email
        return self.create(create_values)
def _get_gravatar_image(self, email):
email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
url = "https://www.gravatar.com/avatar/" + email_hash
try:
res = requests.get(url, params={'d': '404', 's': '128'}, timeout=5)
if res.status_code != requests.codes.ok:
return False
except requests.exceptions.ConnectionError as e:
return False
except requests.exceptions.Timeout as e:
return False
return base64.b64encode(res.content)
def _email_send(self, email_from, subject, body, on_error=None):
for partner in self.filtered('email'):
tools.email_send(email_from, [partner.email], subject, body, on_error)
return True
    def address_get(self, adr_pref=None):
        """ Find contacts/addresses of the right type(s) by doing a depth-first-search
        through descendants within company boundaries (stop at entities flagged ``is_company``)
        then continuing the search at the ancestors that are within the same company boundaries.
        Falls back to type ``'contact'`` (always searched), or to the partner
        itself when no ``'contact'`` child is found either.

        :param adr_pref: iterable of address types to resolve, e.g.
            ``['delivery', 'invoice']``; ``'contact'`` is always added
        :return: dict mapping each requested type to a partner id
        """
        adr_pref = set(adr_pref or [])
        if 'contact' not in adr_pref:
            adr_pref.add('contact')
        result = {}
        visited = set()
        for partner in self:
            current_partner = partner
            while current_partner:
                to_scan = [current_partner]
                # Scan descendants, DFS
                while to_scan:
                    record = to_scan.pop(0)
                    visited.add(record)
                    # first match wins for each type
                    if record.type in adr_pref and not result.get(record.type):
                        result[record.type] = record.id
                    if len(result) == len(adr_pref):
                        # every requested type resolved: stop early
                        return result
                    # prepend children to keep the traversal depth-first;
                    # never descend into a separate commercial entity
                    to_scan = [c for c in record.child_ids
                               if c not in visited
                               if not c.is_company] + to_scan
                # Continue scanning at ancestor if current_partner is not a commercial entity
                if current_partner.is_company or not current_partner.parent_id:
                    break
                current_partner = current_partner.parent_id
        # default to type 'contact' or the partner itself
        default = result.get('contact', self.id or False)
        for adr_type in adr_pref:
            result[adr_type] = result.get(adr_type) or default
        return result
@api.model
def view_header_get(self, view_id, view_type):
if self.env.context.get('category_id'):
return _(
'Partners: %(category)s',
category=self.env['res.partner.category'].browse(self.env.context['category_id']).name,
)
return super().view_header_get(view_id, view_type)
@api.model
@api.returns('self')
def main_partner(self):
''' Return the main partner '''
return self.env.ref('base.main_partner')
    @api.model
    def _get_default_address_format(self):
        """Fallback ``%``-style address layout used when the partner's country
        does not define its own ``address_format``."""
        return "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
@api.model
def _get_address_format(self):
return self.country_id.address_format or self._get_default_address_format()
def _prepare_display_address(self, without_company=False):
# get the information that will be injected into the display format
# get the address format
address_format = self._get_address_format()
args = defaultdict(str, {
'state_code': self.state_id.code or '',
'state_name': self.state_id.name or '',
'country_code': self.country_id.code or '',
'country_name': self._get_country_name(),
'company_name': self.commercial_company_name or '',
})
for field in self._formatting_address_fields():
args[field] = getattr(self, field) or ''
if without_company:
args['company_name'] = ''
elif self.commercial_company_name:
address_format = '%(company_name)s\n' + address_format
return address_format, args
def _display_address(self, without_company=False):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param without_company: if address contains company
:returns: the address formatted in a display that fit its country habits (or the default ones
if not country is specified)
:rtype: string
'''
address_format, args = self._prepare_display_address(without_company)
return address_format % args
def _display_address_depends(self):
# field dependencies of method _display_address()
return self._formatting_address_fields() + [
'country_id', 'company_name', 'state_id',
]
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Customers'),
'template': '/base/static/xls/res_partner.xls'
}]
@api.model
def _check_import_consistency(self, vals_list):
"""
The values created by an import are generated by a name search, field by field.
As a result there is no check that the field values are consistent with each others.
We check that if the state is given a value, it does belong to the given country, or we remove it.
"""
States = self.env['res.country.state']
states_ids = {vals['state_id'] for vals in vals_list if vals.get('state_id')}
state_to_country = States.search([('id', 'in', list(states_ids))]).read(['country_id'])
for vals in vals_list:
if vals.get('state_id'):
country_id = next(c['country_id'][0] for c in state_to_country if c['id'] == vals.get('state_id'))
state = States.browse(vals['state_id'])
if state.country_id.id != country_id:
state_domain = [('code', '=', state.code),
('country_id', '=', country_id)]
state = States.search(state_domain, limit=1)
vals['state_id'] = state.id # replace state or remove it if not found
def _get_country_name(self):
return self.country_id.name or ''
class ResPartnerIndustry(models.Model):
    # Referential of industries (sectors of activity), ordered alphabetically.
    _description = 'Industry'
    _name = "res.partner.industry"
    _order = "name"
    # Short translatable label.
    name = fields.Char('Name', translate=True)
    # Longer descriptive variant of the label.
    full_name = fields.Char('Full Name', translate=True)
    # Archive flag: inactive industries are hidden from selection lists.
    active = fields.Boolean('Active', default=True)
| 48.472998 | 52,060 |
39,092 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
from odoo import api, fields, models, tools, SUPERUSER_ID, _, Command
from odoo.exceptions import MissingError, UserError, ValidationError, AccessError
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval, test_python_expr
from odoo.tools.float_utils import float_compare
from odoo.http import request
import base64
from collections import defaultdict
import functools
import logging
from pytz import timezone
_logger = logging.getLogger(__name__)
class IrActions(models.Model):
    """Base of every action type; concrete action models share the
    ``ir_actions`` table (via ``_table``) and the binding machinery that
    attaches actions to a model's contextual menus."""
    _name = 'ir.actions.actions'
    _description = 'Actions'
    _table = 'ir_actions'
    _order = 'name'

    name = fields.Char(required=True)
    # Model name of the concrete action, e.g. 'ir.actions.act_window'.
    type = fields.Char(string='Action Type', required=True)
    # External identifier (module.name), computed on demand.
    xml_id = fields.Char(compute='_compute_xml_id', string="External ID")
    help = fields.Html(string='Action Description',
                       help='Optional help text for the users with a description of the target view, such as its usage and purpose.',
                       translate=True)
    # When set, the action shows up in this model's contextual sidebar.
    binding_model_id = fields.Many2one('ir.model', ondelete='cascade',
                                       help="Setting a value makes this action available in the sidebar for the given model.")
    binding_type = fields.Selection([('action', 'Action'),
                                     ('report', 'Report')],
                                    required=True, default='action')
    # Comma-separated view types in which the binding is displayed.
    binding_view_types = fields.Char(default='list,form')

    def _compute_xml_id(self):
        """Expose each record's external id (if any) through ``xml_id``."""
        res = self.get_external_id()
        for record in self:
            record.xml_id = res.get(record.id)

    @api.model_create_multi
    def create(self, vals_list):
        res = super(IrActions, self).create(vals_list)
        # self.get_bindings() depends on action records
        self.clear_caches()
        return res

    def write(self, vals):
        res = super(IrActions, self).write(vals)
        # self.get_bindings() depends on action records
        self.clear_caches()
        return res

    def unlink(self):
        """unlink ir.action.todo which are related to actions which will be deleted.
        NOTE: ondelete cascade will not work on ir.actions.actions so we will need to do it manually."""
        todos = self.env['ir.actions.todo'].search([('action_id', 'in', self.ids)])
        todos.unlink()
        res = super(IrActions, self).unlink()
        # self.get_bindings() depends on action records
        self.clear_caches()
        return res

    @api.ondelete(at_uninstall=True)
    def _unlink_check_home_action(self):
        # Detach the action from users' home action before deleting it.
        self.env['res.users'].with_context(active_test=False).search([('action_id', 'in', self.ids)]).sudo().write({'action_id': None})

    @api.model
    def _get_eval_context(self, action=None):
        """ evaluation context to pass to safe_eval """
        return {
            'uid': self._uid,
            'user': self.env.user,
            'time': tools.safe_eval.time,
            'datetime': tools.safe_eval.datetime,
            'dateutil': tools.safe_eval.dateutil,
            'timezone': timezone,
            'float_compare': float_compare,
            'b64encode': base64.b64encode,
            'b64decode': base64.b64decode,
            'Command': Command,
        }

    @api.model
    def get_bindings(self, model_name):
        # Debug mode keeps 'base.group_no_one'-restricted bindings visible.
        return self._get_bindings(model_name, bool(request) and request.session.debug)

    @tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'model_name', 'debug')
    def _get_bindings(self, model_name, debug=False):
        """ Retrieve the list of actions bound to the given model.

        :return: a dict mapping binding types to a list of dict describing
            actions, where the latter is given by calling the method
            ``read`` on the action record.
        """
        cr = self.env.cr
        IrModelAccess = self.env['ir.model.access']
        # discard unauthorized actions, and read action definitions
        result = defaultdict(list)
        user_groups = self.env.user.groups_id
        if not debug:
            user_groups -= self.env.ref('base.group_no_one')
        # flush pending ORM writes before querying the table directly
        self.flush()
        cr.execute("""
            SELECT a.id, a.type, a.binding_type
            FROM ir_actions a
            JOIN ir_model m ON a.binding_model_id = m.id
            WHERE m.model = %s
            ORDER BY a.id
        """, [model_name])
        for action_id, action_model, binding_type in cr.fetchall():
            try:
                action = self.env[action_model].sudo().browse(action_id)
                action_groups = getattr(action, 'groups_id', ())
                action_model = getattr(action, 'res_model', False)
                if action_groups and not action_groups & user_groups:
                    # the user may not perform this action
                    continue
                if action_model and not IrModelAccess.check(action_model, mode='read', raise_exception=False):
                    # the user won't be able to read records
                    continue
                fields = ['name', 'binding_view_types']
                if 'sequence' in action._fields:
                    fields.append('sequence')
                result[binding_type].append(action.read(fields)[0])
            except (AccessError, MissingError):
                # skip actions that can no longer be read or were removed
                continue
        # sort actions by their sequence if sequence available
        if result.get('action'):
            result['action'] = sorted(result['action'], key=lambda vals: vals.get('sequence', 0))
        return result

    @api.model
    def _for_xml_id(self, full_xml_id):
        """ Returns the action content for the provided xml_id

        :param xml_id: the namespace-less id of the action (the @id
            attribute from the XML file)
        :return: A read() view of the ir.actions.action safe for web use
        """
        record = self.env.ref(full_xml_id)
        # the xml id must point at an action of this (sub)type
        assert isinstance(self.env[record._name], type(self))
        action = record.sudo().read()[0]
        # strip fields that are not whitelisted for web-client consumption
        return {
            field: value
            for field, value in action.items()
            if field in record._get_readable_fields()
        }

    def _get_readable_fields(self):
        """ return the list of fields that are safe to read

        Fetched via /web/action/load or _for_xml_id method
        Only fields used by the web client should included
        Accessing content useful for the server-side must
        be done manually with superuser
        """
        return {
            "binding_model_id", "binding_type", "binding_view_types",
            "display_name", "help", "id", "name", "type", "xml_id",
        }
class IrActionsActWindow(models.Model):
    """Window action: opens a model in a set of views (tree, form, ...)."""
    _name = 'ir.actions.act_window'
    _description = 'Action Window'
    _table = 'ir_act_window'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    @api.constrains('res_model', 'binding_model_id')
    def _check_model(self):
        """Reject actions whose target or binding model is not in the registry."""
        for action in self:
            if action.res_model not in self.env:
                raise ValidationError(_('Invalid model name %r in action definition.', action.res_model))
            if action.binding_model_id and action.binding_model_id.model not in self.env:
                raise ValidationError(_('Invalid model name %r in action definition.', action.binding_model_id.model))

    @api.depends('view_ids.view_mode', 'view_mode', 'view_id.type')
    def _compute_views(self):
        """ Compute an ordered list of the specific view modes that should be
        enabled when displaying the result of this action, along with the
        ID of the specific view to use for each mode, if any were required.

        This function hides the logic of determining the precedence between
        the view_modes string, the view_ids o2m, and the view_id m2o that
        can be set on the action.
        """
        for act in self:
            act.views = [(view.view_id.id, view.view_mode) for view in act.view_ids]
            got_modes = [view.view_mode for view in act.view_ids]
            all_modes = act.view_mode.split(',')
            # modes listed in view_mode but without an explicit view_ids entry
            missing_modes = [mode for mode in all_modes if mode not in got_modes]
            if missing_modes:
                if act.view_id.type in missing_modes:
                    # reorder missing modes to put view_id first if present
                    missing_modes.remove(act.view_id.type)
                    act.views.append((act.view_id.id, act.view_id.type))
                act.views.extend([(False, mode) for mode in missing_modes])

    @api.constrains('view_mode')
    def _check_view_mode(self):
        """Validate the comma-separated ``view_mode`` string."""
        for rec in self:
            modes = rec.view_mode.split(',')
            if len(modes) != len(set(modes)):
                raise ValidationError(_('The modes in view_mode must not be duplicated: %s', modes))
            # NOTE(review): this only rejects a mode that is exactly " ";
            # "tree, form" (space after comma) still passes — confirm intent.
            if ' ' in modes:
                raise ValidationError(_('No spaces allowed in view_mode: %r', modes))

    @api.depends('res_model', 'search_view_id')
    def _compute_search_view(self):
        """Serialize the search view definition used by the action."""
        for act in self:
            fvg = self.env[act.res_model].fields_view_get(act.search_view_id.id, 'search')
            act.search_view = str(fvg)

    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default="ir.actions.act_window")
    view_id = fields.Many2one('ir.ui.view', string='View Ref.', ondelete='set null')
    domain = fields.Char(string='Domain Value',
                         help="Optional domain filtering of the destination data, as a Python expression")
    # NOTE(review): default={} is a dict literal on a Char field — upstream
    # relies on its current rendering; confirm before normalizing to '{}'.
    context = fields.Char(string='Context Value', default={}, required=True,
                          help="Context dictionary as Python expression, empty by default (Default: {})")
    res_id = fields.Integer(string='Record ID', help="Database ID of record to open in form view, when ``view_mode`` is set to 'form' only")
    res_model = fields.Char(string='Destination Model', required=True,
                            help="Model name of the object to open in the view window")
    target = fields.Selection([('current', 'Current Window'), ('new', 'New Window'), ('inline', 'Inline Edit'), ('fullscreen', 'Full Screen'), ('main', 'Main action of Current Window')], default="current", string='Target Window')
    view_mode = fields.Char(required=True, default='tree,form',
                            help="Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)")
    usage = fields.Char(string='Action Usage',
                        help="Used to filter menu and home actions from the user form.")
    view_ids = fields.One2many('ir.actions.act_window.view', 'act_window_id', string='No of Views')
    views = fields.Binary(compute='_compute_views',
                          help="This function field computes the ordered list of views that should be enabled " \
                               "when displaying the result of an action, federating view mode, views and " \
                               "reference view. The result is returned as an ordered list of pairs (view_id,view_mode).")
    limit = fields.Integer(default=80, help='Default limit for the list view')
    groups_id = fields.Many2many('res.groups', 'ir_act_window_group_rel',
                                 'act_id', 'gid', string='Groups')
    search_view_id = fields.Many2one('ir.ui.view', string='Search View Ref.')
    filter = fields.Boolean()
    search_view = fields.Text(compute='_compute_search_view')

    def read(self, fields=None, load='_classic_read'):
        """ call the method get_empty_list_help of the model and set the window action help message
        """
        result = super(IrActionsActWindow, self).read(fields, load=load)
        if not fields or 'help' in fields:
            for values in result:
                model = values.get('res_model')
                if model in self.env:
                    eval_ctx = dict(self.env.context)
                    try:
                        ctx = safe_eval(values.get('context', '{}'), eval_ctx)
                    # NOTE(review): bare except — swallows any evaluation
                    # failure (including BaseException); confirm it should
                    # not at least be narrowed to Exception.
                    except:
                        ctx = {}
                    values['help'] = self.with_context(**ctx).env[model].get_empty_list_help(values.get('help', ''))
        return result

    @api.model_create_multi
    def create(self, vals_list):
        self.clear_caches()
        # default the action name to the target model's description
        for vals in vals_list:
            if not vals.get('name') and vals.get('res_model'):
                vals['name'] = self.env[vals['res_model']]._description
        return super(IrActionsActWindow, self).create(vals_list)

    def unlink(self):
        self.clear_caches()
        return super(IrActionsActWindow, self).unlink()

    def exists(self):
        """Filter the recordset down to ids present in the (cached) table."""
        ids = self._existing()
        existing = self.filtered(lambda rec: rec.id in ids)
        return existing

    @api.model
    @tools.ormcache()
    def _existing(self):
        """Cached set of all existing window-action ids."""
        self._cr.execute("SELECT id FROM %s" % self._table)
        return set(row[0] for row in self._cr.fetchall())

    def _get_readable_fields(self):
        return super()._get_readable_fields() | {
            "context", "domain", "filter", "groups_id", "limit", "res_id",
            "res_model", "search_view", "search_view_id", "target", "view_id",
            "view_mode", "views",
            # `flags` is not a real field of ir.actions.act_window but is used
            # to give the parameters to generate the action
            "flags"
        }
# Selection values for the view types a window action may open.
VIEW_TYPES = [
    ('tree', 'Tree'),
    ('form', 'Form'),
    ('graph', 'Graph'),
    ('pivot', 'Pivot'),
    ('calendar', 'Calendar'),
    ('gantt', 'Gantt'),
    ('kanban', 'Kanban'),
]
class IrActionsActWindowView(models.Model):
    """One (view, mode) entry of a window action's ordered view list."""
    _name = 'ir.actions.act_window.view'
    _description = 'Action Window View'
    _table = 'ir_act_window_view'
    _rec_name = 'view_id'
    _order = 'sequence,id'

    sequence = fields.Integer()
    view_id = fields.Many2one('ir.ui.view', string='View')
    view_mode = fields.Selection(VIEW_TYPES, string='View Type', required=True)
    act_window_id = fields.Many2one('ir.actions.act_window', string='Action', ondelete='cascade')
    multi = fields.Boolean(string='On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view.")

    def _auto_init(self):
        """After the standard table setup, enforce one entry per
        (action, view mode) pair through a unique index."""
        res = super(IrActionsActWindowView, self)._auto_init()
        tools.create_unique_index(self._cr, 'act_window_view_unique_mode_per_action',
                                  self._table, ['act_window_id', 'view_mode'])
        return res
class IrActionsActWindowclose(models.Model):
    """Action that simply closes the current window (no table of its own:
    stored in the shared ``ir_actions`` table)."""
    _name = 'ir.actions.act_window_close'
    _description = 'Action Window Close'
    _inherit = 'ir.actions.actions'
    _table = 'ir_actions'

    type = fields.Char(default='ir.actions.act_window_close')

    def _get_readable_fields(self):
        return super()._get_readable_fields() | {
            # 'effect' is not a real field of ir.actions.act_window_close but is
            # used to display the rainbowman
            "effect"
        }
class IrActionsActUrl(models.Model):
    """Action that redirects the client to a URL."""
    _name = 'ir.actions.act_url'
    _description = 'Action URL'
    _table = 'ir_act_url'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default='ir.actions.act_url')
    url = fields.Text(string='Action URL', required=True)
    # 'new' opens a new browser window/tab, 'self' replaces the current one.
    target = fields.Selection([('new', 'New Window'), ('self', 'This Window')],
                              string='Action Target', default='new', required=True)

    def _get_readable_fields(self):
        return super()._get_readable_fields() | {
            "target", "url",
        }
class IrActionsServer(models.Model):
    """ Server actions model. Server action work on a base model and offer various
    type of actions that can be executed automatically, for example using base
    action rules, of manually, by adding the action in the 'More' contextual
    menu.

    Since Odoo 8.0 a button 'Create Menu Action' button is available on the
    action form view. It creates an entry in the More menu of the base model.
    This allows to create server actions and run them in mass mode easily through
    the interface.

    The available actions are :

    - 'Execute Python Code': a block of python code that will be executed
    - 'Create a new Record': create a new record with new values
    - 'Write on a Record': update the values of a record
    - 'Execute several actions': define an action that triggers several other
      server actions
    """
    _name = 'ir.actions.server'
    _description = 'Server Actions'
    _table = 'ir_act_server'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'sequence,name'

    # Template shown in the 'code' field when creating a python server action.
    DEFAULT_PYTHON_CODE = """# Available variables:
# - env: Odoo Environment on which the action is triggered
# - model: Odoo Model of the record on which the action is triggered; is a void recordset
# - record: record on which the action is triggered; may be void
# - records: recordset of all records on which the action is triggered in multi-mode; may be void
# - time, datetime, dateutil, timezone: useful Python libraries
# - float_compare: Odoo function to compare floats based on specific precisions
# - log: log(message, level='info'): logging function to record debug information in ir.logging table
# - UserError: Warning Exception to use with raise
# - Command: x2Many commands namespace
# To return an action, assign: action = {...}\n\n\n\n"""

    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default='ir.actions.server')
    usage = fields.Selection([
        ('ir_actions_server', 'Server Action'),
        ('ir_cron', 'Scheduled Action')], string='Usage',
        default='ir_actions_server', required=True)
    # Selects which _run_action_<state>[_multi] runner executes the action.
    state = fields.Selection([
        ('code', 'Execute Python Code'),
        ('object_create', 'Create a new Record'),
        ('object_write', 'Update the Record'),
        ('multi', 'Execute several actions')], string='Action To Do',
        default='object_write', required=True, copy=True,
        help="Type of server action. The following values are available:\n"
             "- 'Execute Python Code': a block of python code that will be executed\n"
             "- 'Create': create a new record with new values\n"
             "- 'Update a Record': update the values of a record\n"
             "- 'Execute several actions': define an action that triggers several other server actions\n"
             "- 'Send Email': automatically send an email (Discuss)\n"
             "- 'Add Followers': add followers to a record (Discuss)\n"
             "- 'Create Next Activity': create an activity (Discuss)")
    # Generic
    sequence = fields.Integer(default=5,
                              help="When dealing with multiple actions, the execution order is "
                                   "based on the sequence. Low number means high priority.")
    model_id = fields.Many2one('ir.model', string='Model', required=True, ondelete='cascade', index=True,
                               help="Model on which the server action runs.")
    model_name = fields.Char(related='model_id.model', string='Model Name', readonly=True, store=True)
    # Python code
    code = fields.Text(string='Python Code', groups='base.group_system',
                       default=DEFAULT_PYTHON_CODE,
                       help="Write Python code that the action will execute. Some variables are "
                            "available for use; help about python expression is given in the help tab.")
    # Multi
    child_ids = fields.Many2many('ir.actions.server', 'rel_server_actions', 'server_id', 'action_id',
                                 string='Child Actions', help='Child server actions that will be executed. Note that the last return returned action value will be used as global return value.')
    # Create
    crud_model_id = fields.Many2one('ir.model', string='Target Model',
                                    help="Model for record creation / update. Set this field only to specify a different model than the base model.")
    crud_model_name = fields.Char(related='crud_model_id.model', string='Target Model Name', readonly=True)
    link_field_id = fields.Many2one('ir.model.fields', string='Link Field',
                                    help="Provide the field used to link the newly created record "
                                         "on the record used by the server action.")
    fields_lines = fields.One2many('ir.server.object.lines', 'server_id', string='Value Mapping', copy=True)
    groups_id = fields.Many2many('res.groups', 'ir_act_server_group_rel',
                                 'act_id', 'gid', string='Groups')

    @api.constrains('code')
    def _check_python_code(self):
        """Statically validate the python code of 'code' actions."""
        for action in self.sudo().filtered('code'):
            msg = test_python_expr(expr=action.code.strip(), mode="exec")
            if msg:
                raise ValidationError(msg)

    @api.constrains('child_ids')
    def _check_recursion(self):
        """Forbid a 'multi' action from (transitively) containing itself."""
        if not self._check_m2m_recursion('child_ids'):
            raise ValidationError(_('Recursion found in child server actions'))

    def _get_readable_fields(self):
        return super()._get_readable_fields() | {
            "groups_id", "model_name",
        }

    def _get_runner(self):
        """Resolve the method implementing this action's ``state``.

        :return: ``(callable or None, multi)`` where ``multi`` tells whether
            the runner handles whole recordsets or one record at a time.
            Deprecated public ``run_action_*`` names are still honoured.
        """
        multi = True
        t = type(self)
        fn = getattr(t, f'_run_action_{self.state}_multi', None)\
            or getattr(t, f'run_action_{self.state}_multi', None)
        if not fn:
            multi = False
            fn = getattr(t, f'_run_action_{self.state}', None)\
                or getattr(t, f'run_action_{self.state}', None)
        if fn and fn.__name__.startswith('run_action_'):
            # legacy public runner: bind it to this record
            fn = functools.partial(fn, self)
        return fn, multi

    def _register_hook(self):
        """Warn (once, at registry setup) about deprecated public runners."""
        super()._register_hook()
        for cls in type(self).mro():
            for symbol in vars(cls).keys():
                if symbol.startswith('run_action_'):
                    _logger.warning(
                        "RPC-public action methods are deprecated, found %r (in class %s.%s)",
                        symbol, cls.__module__, cls.__name__
                    )

    @api.onchange('crud_model_id')
    def _onchange_crud_model_id(self):
        # the link field only makes sense for the previous target model
        self.link_field_id = False

    def create_action(self):
        """ Create a contextual action for each server action. """
        for action in self:
            action.write({'binding_model_id': action.model_id.id,
                          'binding_type': 'action'})
        return True

    def unlink_action(self):
        """ Remove the contextual actions created for the server actions. """
        self.check_access_rights('write', raise_exception=True)
        self.filtered('binding_model_id').write({'binding_model_id': False})
        return True

    def _run_action_code_multi(self, eval_context):
        """Runner for 'code' actions: execute the python block."""
        safe_eval(self.code.strip(), eval_context, mode="exec", nocopy=True)  # nocopy allows to return 'action'
        return eval_context.get('action')

    def _run_action_multi(self, eval_context=None):
        """Runner for 'multi' actions: run children, keep the last action returned."""
        res = False
        for act in self.child_ids.sorted():
            res = act.run() or res
        return res

    def _run_action_object_write(self, eval_context=None):
        """Apply specified write changes to active_id."""
        vals = self.fields_lines.eval_value(eval_context=eval_context)
        res = {line.col1.name: vals[line.id] for line in self.fields_lines}
        if self._context.get('onchange_self'):
            # onchange context: mutate the in-memory record, no DB write
            record_cached = self._context['onchange_self']
            for field, new_value in res.items():
                record_cached[field] = new_value
        else:
            self.env[self.model_id.model].browse(self._context.get('active_id')).write(res)

    def _run_action_object_create(self, eval_context=None):
        """Create specified model object with specified values.

        If applicable, link active_id.<self.link_field_id> to the new record.
        """
        vals = self.fields_lines.eval_value(eval_context=eval_context)
        res = {line.col1.name: vals[line.id] for line in self.fields_lines}
        res = self.env[self.crud_model_id.model].create(res)
        if self.link_field_id:
            record = self.env[self.model_id.model].browse(self._context.get('active_id'))
            if self.link_field_id.ttype in ['one2many', 'many2many']:
                record.write({self.link_field_id.name: [Command.link(res.id)]})
            else:
                record.write({self.link_field_id.name: res.id})

    def _get_eval_context(self, action=None):
        """ Prepare the context used when evaluating python code, like the
        python formulas or code server actions.

        :param action: the current server action
        :type action: browse record
        :returns: dict -- evaluation context given to (safe_)safe_eval """
        def log(message, level="info"):
            # dedicated cursor so the log line survives a rollback of the
            # action's own transaction
            with self.pool.cursor() as cr:
                cr.execute("""
                    INSERT INTO ir_logging(create_date, create_uid, type, dbname, name, level, message, path, line, func)
                    VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, (self.env.uid, 'server', self._cr.dbname, __name__, level, message, "action", action.id, action.name))
        eval_context = super(IrActionsServer, self)._get_eval_context(action=action)
        model_name = action.model_id.sudo().model
        model = self.env[model_name]
        record = None
        records = None
        if self._context.get('active_model') == model_name and self._context.get('active_id'):
            record = model.browse(self._context['active_id'])
        if self._context.get('active_model') == model_name and self._context.get('active_ids'):
            records = model.browse(self._context['active_ids'])
        if self._context.get('onchange_self'):
            # onchange mode: the pseudo-record being edited takes precedence
            record = self._context['onchange_self']
        eval_context.update({
            # orm
            'env': self.env,
            'model': model,
            # Exceptions
            'Warning': odoo.exceptions.Warning,
            'UserError': odoo.exceptions.UserError,
            # record
            'record': record,
            'records': records,
            # helpers
            'log': log,
        })
        return eval_context

    def run(self):
        """ Runs the server action. For each server action, the
        :samp:`_run_action_{TYPE}[_multi]` method is called. This allows easy
        overriding of the server actions.

        The `_multi` suffix means the runner can operate on multiple records,
        otherwise if there are multiple records the runner will be called once
        for each

        :param dict context: context should contain following keys

            - active_id: id of the current object (single mode)
            - active_model: current model that should equal the action's model

            The following keys are optional:

            - active_ids: ids of the current records (mass mode). If active_ids
              and active_id are present, active_ids is given precedence.

        :return: an action_id to be executed, or False is finished correctly without
                 return action
        """
        res = False
        for action in self.sudo():
            action_groups = action.groups_id
            if action_groups:
                if not (action_groups & self.env.user.groups_id):
                    raise AccessError(_("You don't have enough access rights to run this action."))
            else:
                # ungrouped action: require write access on the target model
                try:
                    self.env[action.model_name].check_access_rights("write")
                except AccessError:
                    _logger.warning("Forbidden server action %r executed while the user %s does not have access to %s.",
                                    action.name, self.env.user.login, action.model_name,
                                    )
                    raise
            eval_context = self._get_eval_context(action)
            records = eval_context.get('record') or eval_context['model']
            records |= eval_context.get('records') or eval_context['model']
            if records:
                try:
                    records.check_access_rule('write')
                except AccessError:
                    _logger.warning("Forbidden server action %r executed while the user %s does not have access to %s.",
                                    action.name, self.env.user.login, records,
                                    )
                    raise
            runner, multi = action._get_runner()
            if runner and multi:
                # call the multi method
                run_self = action.with_context(eval_context['env'].context)
                res = runner(run_self, eval_context=eval_context)
            elif runner:
                active_id = self._context.get('active_id')
                if not active_id and self._context.get('onchange_self'):
                    active_id = self._context['onchange_self']._origin.id
                    if not active_id:  # onchange on new record
                        res = runner(action, eval_context=eval_context)
                active_ids = self._context.get('active_ids', [active_id] if active_id else [])
                for active_id in active_ids:
                    # run context dedicated to a particular active_id
                    run_self = action.with_context(active_ids=[active_id], active_id=active_id)
                    eval_context["env"].context = run_self._context
                    res = runner(run_self, eval_context=eval_context)
            else:
                _logger.warning(
                    "Found no way to execute server action %r of type %r, ignoring it. "
                    "Verify that the type is correct or add a method called "
                    "`_run_action_<type>` or `_run_action_<type>_multi`.",
                    action.name, action.state
                )
        return res or False
class IrServerObjectLines(models.Model):
    """One (field, value) mapping line of a create/write server action."""
    _name = 'ir.server.object.lines'
    _description = 'Server Action value mapping'
    _sequence = 'ir_actions_id_seq'

    server_id = fields.Many2one('ir.actions.server', string='Related Server Action', ondelete='cascade')
    # target field on the action's model
    col1 = fields.Many2one('ir.model.fields', string='Field', required=True, ondelete='cascade')
    value = fields.Text(required=True, help="Expression containing a value specification. \n"
                                            "When Formula type is selected, this field may be a Python expression "
                                            " that can use the same values as for the code field on the server action.\n"
                                            "If Value type is selected, the value will be used directly without evaluation.")
    evaluation_type = fields.Selection([
        ('value', 'Value'),
        ('reference', 'Reference'),
        ('equation', 'Python expression')
    ], 'Evaluation Type', default='value', required=True, change_default=True)
    # UI helper presenting ``value`` as a record picker for relational fields
    resource_ref = fields.Reference(
        string='Record', selection='_selection_target_model',
        compute='_compute_resource_ref', inverse='_set_resource_ref')

    @api.model
    def _selection_target_model(self):
        """Selection for ``resource_ref``: every model of the registry."""
        return [(model.model, model.name) for model in self.env['ir.model'].sudo().search([])]

    @api.depends('col1.relation', 'value', 'evaluation_type')
    def _compute_resource_ref(self):
        """Derive the 'model,id' reference from the stored text ``value``.

        A non-integer or dangling id falls back to the first record of the
        target model (or 0 when the model is empty)."""
        for line in self:
            if line.evaluation_type in ['reference', 'value'] and line.col1 and line.col1.relation:
                value = line.value or ''
                try:
                    value = int(value)
                    if not self.env[line.col1.relation].browse(value).exists():
                        record = list(self.env[line.col1.relation]._search([], limit=1))
                        value = record[0] if record else 0
                except ValueError:
                    record = list(self.env[line.col1.relation]._search([], limit=1))
                    value = record[0] if record else 0
                line.resource_ref = '%s,%s' % (line.col1.relation, value)
            else:
                line.resource_ref = False

    @api.constrains('col1', 'evaluation_type')
    def _raise_many2many_error(self):
        if self.filtered(lambda line: line.col1.ttype == 'many2many' and line.evaluation_type == 'reference'):
            raise ValidationError(_('many2many fields cannot be evaluated by reference'))

    @api.onchange('resource_ref')
    def _set_resource_ref(self):
        """Inverse of ``resource_ref``: persist the picked record id as text."""
        for line in self.filtered(lambda line: line.evaluation_type == 'reference'):
            if line.resource_ref:
                line.value = str(line.resource_ref.id)

    def eval_value(self, eval_context=None):
        """Return ``{line.id: evaluated value}`` for each mapping line.

        'equation' lines are evaluated with safe_eval against ``eval_context``;
        many2one/integer columns get their raw text coerced to int when possible,
        any other value is passed through unchanged."""
        result = {}
        for line in self:
            expr = line.value
            if line.evaluation_type == 'equation':
                expr = safe_eval(line.value, eval_context)
            elif line.col1.ttype in ['many2one', 'integer']:
                try:
                    expr = int(line.value)
                except Exception:
                    pass
            result[line.id] = expr
        return result
class IrActionsTodo(models.Model):
    """
    Configuration Wizards

    Keeps at most one wizard in the 'open' state at any time (see
    ensure_one_open_todo); launching a wizard marks it 'done'.
    """
    _name = 'ir.actions.todo'
    _description = "Configuration Wizards"
    _order = "sequence, id"
    action_id = fields.Many2one('ir.actions.actions', string='Action', required=True, index=True)
    sequence = fields.Integer(default=10)
    # 'open' = still to be launched, 'done' = already handled
    state = fields.Selection([('open', 'To Do'), ('done', 'Done')], string='Status', default='open', required=True)
    name = fields.Char()
    @api.model_create_multi
    def create(self, vals_list):
        """Create todos; re-enforce the single-open-todo invariant if any is open."""
        todos = super(IrActionsTodo, self).create(vals_list)
        for todo in todos:
            if todo.state == "open":
                self.ensure_one_open_todo()
        return todos
    def write(self, vals):
        """Write; re-enforce the single-open-todo invariant when (re)opening."""
        res = super(IrActionsTodo, self).write(vals)
        if vals.get('state', '') == 'open':
            self.ensure_one_open_todo()
        return res
    @api.model
    def ensure_one_open_todo(self):
        # Keep only the first open todo (ordered sequence asc, id desc):
        # offset=1 skips that one, every other open todo is forced to 'done'.
        open_todo = self.search([('state', '=', 'open')], order='sequence asc, id desc', offset=1)
        if open_todo:
            open_todo.write({'state': 'done'})
    def name_get(self):
        # Display the linked action's name (the `name` field is optional).
        return [(record.id, record.action_id.name) for record in self]
    def unlink(self):
        """Delete todos, but never `base.open_menu`: reset its action instead."""
        if self:
            try:
                todo_open_menu = self.env.ref('base.open_menu')
                # don't remove base.open_menu todo but set its original action
                if todo_open_menu in self:
                    todo_open_menu.action_id = self.env.ref('base.action_client_base_menu').id
                    self -= todo_open_menu
            except ValueError:
                # base.open_menu does not exist in this database: nothing to protect
                pass
        return super(IrActionsTodo, self).unlink()
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        # Search on the related action's name, which is what name_get displays.
        args = args or []
        if name:
            return self._search(expression.AND([[('action_id', operator, name)], args]), limit=limit, access_rights_uid=name_get_uid)
        return super(IrActionsTodo, self)._name_search(name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)
    def action_launch(self):
        """ Launch Action of Wizard"""
        self.ensure_one()
        # launching marks this wizard as handled
        self.write({'state': 'done'})
        # Load action
        action_type = self.action_id.type
        action = self.env[action_type].browse(self.action_id.id)
        result = action.read()[0]
        if action_type != 'ir.actions.act_window':
            return result
        result.setdefault('context', '{}')
        # Open a specific record when res_id is provided in the context
        ctx = safe_eval(result['context'], {'user': self.env.user})
        if ctx.get('res_id'):
            result['res_id'] = ctx.pop('res_id')
        # disable log for automatic wizards
        ctx['disable_log'] = True
        result['context'] = ctx
        return result
    def action_open(self):
        """ Sets configuration wizard in TODO state"""
        return self.write({'state': 'open'})
class IrActionsActClient(models.Model):
    """Client action: an action identified by a tag that the web client
    interprets itself (no server-side window/report involved)."""
    _name = 'ir.actions.client'
    _description = 'Client Action'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_client'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default='ir.actions.client')
    tag = fields.Char(string='Client action tag', required=True,
                      help="An arbitrary string, interpreted by the client"
                           " according to its own needs and wishes. There "
                           "is no central tag repository across clients.")
    target = fields.Selection([('current', 'Current Window'), ('new', 'New Window'), ('fullscreen', 'Full Screen'), ('main', 'Main action of Current Window')], default="current", string='Target Window')
    res_model = fields.Char(string='Destination Model', help="Optional model, mostly used for needactions.")
    context = fields.Char(string='Context Value', default="{}", required=True, help="Context dictionary as Python expression, empty by default (Default: {})")
    # `params` is the public face; the data is persisted in `params_store`
    # (see _compute_params / _inverse_params below)
    params = fields.Binary(compute='_compute_params', inverse='_inverse_params', string='Supplementary arguments',
                           help="Arguments sent to the client along with "
                                "the view tag")
    params_store = fields.Binary(string='Params storage', readonly=True, attachment=False)
    @api.depends('params_store')
    def _compute_params(self):
        """Deserialize params_store (a Python-literal string) into params."""
        # read without bin_size so we get the actual payload, not its size
        self_bin = self.with_context(bin_size=False, bin_size_params_store=False)
        for record, record_bin in zip(self, self_bin):
            record.params = record_bin.params_store and safe_eval(record_bin.params_store, {'uid': self._uid})
    def _inverse_params(self):
        """Persist params: dict values are stored as their repr() source text."""
        for record in self:
            params = record.params
            record.params_store = repr(params) if isinstance(params, dict) else params
    def _get_default_form_view(self):
        # Strip the technical params/params_store fields from the generated
        # default form view; they are not meant for direct editing.
        doc = super(IrActionsActClient, self)._get_default_form_view()
        params = doc.find(".//field[@name='params']")
        params.getparent().remove(params)
        params_store = doc.find(".//field[@name='params_store']")
        params_store.getparent().remove(params_store)
        return doc
    def _get_readable_fields(self):
        # fields the web client is allowed to read on this action type
        return super()._get_readable_fields() | {
            "context", "params", "res_model", "tag", "target",
        }
| 45.140878 | 39,092 |
2,615 | py | PYTHON | 15.0 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
import odoo.addons
import logging
import sys
_logger = logging.getLogger(__name__)


def get_precision(application):
    """Deprecated compatibility shim.

    Old code called ``decimal_precision.get_precision(<application>)`` to get
    a digits specification; nowadays ``digits=<application>`` is passed
    directly. This shim logs a deprecation warning and hands *application*
    back unchanged.
    """
    _logger.warning(
        "Deprecated call to decimal_precision.get_precision(<application>), "
        "use digits=<application> instead"
    )
    return application
class DecimalPrecision(models.Model):
    """Per-usage decimal-digits configuration shared across the system."""
    _name = 'decimal.precision'
    _description = 'Decimal Precision'
    name = fields.Char('Usage', index=True, required=True)
    digits = fields.Integer('Digits', required=True, default=2)
    _sql_constraints = [
        ('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
    ]
    @api.model
    @tools.ormcache('application')
    def precision_get(self, application):
        """Return the digits configured for *application*, defaulting to 2.

        The result is ormcache'd per application; the cache is invalidated
        by create/write/unlink below.
        """
        # flush pending ORM writes so the raw SQL query sees current data
        self.flush(['name', 'digits'])
        self.env.cr.execute('select digits from decimal_precision where name=%s', (application,))
        res = self.env.cr.fetchone()
        return res[0] if res else 2
    @api.model_create_multi
    def create(self, vals_list):
        res = super(DecimalPrecision, self).create(vals_list)
        # invalidate the precision_get ormcache
        self.clear_caches()
        return res
    def write(self, data):
        res = super(DecimalPrecision, self).write(data)
        # invalidate the precision_get ormcache
        self.clear_caches()
        return res
    def unlink(self):
        res = super(DecimalPrecision, self).unlink()
        # invalidate the precision_get ormcache
        self.clear_caches()
        return res
    @api.onchange('digits')
    def _onchange_digits_warning(self):
        """Warn in the UI when precision is reduced: stored data is not rescaled."""
        if self.digits < self._origin.digits:
            return {
                'warning': {
                    'title': _("Warning for %s", self.name),
                    'message': _(
                        "The precision has been reduced for %s.\n"
                        "Note that existing data WON'T be updated by this change.\n\n"
                        "As decimal precisions impact the whole system, this may cause critical issues.\n"
                        "E.g. reducing the precision could disturb your financial balance.\n\n"
                        "Therefore, changing decimal precisions in a running database is not recommended.",
                        self.name,
                    )
                }
            }
# compatibility for decimal_precision.get_precision(): expose the module in addons namespace
# Modules written for very old releases import this file as
# `odoo.addons.decimal_precision` (or even `openerp.addons.decimal_precision`).
# Registering these aliases in sys.modules keeps such imports working.
dp = sys.modules['odoo.addons.base.models.decimal_precision']
odoo.addons.decimal_precision = dp
sys.modules['odoo.addons.decimal_precision'] = dp
sys.modules['openerp.addons.decimal_precision'] = dp
| 35.821918 | 2,615 |
1,541 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class IrModelReferenceReport(models.AbstractModel):
    _name = 'report.base.report_irmodulereference'
    _description = 'Module Reference Report (base)'

    @api.model
    def _object_find(self, module):
        """Return the ir.model records that were defined by *module*."""
        xml_data = self.env['ir.model.data'].sudo().search(
            [('model', '=', 'ir.model'), ('module', '=', module.name)])
        return self.env['ir.model'].browse(xml_data.mapped('res_id'))

    def _fields_find(self, model, module):
        """Return the fields_get() entries of *model* that belong to *module*,
        as (name, description) pairs sorted by field name."""
        wildcard = 'field_%s_%%' % (model.replace('.', '_'),)
        xml_data = self.env['ir.model.data'].sudo().search(
            [('model', '=', 'ir.model.fields'),
             ('module', '=', module.name),
             ('name', 'like', wildcard)])
        if not xml_data:
            return []
        field_names = self.env['ir.model.fields'].browse(xml_data.mapped('res_id')).mapped('name')
        return sorted(self.env[model].fields_get(field_names).items())

    @api.model
    def _get_report_values(self, docids, data=None):
        """Collect the rendering values for the module reference QWeb report."""
        report = self.env['ir.actions.report']._get_report_from_name('base.report_irmodulereference')
        modules = self.env['ir.module.module'].browse(docids)
        return {
            'doc_ids': docids,
            'doc_model': report.model,
            'docs': modules,
            'findobj': self._object_find,
            'findfields': self._fields_find,
        }
| 40.552632 | 1,541 |
3,911 | py | PYTHON | 15.0 | import re
import xmlrpc.client
from datetime import date, datetime
from xmlrpc.client import dumps, loads
from markupsafe import Markup
from werkzeug.wrappers import Response
from odoo.http import Controller, dispatch_rpc, request, route
from odoo.service import wsgi_server
from odoo.fields import Date, Datetime, Command
from odoo.tools import lazy, ustr
from odoo.tools.misc import frozendict
# ustr decodes as utf-8 or latin1 so we can search for the ASCII bytes
# XML 1.0 valid characters: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | ...
# This pattern matches the control bytes that are NOT valid XML 1.0 characters
# (everything below #x20 except TAB, LF, CR). The previous range #x0F-#x1F
# accidentally let #x0E slip through.
XML_INVALID = re.compile(b'[\x00-\x08\x0B\x0C\x0E-\x1F]')
class OdooMarshaller(xmlrpc.client.Marshaller):
    """xmlrpc.client.Marshaller extended with dump handlers for types Odoo
    commonly returns over RPC (frozendict, bytes, date/datetime, lazy,
    Command, Markup). Installed globally via the monkey-patch below."""
    # own copy of the dispatch table so our additions don't leak into the parent
    dispatch = dict(xmlrpc.client.Marshaller.dispatch)
    def dump_frozen_dict(self, value, write):
        # marshal a frozendict as a regular XML-RPC struct
        value = dict(value)
        self.dump_struct(value, write)
    dispatch[frozendict] = dump_frozen_dict
    # By default, in xmlrpc, bytes are converted to xmlrpclib.Binary object.
    # Historically, odoo is sending binary as base64 string.
    # In python 3, base64.b64{de,en}code() methods now works on bytes.
    # Convert them to str to have a consistent behavior between python 2 and python 3.
    def dump_bytes(self, value, write):
        # XML 1.0 disallows control characters, check for them immediately to
        # see if this is a "real" binary (rather than base64 or somesuch) and
        # blank it out, otherwise they get embedded in the output and break
        # client-side parsers
        if XML_INVALID.search(value):
            self.dump_unicode('', write)
        else:
            self.dump_unicode(ustr(value), write)
    dispatch[bytes] = dump_bytes
    def dump_datetime(self, value, write):
        # override to marshall as a string for backwards compatibility
        value = Datetime.to_string(value)
        self.dump_unicode(value, write)
    dispatch[datetime] = dump_datetime
    def dump_date(self, value, write):
        # marshal dates as strings too, mirroring dump_datetime
        value = Date.to_string(value)
        self.dump_unicode(value, write)
    dispatch[date] = dump_date
    def dump_lazy(self, value, write):
        # unwrap the lazy proxy and marshal its computed value by real type
        v = value._value
        return self.dispatch[type(v)](self, v, write)
    dispatch[lazy] = dump_lazy
    # Command values are marshalled like plain integers;
    # Markup is marshalled as its underlying string form.
    dispatch[Command] = dispatch[int]
    dispatch[Markup] = lambda self, value, write: self.dispatch[str](self, str(value), write)
# monkey-patch xmlrpc.client's marshaller
# Replacing the class itself makes every later dumps()/ServerProxy use in
# this process pick up OdooMarshaller's extra type handlers.
xmlrpc.client.Marshaller = OdooMarshaller
class RPC(Controller):
    """Handle RPC connections."""

    def _xmlrpc(self, service):
        """Common method to handle an XML-RPC request."""
        payload = request.httprequest.get_data()
        call_params, call_method = loads(payload)
        reply = dispatch_rpc(service, call_method, call_params)
        return dumps((reply,), methodresponse=1, allow_none=False)

    @route("/xmlrpc/<service>", auth="none", methods=["POST"], csrf=False, save_session=False)
    def xmlrpc_1(self, service):
        """XML-RPC service that returns faultCode as strings.

        This entrypoint is historical and non-compliant, but kept for
        backwards-compatibility.
        """
        try:
            body = self._xmlrpc(service)
        except Exception as error:
            body = wsgi_server.xmlrpc_handle_exception_string(error)
        return Response(response=body, mimetype='text/xml')

    @route("/xmlrpc/2/<service>", auth="none", methods=["POST"], csrf=False, save_session=False)
    def xmlrpc_2(self, service):
        """XML-RPC service that returns faultCode as int."""
        try:
            body = self._xmlrpc(service)
        except Exception as error:
            body = wsgi_server.xmlrpc_handle_exception_int(error)
        return Response(response=body, mimetype='text/xml')

    @route('/jsonrpc', type='json', auth="none", save_session=False)
    def jsonrpc(self, service, method, args):
        """ Method used by client APIs to contact OpenERP. """
        return dispatch_rpc(service, method, args)
| 38.722772 | 3,911 |
2,918 | py | PYTHON | 15.0 | # -*- coding: utf-8 -*-
from odoo import fields, models
class GroupOnDate(models.Model):
    """Minimal model to exercise read_group grouping on a Date field."""
    _name = 'test_read_group.on_date'
    _description = 'Group Test Read On Date'
    date = fields.Date("Date")
    value = fields.Integer("Value")
class BooleanAggregate(models.Model):
    """Model exercising boolean group_operator aggregates in read_group."""
    _name = 'test_read_group.aggregate.boolean'
    _description = 'Group Test Read Boolean Aggregate'
    _order = 'key DESC'
    key = fields.Integer()
    # aggregated with bool_and / bool_or / array_agg respectively
    bool_and = fields.Boolean(default=False, group_operator='bool_and')
    bool_or = fields.Boolean(default=False, group_operator='bool_or')
    bool_array = fields.Boolean(default=False, group_operator='array_agg')
class Aggregate(models.Model):
    """Simple key/value model with a partner link, for aggregation tests."""
    _name = 'test_read_group.aggregate'
    _order = 'id'
    _description = 'Group Test Aggregate'
    key = fields.Integer()
    value = fields.Integer("Value")
    partner_id = fields.Many2one('res.partner')
# we use a selection that is in reverse lexical order, in order to check the
# possible reordering made by read_group on selection fields
# (keys are deliberately declared 'c', 'b', 'a' so that declaration order
# differs from lexical order)
SELECTION = [('c', "C"), ('b', "B"), ('a', "A")]
class GroupOnSelection(models.Model):
    """Model exercising the various group_expand flavors of Selection fields."""
    _name = 'test_read_group.on_selection'
    _description = 'Group Test Read On Selection'
    state = fields.Selection([('a', "A"), ('b', "B")], group_expand='_expand_states')
    static_expand = fields.Selection(SELECTION, group_expand=True)
    dynamic_expand = fields.Selection(lambda self: SELECTION, group_expand=True)
    no_expand = fields.Selection(SELECTION)
    value = fields.Integer()

    def _expand_states(self, states, domain, order):
        """Group-expand hook: return every selectable state, in declaration order."""
        return [state_key for state_key, _label in type(self).state.selection]
class FillTemporal(models.Model):
    """Model used to test read_group's fill_temporal behavior on date/datetime."""
    _name = 'test_read_group.fill_temporal'
    _description = 'Group Test Fill Temporal'
    date = fields.Date()
    datetime = fields.Datetime()
    value = fields.Integer()
class Order(models.Model):
    """Parent of test_read_group.order.line via the order_id inverse field."""
    _name = 'test_read_group.order'
    _description = 'Sales order'
    line_ids = fields.One2many('test_read_group.order.line', 'order_id')
class OrderLine(models.Model):
    """Line of test_read_group.order; deleted along with its order (cascade)."""
    _name = 'test_read_group.order.line'
    _description = 'Sales order line'
    order_id = fields.Many2one('test_read_group.order', ondelete='cascade')
    value = fields.Integer()
class User(models.Model):
    """User side of the user<->task many2many (shared relation table
    test_read_group_task_user_rel, mirrored by Task.user_ids)."""
    _name = 'test_read_group.user'
    _description = "User"
    name = fields.Char(required=True)
    task_ids = fields.Many2many(
        'test_read_group.task',
        'test_read_group_task_user_rel',
        'user_id',
        'task_id',
        string="Tasks",
    )
class Task(models.Model):
    """Task side of the user<->task many2many (shared relation table
    test_read_group_task_user_rel, mirrored by User.task_ids)."""
    _name = 'test_read_group.task'
    _description = "Project task"
    name = fields.Char(required=True)
    user_ids = fields.Many2many(
        'test_read_group.user',
        'test_read_group_task_user_rel',
        'task_id',
        'user_id',
        string="Collaborators",
    )
| 28.330097 | 2,918 |