repo_name stringlengths 7 111 | __id__ int64 16.6k 19,705B | blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 151 | content_id stringlengths 40 40 | detected_licenses sequence | license_type stringclasses 2 values | repo_url stringlengths 26 130 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 42 | visit_date unknown | revision_date unknown | committer_date unknown | github_id int64 14.6k 687M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 12 values | gha_fork bool 2 classes | gha_event_created_at unknown | gha_created_at unknown | gha_updated_at unknown | gha_pushed_at unknown | gha_size int64 0 10.2M ⌀ | gha_stargazers_count int32 0 178k ⌀ | gha_forks_count int32 0 88.9k ⌀ | gha_open_issues_count int32 0 2.72k ⌀ | gha_language stringlengths 1 16 ⌀ | gha_archived bool 1 class | gha_disabled bool 1 class | content stringlengths 10 2.95M | src_encoding stringclasses 5 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 10 2.95M | extension stringclasses 19 values | num_repo_files int64 1 202k | filename stringlengths 4 112 | num_lang_files int64 1 202k | alphanum_fraction float64 0.26 0.89 | alpha_fraction float64 0.2 0.89 | hex_fraction float64 0 0.09 | num_lines int32 1 93.6k | avg_line_length float64 4.57 103 | max_line_length int64 7 931 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Psayta/WSU_Projects | 18,090,402,285,426 | fd95ee52e7da5030be04684d6f7bca6f82691643 | d34b72e4368fb03b78ca819ab4fd8626b159d25d | /Project 10/procedures.py | 7c52d9ce17c70a969f188ea4258caa0dc80a751c | [] | no_license | https://github.com/Psayta/WSU_Projects | 40722bfb2c6eb3d37615ab8079302b16763ff145 | dbcf984f41141df841d44ecc254c4749451ffdb6 | refs/heads/main | "2023-05-25T09:53:10.678099" | "2021-06-04T19:21:48" | "2021-06-04T19:21:48" | 360,944,142 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Procedure:
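    # Holds a single procedure record (name, date, practitioner, charges) with simple setter and getter methods.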
def __init__(self, ProName, ProDate,
Practitioner, Charges):
self.__ProName = ProName
self.__ProDate = ProDate
self.__Practitioner = Practitioner
self.__Charges = Charges
def ProName(self,ProName):
self.__ProName = ProName
def ProDate(self, ProDate):
self.__ProDate = ProDate
def Practitioner(self, Practitioner):
self.__Practitioner = Practitioner
def Charges(self, Charges):
self.__Charges = Charges
def Get_ProName(self):
return self.__ProName
def Get_ProDate(self):
return self.__ProDate
def Get_Practitioner(self):
return self.__Practitioner
def Get_Charges(self):
return self.__Charges
| UTF-8 | Python | false | false | 789 | py | 20 | procedures.py | 17 | 0.585551 | 0.585551 | 0 | 26 | 28.307692 | 42 |
Reekomer/scrapweb | 17,489,106,865,587 | de2656db84388e91875e3b63558fa48344841665 | aa85a4c6efd6146d8f959aa366dbaca63ad6c27c | /backend/scrapers/artprice/artprice/spiders/invaluable.py | b914fe99a392679c106a5e5c2ad35374abeba1be | [
"MIT"
] | permissive | https://github.com/Reekomer/scrapweb | 030176ce8b0d08583aee32e2724db5e79f783dae | b56eaa4ec5ea23ea2daf95ddf06ba150b8b53bbe | refs/heads/master | "2020-12-30T12:10:52.806859" | "2017-05-16T21:34:48" | "2017-05-16T21:34:48" | 91,506,381 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import scrapy
from scrapy.selector import HtmlXPathSelector
import urlparse
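# Scrapy spider that collects the title, date/location and current bid of drawing lots listed on invaluable.com.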
class Art(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
price = scrapy.Field()
class GetReview(scrapy.Spider):
name = 'invaluable'
start_urls = ["http://www.invaluable.com/drawings/cc-X40D0LU64C/"]
def parse(self, response):
hxs = HtmlXPathSelector(response)
art = Art()
art['title'] = response.xpath('//div[@class="lot-tile-title"]/a/text()').extract()
art['date'] = response.xpath('//p[@class="date-location"]/text()').extract()
art['price'] = response.xpath('//p[@class="current-bid"]/text()').extract()
yield art
| UTF-8 | Python | false | false | 722 | py | 17 | invaluable.py | 10 | 0.599723 | 0.592798 | 0 | 24 | 28.083333 | 90 |
TheUncleKai/bbutils | 7,739,531,112,286 | 794a11bb1047ff94944ce168fe3202f8ed7b2c1b | 98a3afc34adccd68d34a4d681070267714fd8b9d | /tests/logging/console.py | 505e772d50e01b5067d5b6951d188343ec4853ae | [
"Apache-2.0"
] | permissive | https://github.com/TheUncleKai/bbutils | 6f606d54060acdd2b795068bcd77d4d3f4067de2 | 62a1258849c284de5e33fe85fb10a50e5b9f034e | refs/heads/master | "2021-12-15T19:58:34.881224" | "2021-12-10T15:08:30" | "2021-12-10T15:08:30" | 232,920,435 | 0 | 0 | Apache-2.0 | false | "2023-09-01T11:24:32" | "2020-01-09T22:44:05" | "2021-12-10T15:08:34" | "2023-09-01T11:24:31" | 1,099 | 0 | 0 | 2 | Python | false | false | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2017, Kai Raphahn <kai.raphahn@laburec.de>
#
import time
import sys
import unittest
import unittest.mock as mock
import colorama
from bbutil.logging.writer.console import ConsoleWriter, _Style
from bbutil.logging.types import Message, Progress, Writer
RESET_ALL = colorama.Style.RESET_ALL
class Callback(object):
def __init__(self, writer: Writer):
self.item = None
self.writer = writer
return
def append(self, item: Message):
self.writer.write(item)
return
class SysWrite(mock.MagicMock):
encoding = "cp850"
    def __init__(self):
mock.MagicMock.__init__(self)
return
class TestConsoleWriter(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def test_constructor(self):
item = ConsoleWriter()
self.assertEqual(len(item.styles), 8)
self.assertEqual(len(item.error_index), 2)
self.assertEqual(item.encoding, "")
self.assertEqual(item.text_space, 15)
self.assertEqual(item.seperator, "|")
self.assertEqual(item.length, 0)
self.assertEqual(item.bar_len, 50)
self.assertIs(item.stdout, sys.stdout)
self.assertIs(item.stderr, sys.stderr)
self.assertFalse(item.use_error)
return
def test_setup(self):
item = ConsoleWriter()
item.setup(text_space=20, seperator="#", error_index=["INFORM"], bar_len=40)
self.assertEqual(len(item.styles), 8)
self.assertEqual(len(item.error_index), 1)
self.assertEqual(item.encoding, "")
self.assertEqual(item.text_space, 20)
self.assertEqual(item.seperator, "#")
self.assertEqual(item.length, 0)
self.assertEqual(item.bar_len, 40)
self.assertIs(item.stdout, sys.stdout)
self.assertIs(item.stderr, sys.stderr)
self.assertFalse(item.use_error)
return
def test_add_style(self):
item = ConsoleWriter()
item.add_style("XXX", "BRIGHT", "GREEN", "")
self.assertEqual(len(item.styles), 9)
self.assertEqual(item.styles["XXX"].name, "XXX")
return
def test_open(self):
item = ConsoleWriter()
item.open()
self.assertNotEqual(item.encoding, "")
return
def test_write_01(self):
message = Message(app="TEST", level="INFORM", tag="TEST", content="This is a test!")
item = ConsoleWriter()
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.tag, data)
self.assertIn(message.content, data)
return
def test_write_02(self):
message = Message(app="TEST", content="This is a test!", raw=True)
item = ConsoleWriter()
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
self.assertTrue(write_called)
self.assertNotIn(message.app, data)
self.assertIn(message.content, data)
return
def test_write_03(self):
message = Message(app="TEST", level="INFORM", content="This is a test!")
item = ConsoleWriter()
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.content, data)
return
def test_write_04(self):
message = Message(app="TEST", level="ERROR", content="This is a test!")
item = ConsoleWriter()
item.setup(error_index=["ERROR"])
item.open()
item.stderr = SysWrite()
item.write(message)
write_called = item.stderr.write.called
call = item.stderr.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.content, data)
return
def test_write_05(self):
writer = ConsoleWriter()
writer.open()
writer.stdout = SysWrite()
callback = Callback(writer)
progress = Progress(100, 0, callback.append)
n = 0
while True:
progress.inc()
time.sleep(0.0001)
n += 1
if progress.finished is True:
break
write_called = writer.stdout.write.called
count = writer.stdout.write.call_count
self.assertEqual(n, 100)
self.assertEqual(count, 100)
self.assertTrue(write_called)
return
def test_write_06(self):
writer = ConsoleWriter()
writer.open()
writer.stdout = SysWrite()
callback = Callback(writer)
progress = Progress(100, 0, callback.append)
writer.line_width = 20
n = 0
while True:
progress.inc()
time.sleep(0.0001)
n += 1
if progress.finished is True:
break
write_called = writer.stdout.write.called
count = writer.stdout.write.call_count
self.assertEqual(n, 100)
self.assertEqual(count, 0)
self.assertFalse(write_called)
return
def test_write_07(self):
message = Message(app="TEST", level="INFORM", tag="TEST", content="This is a test!")
_style = _Style("INFORM", "BRIGHT", "GREEN", "")
item = ConsoleWriter()
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
_tag = "TEST".ljust(15)
_app_space = len("TEST") + 5
_app = "{0:s} ".format("TEST").ljust(_app_space)
content = "{0:s}{1:s}{2:s} {3:s}{4:s} {5:s}{6:s}\n".format(RESET_ALL,
_app,
_style.scheme,
_tag,
"|",
RESET_ALL,
"This is a test!")
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.tag, data)
self.assertIn(message.content, data)
self.assertEqual(content, data)
return
def test_write_08(self):
message = Message(app="TEST", level="INFORM", tag="TEST", content="This is a test!")
_style = _Style("INFORM", "BRIGHT", "GREEN", "")
item = ConsoleWriter()
item.setup(app_space=15)
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
_tag = "TEST".ljust(15)
_app_space = len("TEST") + 5
_app = "{0:s} ".format("TEST").ljust(15)
content = "{0:s}{1:s}{2:s} {3:s}{4:s} {5:s}{6:s}\n".format(RESET_ALL,
_app,
_style.scheme,
_tag,
"|",
RESET_ALL,
"This is a test!")
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.tag, data)
self.assertIn(message.content, data)
self.assertEqual(content, data)
return
def test_write_09(self):
message = Message(app="TEST", level="INFORM", tag="TEST", content="This is a test!")
_style = _Style("INFORM", "BRIGHT", "GREEN", "")
item = ConsoleWriter()
item.setup(app_space=10)
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
_tag = "TEST".ljust(15)
_app_space = len("TEST") + 5
_app = "{0:s} ".format("TEST").ljust(15)
content = "{0:s}{1:s}{2:s} {3:s}{4:s} {5:s}{6:s}\n".format(RESET_ALL,
_app,
_style.scheme,
_tag,
"|",
RESET_ALL,
"This is a test!")
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.tag, data)
self.assertIn(message.content, data)
self.assertNotEqual(content, data)
return
def test_write_10(self):
message = Message(app="TEST", level="INFORM", tag="TEST", content="This is a test!")
_style = _Style("INFORM", "BRIGHT", "GREEN", "")
item = ConsoleWriter()
item.setup(text_space=10)
item.open()
item.stdout = SysWrite()
item.write(message)
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[0]
(args, kwargs) = call
data = args[0]
print(data)
_tag = "TEST".ljust(10)
_app_space = len("TEST") + 5
_app = "{0:s} ".format("TEST").ljust(_app_space)
content = "{0:s}{1:s}{2:s} {3:s}{4:s} {5:s}{6:s}\n".format(RESET_ALL,
_app,
_style.scheme,
_tag,
"|",
RESET_ALL,
"This is a test!")
self.assertTrue(write_called)
self.assertIn(message.app, data)
self.assertIn(message.tag, data)
self.assertIn(message.content, data)
self.assertEqual(content, data)
return
def test_clear_01(self):
message = Message(app="TEST", content="This is a test!", raw=True)
item = ConsoleWriter()
item.open()
item.stdout = SysWrite()
item.write(message)
item.clear()
count = item.stdout.write.call_count
write_called = item.stdout.write.called
call = item.stdout.write.call_args_list[1]
(args, kwargs) = call
data = args[0]
self.assertTrue(write_called)
self.assertIn('\r', data)
self.assertEqual(count, 2)
return
def test_clear_02(self):
message = Message(app="TEST", content="This is a test!", level="ERROR")
item = ConsoleWriter()
item.setup(error_index=["ERROR"])
item.open()
item.stderr = SysWrite()
item.write(message)
item.clear()
count = item.stderr.write.call_count
write_called = item.stderr.write.called
call = item.stderr.write.call_args_list[1]
(args, kwargs) = call
data = args[0]
self.assertTrue(write_called)
self.assertIn('\r', data)
self.assertEqual(count, 2)
return
| UTF-8 | Python | false | false | 13,088 | py | 33 | console.py | 29 | 0.508175 | 0.495492 | 0 | 443 | 28.544018 | 92 |
briancabbott/GitHub-Repository-Downloader | 3,745,211,489,833 | 9d33b3de675befb9a416ffe6d8428a5362a97ee0 | 5c157c4e76ca54d30f543e0393eae29d49d90962 | /TypeScript/resources/code/ts_lib/Apollo-Latest/apollographql/internal-platform-orb/src/scripts/generate-mustache-parameters.py | f4f570adfb61f710f7bbec7bff6265bf01d77989 | [
"MIT"
] | permissive | https://github.com/briancabbott/GitHub-Repository-Downloader | a69ccc97100947525fd77e822544b84b82c5636a | b2ea9502f68e64ff4c8e02ff6113f4f4dc927f60 | refs/heads/master | "2023-05-25T07:21:43.908126" | "2023-05-21T08:05:45" | "2023-05-21T08:05:45" | 148,926,714 | 1 | 0 | null | false | "2022-12-08T23:32:10" | "2018-09-15T17:59:55" | "2022-07-10T13:31:30" | "2022-12-08T23:32:10" | 126,905 | 0 | 0 | 3 | TypeScript | false | false | #!/usr/bin/env python3
# PORTIONS OF THIS CODE from
# https://github.com/CircleCI-Public/path-filtering-orb/blob/main/src/scripts/create-parameters.py
import json
import os
import re
import subprocess
from functools import reduce
def checkout(revision):
"""
Helper function for checking out a branch
:param revision: The revision to checkout
:type revision: str
"""
subprocess.run(
['git', 'checkout', revision],
check=True
)
output_path = os.environ.get('OUTPUT_PATH')
head = os.environ.get('CIRCLE_SHA1')
base_revision = os.environ.get('BASE_REVISION')
mapping = os.environ.get("MAPPING")
if not mapping:
print("fallback to reading the file")
mapping = open(os.environ.get("MAPPING_FILE")).read()
#checkout(base_revision) # Checkout base revision to make sure it is available for comparison
# checkout(head) # return to head commit
base = subprocess.run(
['git', 'merge-base', base_revision, head],
check=True,
capture_output=True
).stdout.decode('utf-8').strip()
if head == base:
try:
# If building on the same branch as BASE_REVISION, we will get the
# current commit as merge base. In that case try to go back to the
# first parent, i.e. the last state of this branch before the
# merge, and use that as the base.
base = subprocess.run(
['git', 'rev-parse', 'HEAD~1'], # FIXME this breaks on the first commit, fallback to something
check=True,
capture_output=True
).stdout.decode('utf-8').strip()
except:
# This can fail if this is the first commit of the repo, so that
# HEAD~1 actually doesn't resolve. In this case we can compare
# against this magic SHA below, which is the empty tree. The diff
# to that is just the first commit as patch.
base = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
print('Comparing {}...{}'.format(base, head))
changes = subprocess.run(
['git', 'diff', '--name-only', base, head],
check=True,
capture_output=True
).stdout.decode('utf-8').splitlines()
mappings = [
m.split() for m in
mapping.splitlines()
]
def check_mapping(m):
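    # A mapping line is "<path-regex> <parameter> <value>"; return True if any changed file matches the path regex.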
if 3 != len(m):
raise Exception(f"Invalid mapping ({m})")
path, param, value = m
regex = re.compile(r'^' + path + r'$')
for change in changes:
if regex.match(change):
return True
return False
# HERE IS THE DIVERGENCE FROM PATH-FILTERING
def convert_mapping(accumulator, current):
"""
arguments:
accumulator -- dictionary type. Keys and values will be passed to mustache to enrich template
current -- array of the last two values in the matching MAPPING line. aka will be ["build-what", "one"]
"""
parameter_name = current[1]
parameter_value = json.loads(current[2])
is_parameter_an_array = isinstance(parameter_value, list)
if is_parameter_an_array:
parameter_array = accumulator.get(parameter_name, [])
parameter_value_array_value = parameter_value[0]
parameter_array.append(parameter_value_array_value)
accumulator[parameter_name] = parameter_array
else:
accumulator[parameter_name] = parameter_value
return accumulator
# END DIVERGENCE FROM PATH-FILTERING
mappings = filter(check_mapping, mappings)
mappings = reduce(convert_mapping, mappings, {}) # (I also changed this to a reduce function...)
with open(output_path, 'w') as fp:
fp.write(json.dumps(mappings))
| UTF-8 | Python | false | false | 3,338 | py | 3,942 | generate-mustache-parameters.py | 1,770 | 0.696225 | 0.68574 | 0 | 108 | 29.907407 | 109 |
mehultandale/hotel-management | 4,870,492,952,485 | 1f9df9ea3838d5c9d2284584ffb90d4aceddb173 | 0ef0fbee7338d3e72a415a2d31db1a42cc1e9bb0 | /hotel_management/rooms/urls.py | 6cbced0a81d705f3b8c72c529a6f20dae9b44fbc | [] | no_license | https://github.com/mehultandale/hotel-management | a3d6c26077d074bb3cf7b438d185c6930286ebd4 | 693f4accb31b33660fa0d75fe433f0f6071c4147 | refs/heads/master | "2021-04-18T21:28:51.974126" | "2018-03-27T12:25:20" | "2018-03-27T12:25:20" | 126,471,608 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.conf.urls import url, include
from django.contrib import admin
from .views import room_list, FloorList
urlpatterns = [
url(r'^room-list/', room_list),
url(r'^floor-list/', FloorList.as_view())
] | UTF-8 | Python | false | false | 212 | py | 6 | urls.py | 5 | 0.721698 | 0.721698 | 0 | 10 | 20.2 | 42 |
EvelynZhou/separation | 9,904,194,628,326 | 3041ab7cf11d24926ea0164399b56f4659be84a9 | 39e4acd72a12e203ceeef2330cbca194c2299fbd | /preprocess.py | a4569ed21f7a833fdb044b3f8a5f935085337a48 | [] | no_license | https://github.com/EvelynZhou/separation | 0351049d1088d5b698f83ba003bf02e714a5aac0 | cb05b3a881a1a99586c0fe33ee903e3e32e091e4 | refs/heads/master | "2020-06-24T09:01:09.382200" | "2019-07-26T01:21:24" | "2019-07-26T01:21:24" | 198,923,384 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/env python
import librosa
import numpy as np
import soundfile as sf
import os
import random
from config import ModelConfig
def get_filenames(datafilename):
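    # Return the paths of all .wav files found in the given directory.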
wavf = os.listdir(datafilename)
wavfile = []
for file in wavf:
if file.endswith(".wav"):
wavfile.append(datafilename + '/' + file)
return wavfile
def get_random_wav_batch(filenames1, filenames2, sec, sr=ModelConfig.SR):
    # filenames1 holds the speech files and filenames2 the noise files;
    # the start point within each clip is chosen at random
filen1 = get_filenames(filenames1)
filen2 = get_filenames(filenames2)
f1 = random.sample(filen1, ModelConfig.file_Num)
f2 = random.sample(filen2, ModelConfig.file_Num)
list1 = np.array(
list(map(lambda f: _sample_range(_pad_wav(librosa.load(f, sr=sr, mono=True)[0], sr, sec), sr, sec), f1)))
list2 = np.array(
list(map(lambda f: _sample_range(_pad_wav(librosa.load(f, sr=sr, mono=True)[0], sr, sec), sr, sec), f2)))
minxed = list1 + list2
return minxed, list1, list2
def enframe(signal,nw = ModelConfig.L_FRAME, inc = ModelConfig.L_HOP, winfunc = 'hann'):
    '''
    :param signal: input speech signal
    :param nw: frame length (in samples)
    :param inc: frame shift / hop (in samples)
    :param winfunc: window type; the implementation applies a Hamming window
    :return: matrix of windowed frames
    '''
signal_len = len(signal)
if signal_len <= nw:
nframe = 1
else:
nframe = int(np.ceil((1.0*signal_len-nw+inc)/inc))
pad_length = int((nframe-1)*inc + nw)
zeros = np.zeros((pad_length - signal_len))
pad_signal = np.concatenate((signal, zeros))
indices = np.tile(np.arange(0,nw), (nframe,1)) + np.tile(np.arange(0,nframe*inc,inc), (nw,1)).T
indices = np.array(indices, dtype=np.int32)
frames = pad_signal[indices]
winf = np.hamming(nw)
win = np.tile(winf, (nframe, 1))
return frames*win
def get_batch_frame(signal):
batch_size = signal.shape[0]
batch_data=[]
for i in range(0,batch_size):
batch_data.append(enframe(signal[i]))
return np.array(batch_data)
def batch_to_frame(pre_data, mixed_phase):
    '''
    :param pre_data: predicted magnitude spectrogram
    :param mixed_phase: phase of the mixed (noisy) speech
    :return: time-domain frames reconstructed from the predicted spectrogram
    '''
rebuild_frame = []
nframe = pre_data.shape[0]
for i in range(0, nframe):
freq_positive = pre_data[i] * np.exp(1.j * mixed_phase[i])
freq_positive_len = len(freq_positive)
        # handle both odd- and even-length spectra when mirroring the negative frequencies
freq_negative = []
if freq_positive_len % 2 == 1:
for j in range(2, freq_positive_len):
freq_negative.append(np.conjugate(freq_positive[freq_positive_len - j]))
else:
for j in range(1, freq_positive_len):
freq_negative.append(np.conjugate(freq_positive[freq_positive_len - j]))
freq_negative = np.array(freq_negative)
freq_batch = np.r_[freq_positive, freq_negative]
data = np.fft.ifft(freq_batch)
rebuild_frame.append(data)
return np.array(rebuild_frame)
def frame_to_wav(frame, nw = ModelConfig.L_FRAME, inc = ModelConfig.L_HOP):
    '''
    :param frame: consecutive time-domain frames
    :param nw: frame length (in samples)
    :param inc: frame shift / hop (in samples)
    :return: reconstructed time-domain speech signal
    '''
n_frame, freq = frame.shape
wav_len = (n_frame - 1) * inc + nw
wav = np.array(np.zeros([wav_len],dtype = np.float))
for i in range(0,n_frame):
# a = np.array(wav[ i*inc : i*inc+nw])
# b= np.array(frame[i,:])
# c=a+b
wav[i*inc : i*inc+nw ] = wav[i*inc : i*inc+nw] + frame[i,:]
return wav
def get_spec(wav, freq_nums = ModelConfig.L_FRAME):
# return np.array(list(map(lambda w: (np.fft.fft(w))[:,0:int(freq_nums/2 +1 )], wav)))
batch_size, frame_num, freq = wav.shape
spec = np.array(list(map(lambda w: (np.fft.fft(w))[:,0:int(freq_nums/2 +1 )], wav)))
return spec
# Batch considered: convert waveforms to spectrograms (STFT)
def to_spectrogram(wav, len_frame=ModelConfig.L_FRAME, len_hop=ModelConfig.L_HOP):
return np.array(list(map(lambda w: librosa.stft(w, n_fft=len_frame, hop_length=len_hop), wav)))
# Batch considered: rebuild waveforms from magnitude and phase
def to_wav(mag, phase, len_hop=ModelConfig.L_HOP):
stft_matrix = get_stft_matrix(mag, phase)
return np.array(list(map(lambda s: librosa.istft(s, hop_length=len_hop), stft_matrix)))
# Batch considered: convert spectrograms directly back to waveforms
def to_wav_from_spec(stft_maxrix, len_hop=ModelConfig.L_HOP):
return np.array(list(map(lambda s: librosa.istft(s, hop_length=len_hop), stft_maxrix)))
# Batch considered
def to_wav_mag_only(mag, init_phase, len_frame=ModelConfig.L_FRAME, len_hop=ModelConfig.L_HOP, num_iters=50):
# return np.array(list(map(lambda m_p: griffin_lim(m, len_frame, len_hop, num_iters=num_iters, phase_angle=p)[0], list(zip(mag, init_phase))[1])))
return np.array(list(map(lambda m: lambda p: griffin_lim(m, len_frame, len_hop, num_iters=num_iters, phase_angle=p),
list(zip(mag, init_phase))[1])))
# Batch considered: take the magnitude of the spectrograms
def get_magnitude(stft_matrixes):
return np.abs(stft_matrixes)
# Batch considered: take the phase of the spectrograms
def get_phase(stft_maxtrixes):
return np.angle(stft_maxtrixes)
# Batch considered: combine magnitude and phase into complex spectrograms
def get_stft_matrix(magnitudes, phases):
return magnitudes * np.exp(1.j * phases)
# Batch considered
def soft_time_freq_mask(target_src, remaining_src):
mask = np.abs(target_src) / (np.abs(target_src) + np.abs(remaining_src) + np.finfo(float).eps)
return mask
# Batch considered
def hard_time_freq_mask(target_src, remaining_src):
mask = np.where(target_src > remaining_src, 1., 0.)
return mask
def write_wav(data, path, sr=ModelConfig.SR, format='wav', subtype='PCM_16'):
sf.write('{}.wav'.format(path), data, sr, format=format, subtype=subtype)
def griffin_lim(mag, len_frame, len_hop, num_iters, phase_angle=None, length=None):
assert (num_iters > 0)
if phase_angle is None:
phase_angle = np.pi * np.random.rand(*mag.shape)
spec = get_stft_matrix(mag, phase_angle)
for i in range(num_iters):
wav = librosa.istft(spec, win_length=len_frame, hop_length=len_hop, length=length)
if i != num_iters - 1:
spec = librosa.stft(wav, n_fft=len_frame, win_length=len_frame, hop_length=len_hop)
_, phase = librosa.magphase(spec)
phase_angle = np.angle(phase)
spec = get_stft_matrix(mag, phase_angle)
return wav
def _pad_wav(wav, sr, duration):  # zero-pad the signal to the target duration
assert (wav.ndim <= 2)
n_samples = int(sr * duration)
pad_len = np.maximum(0, n_samples - wav.shape[-1])
if wav.ndim == 1:
pad_width = (0, pad_len)
else:
pad_width = ((0, 0), (0, pad_len))
wav = np.pad(wav, pad_width=pad_width, mode='constant', constant_values=0)
return wav
def _sample_range(wav, sr, duration):  # randomly sample the start point of a segment
assert (wav.ndim <= 2)
target_len = int(sr * duration)
wav_len = wav.shape[-1]
start = np.random.choice(range(np.maximum(1, wav_len - target_len)), 1)[0]
end = start + target_len
if wav.ndim == 1:
wav = wav[start:end]
else:
wav = wav[:, start:end]
return wav
def spec_to_batch(src):
# shape = (batch_size, n_frames, n_freq) => (batch_size, n_freq, n_frames)
# num_wavs, freq, n_frames = src.shape
num_wavs, n_frames, freq = src.shape
# Padding
pad_len = 0
if n_frames % ModelConfig.SEQ_LEN > 0:
pad_len = (ModelConfig.SEQ_LEN - (n_frames % ModelConfig.SEQ_LEN))
pad_width = ((0, 0), (0, pad_len), (0, 0))
padded_src = np.pad(src, pad_width=pad_width,
mode='constant', constant_values=0)
# assert((padded_src.shape[-1] % ModelConfig.SEQ_LEN )== 0)
# batch = np.reshape(padded_src.transpose(0, 2, 1),
# (-1, ModelConfig.SEQ_LEN, freq))
batch = np.reshape(padded_src,(-1, ModelConfig.SEQ_LEN, freq))
return batch, padded_src
def batch_to_spec(src, num_wav):
# shape = (batch_size, n_frames, n_freq) => (batch_size, n_freq,
# n_frames)
batch_size, seq_len, freq = src.shape
src = np.reshape(src, (num_wav, -1, freq))
src = src.transpose(0, 2, 1)
return src | UTF-8 | Python | false | false | 8,357 | py | 10 | preprocess.py | 8 | 0.624704 | 0.612477 | 0 | 235 | 33.110638 | 150 |
collective/transmogrify.webcrawler | 17,351,667,910,027 | e7d9321ac8cc8b457b4f27af205847b64ebabefe | 858e645a8b087da48d8031c1c0cfa2d089e002da | /transmogrify/webcrawler/webcrawler.py | d0e61a3baf8db290c3102abf307f35a8b213adb7 | [] | no_license | https://github.com/collective/transmogrify.webcrawler | 4cfa089140be8a535f8abd89201b9ff25bbf8d5c | 4a169c043b40b96121c0e874033393218e1371cc | refs/heads/master | "2023-08-25T03:16:37.610710" | "2014-02-03T05:45:40" | "2014-02-03T05:45:40" | 573,984 | 0 | 2 | null | false | "2014-11-20T17:42:06" | "2010-03-22T13:39:11" | "2014-02-03T05:45:56" | "2014-11-20T17:37:29" | 1,002 | 13 | 7 | 3 | Python | null | null | from _socket import socket
from transmogrify.webcrawler.staticcreator import OpenOnRead
from zope.interface import implements
from zope.interface import classProvides
from collective.transmogrifier.interfaces import ISectionBlueprint
from collective.transmogrifier.interfaces import ISection
from transmogrify.webcrawler.external import webchecker
from transmogrify.webcrawler.external.webchecker import Checker,Page
from transmogrify.webcrawler.external.webchecker import MyHTMLParser,MyStringIO
import re
from htmlentitydefs import entitydefs
from bs4 import UnicodeDammit
import urllib,os, urlparse
from sys import stderr
import urlparse
import logging
from ConfigParser import ConfigParser
from staticcreator import CachingURLopener
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
"""
transmogrify.webcrawler
=======================
A source blueprint for crawling content from a site or local html files.
Webcrawler imports HTML either from a live website, for a folder on disk, or a folder
on disk with html which used to come from a live website and may still have absolute
links refering to that website.
To crawl a live website supply the crawler with a base http url to start crawling with.
This url must be the url which all the other urls you want from the site start with.
For example ::
[crawler]
blueprint = transmogrify.webcrawler
url = http://www.whitehouse.gov
max = 50
will restrict the crawler to the first 50 pages.
You can also crawl a local directory of html with relative links by just using a file: style url ::
[crawler]
blueprint = transmogrify.webcrawler
url = file:///mydirectory
or if the local directory contains html saved from a website and might have absolute urls in it,
then you can set this as the cache. The crawler will always look up the cache first ::
[crawler]
blueprint = transmogrify.webcrawler
url = http://therealsite.com --crawler:cache=mydirectory
The following will not crawl anything larger than 400kB (maxsize is given in bytes) ::
[crawler]
blueprint = transmogrify.webcrawler
url = http://www.whitehouse.gov
maxsize=400000
To skip crawling links by regular expression ::
[crawler]
blueprint = transmogrify.webcrawler
url=http://www.whitehouse.gov
ignore = \.mp3
\.mp4
If webcrawler is having trouble parsing the html of some pages you can preprocess
the html before it is parsed, e.g. ::
[crawler]
blueprint = transmogrify.webcrawler
patterns = (<script>)[^<]*(</script>)
subs = \1\2
If you'd like to skip processing links with certain mimetypes you can use the
drop:condition. This TALES expression determines what will be processed further.
see http://pypi.python.org/pypi/collective.transmogrifier/#condition-section
::
[drop]
blueprint = collective.transmogrifier.sections.condition
condition: python:item.get('_mimetype') not in ['application/x-javascript','text/css','text/plain','application/x-java-byte-code'] and item.get('_path','').split('.')[-1] not in ['class']
Options:
:site_url:
- the top url to crawl
:ignore:
- list of regex for urls to not crawl
:whitelist:
- list of regex for urls. If enabled only urls that match these expressions will be crawled
:cache:
- local directory to read crawled items from instead of accessing the site directly
:patterns:
- Regular expressions to substitute before html is parsed. New line seperated
:subs:
- Text to replace each item in patterns. Must be the same number of lines as patterns. Due to the way buildout handles empty lines, to replace a pattern with nothing (eg to remove the pattern), use ``<EMPTYSTRING>`` as a substitution.
:maxsize:
- don't crawl anything larger than this
:max:
- Limit crawling to this number of pages
:start-urls:
- a list of urls to initially crawl
:ignore-robots:
- if set, will ignore the robots.txt directives and crawl everything
:post-only:
- if 'true' any url with a query string will submit with a POST instead of a GET
WebCrawler will emit items like ::
item = dict(_site_url = "Original site_url used",
            _path = "The url crawled without _site_url",
            _content = "The raw content returned by the url",
            _content_info = "Headers returned with content",
            _backlinks = names,
            _sortorder = "An integer representing the order the url was found within the page/site"
)
"""
CHECKEXT = False # Check external references (1 deep)
VERBOSE = 0 # Verbosity level (0-3)
MAXPAGE = 150000 # Ignore files bigger than this
NONAMES = 0 # Force name anchor checking
def match_first(pat_list, text):
"""return the first pattern to match the text"""
for pat in pat_list:
if pat and pat.search(text):
return pat.pattern
class WebCrawler(object):
classProvides(ISectionBlueprint)
implements(ISection)
def __init__(self, transmogrifier, name, options, previous):
self.previous = previous
try:
self.feedback = ISectionFeedback(transmogrifier)
except:
self.feedback = None
#self.open_url = MyURLopener().open
self.options = options
self.ignore_re = [re.compile(pat.strip()) for pat in options.get("ignore",'').split('\n') if pat]
self.whitelist_re = [re.compile(pat.strip()) for pat in options.get("whitelist",'').split('\n') if pat]
self.logger = logging.getLogger(name)
self.checkext = options.get('checkext', CHECKEXT)
self.verbose = options.get('verbose', VERBOSE)
self.maxpage = options.get('maxsize', None)
self.nonames = options.get('nonames', NONAMES)
self.site_url = options.get('site_url', options.get('url', None))
self.starts = [u for u in options.get('start-urls', '').strip().split() if u]
self.max = options.get('max',None)
self.cache = options.get('cache', None)
self.postonly = options.get('post-only', 'false').lower() in ["true","yes"]
self.context = transmogrifier.context
#self.alias_bases = [a for a in options.get('alias_bases', '').split() if a]
# make sure we end with a /
if self.site_url[-1] != '/':
self.site_url += '/'
if os.path.exists(self.site_url):
self.site_url = 'file://'+urllib.pathname2url(self.site_url)
def __iter__(self):
for item in self.previous:
yield item
if not self.site_url:
return
options = self.options
def pagefactory(text, url, verbose=VERBOSE, maxpage=MAXPAGE, checker=None):
try:
page = LXMLPage(text,url,verbose,maxpage,checker,options,self.logger)
except HTMLParseError, msg:
#msg = self.sanitize(msg)
##elf.note(0, "Error parsing %s: %s",
# self.format_url(url), msg)
# Dont actually mark the URL as bad - it exists, just
# we can't parse it!
page = None
return page
webchecker.Page = pagefactory
self.checker = MyChecker(self.site_url, self.cache)
#self.checker.alias_bases = self.alias_bases
self.checker.setflags(checkext = self.checkext,
verbose = self.verbose,
maxpage = self.maxpage,
nonames = self.nonames)
self.checker.postonly = self.postonly
self.checker.ignore_robots = options.get('ignore_robots', "false").lower() in ['true','on']
self.checker.resetRun()
#must take off the '/' for the crawler to work
self.checker.addroot(self.site_url[:-1])
self.checker.sortorder[self.site_url] = 0
# make sure start links go first
root = self.checker.todo.popitem()
for url in self.starts:
if url == self.site_url[:-1]:
continue
self.checker.newtodolink((url,''), '<root>')
self.checker.sortorder[url] = 0
self.checker.todo[root[0]] = root[1]
#for root in self.alias_bases:
# self.checker.addroot(root, add_to_do = 0)
# self.checker.sortorder[root] = 0
while self.checker.todo:
if self.max and len(self.checker.done) == int(self.max):
break
urls = self.checker.todo.keys()
#urls.sort()
del urls[1:]
for url,part in urls:
ignore_pat = match_first(self.ignore_re, url)
whitelist_pat = match_first(self.whitelist_re, url)
if ignore_pat:
self.logger.debug("Ignoring: %s due to '%s'" % (str(url), patstr))
self.checker.markdone((url,part))
yield dict(_bad_url = url)
elif len(self.whitelist_re) > 0 and not whitelist_pat:
self.checker.markdone((url,part))
self.logger.debug("not in whitelist: %s" %str(url))
yield dict(_bad_url = url)
elif not url.startswith(self.site_url[:-1]):
self.checker.markdone((url,part))
self.logger.debug("External: %s" %str(url))
yield dict(_bad_url = url)
else:
base = self.site_url
self.checker.dopage((url,part))
page = self.checker.name_table.get(url) #have to usse unredirected
origin = url
url = self.checker.redirected.get(url,url)
names = self.checker.link_names.get(url,[])
path = url[len(self.site_url):]
info = self.checker.infos.get(url)
file = self.checker.files.get(url)
sortorder = self.checker.sortorder.get(origin,0)
text = page and page.html() or file
#clean url. if trailing slash html has already had links rewritten
path = '/'.join([p for p in path.split('/') if p])
# unquote the url as plone id does not support % or + but do support space
path = urllib.unquote_plus(path)
if info and text:
if origin != url:
# we've been redirected. emit a redir item so we can put in place redirect
orig_path = origin[len(self.site_url):]
orig_path = '/'.join([p for p in orig_path.split('/') if p])
#import pdb; pdb.set_trace()
if orig_path:
# unquote the url as plone id does not support % or + but do support space
orig_path = urllib.unquote_plus(orig_path)
yield(dict(_path = orig_path,
_site_url = base,
_sortorder = sortorder,
_orig_path = orig_path,
_redir = path))
else:
orig_path = None
item = dict(_path = path,
_site_url = base,
_backlinks = names,
_sortorder = sortorder,
_content = text,
_content_info = info,
_orig_path = path)
# don't rewrite it we have a link object
# if orig_path is not None:
# # we got redirected, let's rewrite the links
# item['_origin'] = orig_path
if page and page.html():
item['_html'] = page.text #so cache save no cleaned version
if self.feedback:
self.feedback.success('webcrawler',msg)
ctype = item.get('_content_info',{}).get('content-type','')
csize = item.get('_content_info',{}).get('content-length',0)
date = item.get('_content_info',{}).get('date','')
self.logger.info("Crawled: %s (%d links, size=%s, %s %s)" % (
str(url),
len(item.get('_backlinks',[])),
csize,
ctype,
date))
yield item
else:
self.logger.debug("Error: %s" %str(url))
yield dict(_bad_url = origin)
class MyChecker(Checker):
link_names = {} #store link->[name]
def __init__(self, site_url, cache):
self.cache = cache
self.site_url = site_url
self.reset()
def message(self, format, *args):
pass # stop printing out crap
def reset(self):
self.infos = {}
self.files = {}
self.redirected = {}
self.alias_bases = {}
self.sortorder = {}
self.counter = 0
Checker.reset(self)
self.urlopener = CachingURLopener(cache = self.cache, site_url=self.site_url)
def resetRun(self):
self.roots = []
self.todo = OrderedDict()
self.done = {}
self.bad = {}
def readhtml(self, url_pair):
res = Checker.readhtml(self, url_pair)
return res
def openhtml(self, url_pair):
oldurl, fragment = url_pair
f = self.openpage(url_pair)
if f:
url = f.geturl()
if url != oldurl:
self.redirected[oldurl] = url
self.infos[url] = info = f.info()
#Incement counter to get ordering of links within pages over whole site
if not self.checkforhtml(info, url):
#self.files[url] = f.read()
self.files[url] = f
#self.safeclose(f)
f = None
else:
url = oldurl
return f, url
def openpage(self, url_pair):
url, fragment = url_pair
old_pair = url_pair
old_url = url
# actually open alias instead
# if self.site_url.endswith('/'):
# realbase=self.site_url[:-1]
# for a in self.alias_bases:
# if a.endswith('/'):
# a=a[:-1]
# if a and url.startswith(a):
# base = url[:len(a)]
# path = url[len(a):]
# url = realbase+path
# break
if self.postonly and '?' in url:
parts = urlparse.urlparse(url)
url = urlparse.urlunparse(parts[:4]+('',''))
data = parts.query
else:
data = None
try:
return self.urlopener.open(old_url, data=data)
except (OSError, IOError), msg:
msg = self.sanitize(msg)
self.note(0, "Error %s", msg)
if self.verbose > 0:
self.show(" HREF ", url, " from", self.todo[url_pair])
self.setbad(old_pair, msg)
return None
def setSortOrder(self, link):
""" give each link a counter as it's encountered to later use in sorting """
if link not in self.sortorder:
self.sortorder[link] = self.counter
self.counter = self.counter + 1
def isallowed(self, root, url):
if self.ignore_robots:
return True
return Checker.isallowed(self, root, url)
import lxml.html
import lxml.html.soupparser
from lxml.html.clean import Cleaner
from lxml.html.clean import clean_html
import HTMLParser
from HTMLParser import HTMLParseError
from lxml.etree import tostring
# do tidy and parsing and links via lxml. also try to encode page properly
class LXMLPage:
def __init__(self, text, url, verbose=VERBOSE, maxpage=MAXPAGE, checker=None, options=None, logger=None):
self.text = text
self.url = url
self.verbose = verbose
self.maxpage = maxpage
self.logger = logger
self.checker = checker
self.options = options
# The parsing of the page is done in the __init__() routine in
# order to initialize the list of names the file
# contains. Stored the parser in an instance variable. Passed
# the URL to MyHTMLParser().
size = len(self.text)
if self.maxpage and size > self.maxpage:
self.logger.info("%s Skip huge file (%.0f Kbytes)" % (self.url, (size*0.001)))
self.parser = None
return
if options:
text = self.reformat(text, url)
self.logger.debug("Parsing %s (%d bytes)" %( self.url, size) )
#text = clean_html(text)
self.parser = None
try:
# http://stackoverflow.com/questions/2686709/encoding-in-python-with-lxml-complex-solution
info = self.checker.infos.get(url)
try:
http_charset = info.getheader('Content-Type').split('charset=')[1]
except:
http_charset = ""
if http_charset == "":
ud = UnicodeDammit(text, is_html=True)
else:
ud = UnicodeDammit(text, override_encodings=[http_charset], is_html=True)
if not ud.unicode_markup:
raise UnicodeDecodeError(
"Failed to detect encoding, tried [%s]",
                    ', '.join(ud.tried_encodings))
# print converted.originalEncoding
# we shouldn't decode to unicode first http://lxml.de/parsing.html#python-unicode-strings
# but we will try it anyway. If there is a conflict we get a ValueError
self.parser = lxml.html.fromstring(ud.unicode_markup)
        except (UnicodeDecodeError, HTMLParseError):
self.logger.error("HTMLParseError %s"%url)
pass
except ValueError:
self.logger.error("HTMLParseError %s"%url)
pass
# fallback to lxml beautifulsoup parser
if self.parser is None:
try:
self.parser = lxml.html.soupparser.fromstring(text)
except HTMLParser.HTMLParseError:
self.logger.log(logging.INFO, "webcrawler: HTMLParseError %s"%url)
raise
self.parser.resolve_base_href()
if '../images/doh/transparent.gif' in text:
pass
#some sites do this instead of setting the base tag. It messes with funnelweb and plone
#so we'll just rewrite all the links so we can take the / off :)
self.parser.make_links_absolute(url, resolve_base_href=True)
strip = lambda l: l.rstrip('/') if l.startswith(url) else l
self.parser.rewrite_links(strip)
self._html = tostring(self.parser,
encoding=unicode,
method="html",
pretty_print=True)
assert self._html is not None
def note(self, level, msg, *args):
pass
# Method to retrieve names.
def getnames(self):
#if self.parser:
# return self.parser.names
#else:
return []
def html(self):
if self.parser is None:
return ''
#cleaner = Cleaner(page_structure=False, links=False)
#rhtml = cleaner.clean_html(html)
return self._html
def getlinkinfos(self):
# File reading is done in __init__() routine. Store parser in
# local variable to indicate success of parsing.
# If no parser was stored, fail.
if self.parser is None: return []
base = urlparse.urljoin(self.url, self.parser.base_url or "")
infos = []
for element, attribute, rawlink, pos in self.parser.iterlinks():
t = urlparse.urlparse(rawlink)
# DON'T DISCARD THE FRAGMENT! Instead, include
# it in the tuples which are returned. See Checker.dopage().
fragment = t[-1]
t = t[:-1] + ('',)
if '../images/doh/transparent.gif' in rawlink:
pass
rawlink = urlparse.urlunparse(t)
link = urlparse.urljoin(base, rawlink)
if link[-1] == '/':
link = link[:-1]
#override to get link text
if attribute == 'href':
name = ' '.join(element.text_content().split())
self.checker.link_names.setdefault(link,[]).extend([(self.url,name)])
elif attribute == 'src':
name = element.get('alt','')
self.checker.link_names.setdefault(link,[]).extend([(self.url,name)])
self.checker.setSortOrder(link)
#and to filter list
infos.append((link, rawlink, fragment))
# Need to include any redirects via refresh meta tag
# e.g.'<meta http-equiv="refresh" content="1;url=http://blah.com" />'
# TODO: should really be counted as redirect not a link
for tag in self.parser.iterdescendants(tag='meta'):
if tag.get('http-equiv','').lower() == 'refresh':
#import pdb; pdb.set_trace()
url = tag.get('content','')
if url:
_,rawlink = url.lower().split("url=",1)
link = urlparse.urljoin(base, rawlink)
infos.append((link,rawlink,""))
return infos
def reformat(self, text, url):
pattern = self.options.get('patterns','')
replace = self.options.get('subs','')
replace = ['<EMPTYSTRING>' != item and item or '' \
for item in replace.split('\n')]
for p,r in zip(pattern.split('\n'),replace):
if p and r:
text,n = re.subn(p,r,text)
if n:
self.logger.debug( "patching %s with %i * %s" % (url,n,p) )
return text
| UTF-8 | Python | false | false | 22,404 | py | 9 | webcrawler.py | 5 | 0.559141 | 0.555749 | 0 | 600 | 36.336667 | 236 |
gmbz/frro-soporte-TPI-09 | 18,751,827,219,837 | 23276924e8bbb0de6849dab4f16f9610131b2925 | e55cc6431790570c2534ebe094ec2be0d36c2385 | /app/controller/users_controller.py | 2eea19afb5c4f1f8c39c44a8564f90131c20d813 | [
"MIT"
] | permissive | https://github.com/gmbz/frro-soporte-TPI-09 | 3e4212be279ff026cb8dc9d995709a5d4ed8230f | 5161f5002fa11307c357920ec2278bb97f92f926 | refs/heads/main | "2023-08-24T18:25:02.846157" | "2021-10-20T17:39:00" | "2021-10-20T17:39:00" | 403,168,548 | 2 | 0 | MIT | true | "2021-10-17T19:36:05" | "2021-09-04T22:37:27" | "2021-10-16T22:49:51" | "2021-10-17T19:36:05" | 162 | 1 | 0 | 0 | Python | false | false | from typing import Optional
from ..database import user_db
from ..helpers import helper
from ..models.exceptions import UserNotValid, UserAlreadyExists, UserNotFound
from ..models.models import Usuario
def register_user(user_: Usuario) -> Usuario:
try:
if helper.validate_registration(user_):
user_.set_password(user_.password)
new_user = user_db.register_user(user_)
return new_user
except UserNotValid as exc:
return exc
except UserAlreadyExists as exc:
return exc
def autenticacion(user_: Usuario) -> Usuario:
try:
user = user_db.autenticacion(user_)
return user
except UserNotFound as exc:
return exc
def buscar_id(id_usuario: int) -> Optional[Usuario]:
return user_db.buscar_id(id_usuario)
def change_pass(user_: Usuario):
try:
if helper.validate_pass(user_):
user = user_db.buscar_id_user(user_)
user.set_password(user_.password)
user_db.change_pass()
return user
except UserNotFound as exc:
return exc
| UTF-8 | Python | false | false | 1,098 | py | 53 | users_controller.py | 30 | 0.644809 | 0.644809 | 0 | 41 | 25.780488 | 77 |
astrojuanlu/ezaero | 6,176,163,004,552 | 902ca7aa16c31f781268126ec9c22e2cb88e550e | 65d061dfd0a8504dda48faac1898d8e9fd264f78 | /setup.py | 9b23ca879c072f06d19ccf675f4b64144bef38e0 | [
"MIT"
] | permissive | https://github.com/astrojuanlu/ezaero | 10c0956b3f0df2b0781abbde13205dfe78aec677 | 564b3c3442081a5b18c30a0e884ffa217381c859 | refs/heads/master | "2022-01-26T02:19:48.660176" | "2019-05-15T21:39:15" | "2019-05-15T21:39:15" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
NAME = 'ezaero'
DESCRIPTION = 'Aerodynamics in Python.'
URL = 'https://github.com/partmor/ezaero'
EMAIL = 'part.morales@gmail.com'
AUTHOR = 'Pedro Arturo Morales Maries'
REQUIRES_PYTHON = '>=3.5'
VERSION = '0.1.dev0'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
setup(
name=NAME,
version=VERSION,
license='MIT',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# TODO
# complete classifier list:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
],
python_requires=REQUIRES_PYTHON,
install_requires=[
'matplotlib>=2.0',
'numpy'
],
extras_require={
'dev': [
'flake8',
'isort',
'pytest',
'sphinx',
'sphinx_rtd_theme',
'tox'
],
'docs': [
'sphinx',
'sphinx-gallery',
'sphinx_rtd_theme',
]
}
)
| UTF-8 | Python | false | false | 1,321 | py | 12 | setup.py | 7 | 0.572294 | 0.564724 | 0 | 56 | 22.589286 | 74 |
guru-14/CodeChef | 4,595,615,024,135 | 3c248200ad4f6da0b913238d85f854c7e0ec9eab | 7d122522df1d34c47ebbb11c8f3971232a23a441 | /December Long Challenge 2019/WATSCORE.py | a50af37aa8631c098d785a80890467070ead558f | [] | no_license | https://github.com/guru-14/CodeChef | 0098fafa28d7321bbaa09bd863300a3653da1466 | 3ccb2766c6f56b8fd76afdf31eb5aa8e875bea17 | refs/heads/master | "2020-07-09T09:37:16.466541" | "2020-04-14T19:58:37" | "2020-04-14T19:58:37" | 203,940,655 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Problem link : https://www.codechef.com/DEC19B/problems/WATSCORE
for _ in range(int(input())):
n = int(input())
scores = [0] * 8
for _ in range(n):
p, s = map(int, input().split())
if p == 9 or p == 10 or p == 11:
continue
else:
if s > scores[p - 1]:
scores[p - 1] = s
print (sum(scores))
| UTF-8 | Python | false | false | 372 | py | 89 | WATSCORE.py | 88 | 0.473118 | 0.443548 | 0 | 13 | 27.615385 | 66 |
sanotaku/fem | 4,947,802,363,756 | b38313be174d2859298ff68c1996e897fa497b9d | fe4333ca3c70523f117285f50389ba0e97d9a15f | /fem/boundary_condition.py | 34880191f84bfbb8d26fc8cc44c34dfd998f8d4c | [] | no_license | https://github.com/sanotaku/fem | 96088caa115b33eb1d370f7b70eedf4e997f8a10 | 522b7f2abd259294f7fc5b1b65c19c7ca577e54b | refs/heads/master | "2023-05-07T10:40:40.374697" | "2021-06-03T14:18:47" | "2021-06-03T14:18:47" | 320,246,490 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import List
from fem.utils import ModelTypeError
class HoldCondition:
def __init__(self) -> None:
"""
Example...
hold_condition = {1: True, 5: True}
"""
self.hold_condition = {}
def set_hold(self, global_node_no: int, is_hold=True):
if type(is_hold) != bool:
raise ModelTypeError()
self.hold_condition[global_node_no] = is_hold
class ForceCondition2d:
def __init__(self) -> None:
"""
Example...
force_condition = {1: [None, -100]}
"""
self.force_condition = {}
def set_hold(self, global_node_no: int, forces: List):
if list is not type(forces):
raise ModelTypeError()
self.force_condition[global_node_no] = forces
| UTF-8 | Python | false | false | 793 | py | 13 | boundary_condition.py | 6 | 0.553594 | 0.544767 | 0 | 31 | 24.580645 | 58 |
PlatONnetwork/client-sdk-python | 15,547,781,614,328 | 07cf9115af72c0994521c067df41a6d2b61eaa45 | b64fcb9da80d12c52bd24a7a1b046ed9952b0026 | /client_sdk_python/net.py | 9eddcb018088398998223abb2938ddbd6625c7ae | [
"MIT"
] | permissive | https://github.com/PlatONnetwork/client-sdk-python | e59f44a77690806c8763ed6db938ed8447d42417 | 94ad57bb34b5ee7bb314ac858071686382c55402 | refs/heads/master | "2022-07-09T08:49:07.312759" | "2021-12-24T08:15:46" | "2021-12-24T08:15:46" | 173,032,954 | 7 | 16 | MIT | false | "2022-08-31T02:19:42" | "2019-02-28T03:18:03" | "2021-12-24T09:48:30" | "2022-06-22T04:33:46" | 1,027 | 6 | 9 | 5 | Python | false | false | from client_sdk_python.module import (
Module,
)
from client_sdk_python.utils.decorators import (
deprecated_in_v5,
)
class Net(Module):
@property
def listening(self):
return self.web3.manager.request_blocking("net_listening", [])
@property
def peerCount(self):
return self.web3.manager.request_blocking("net_peerCount", [])
@property
@deprecated_in_v5
def chainId(self):
return None
@property
def version(self):
return self.web3.manager.request_blocking("net_version", [])
| UTF-8 | Python | false | false | 555 | py | 218 | net.py | 198 | 0.654054 | 0.645045 | 0 | 25 | 21.2 | 70 |
RubensIA/Kmeans | 6,373,731,478,358 | 88393073bd7c09c56f50689c59cadc1fee868edd | 98a2af78e752d7a560875b1e936d732fe355c1ff | /Kmeans7.py | 4164aa1c5dd55ce00ccb5c9eb0f92821ac9a00c9 | [] | no_license | https://github.com/RubensIA/Kmeans | ebc9857705cc881e762533216385cab76e0872b3 | 0f4e1761c7c38f3dc5389b05618246569b61b0c9 | refs/heads/master | "2020-03-07T03:35:41.659415" | "2018-04-13T21:02:16" | "2018-04-13T21:02:16" | 127,240,740 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 16:49:22 2018
@author: Rubens
"""
"algoritmo para clusterizaรงรฃo em 3 clusters"
import xlrd
import numpy as np
data = [[1,2,5,8],[2,4,7,3],[1,9,89,76],[10,32,87,56],[11,54,89,34],[12,89,90,27],[10,99,67,45]]
data_aux = list(data)
# Create 3 random centroids
import random
from random import randrange
centroide = []
semente = 13773
random.seed(semente)
for b in range(3):
random_index = randrange(0,len(data_aux))
    #print('The element drawn was:', data_aux[random_index])
centroide[1:1] = [data_aux[random_index]]
del data_aux[random_index]
print('centroide inicial',centroide)
np.array(centroide)
#print(lista)
from math import sqrt
def euclidian(v1,v2):
    # This function receives two lists
    # and returns the distance between them.
    # Accumulate the squared distance
dist = 0.0
for x in range(len(v1)):
dist += pow((v1[x] - v2[x]),2)
    # Take the square root of the sum
eucli = sqrt(dist)
#print(eucli)
return eucli
def group(j):
    # j receives the centroids
    # group(j) returns the clusters for the given centroids
del cluster1[:]
del cluster2[:]
del cluster3[:]
for z in data:
a = []
for w in j:
d = (euclidian(w,z))
a.append(d)
#print(z , a)
minimo = min(a)
pos = a.index(minimo)
#print(minimo)
#print(pos)
if pos == 0:
cluster1[1:1] = [z]
elif pos == 1:
cluster2[1:1] = [z]
elif pos == 2:
cluster3[1:1] = [z]
clusters = [cluster1,cluster2,cluster3]
return clusters
def err(centroide_anterior, centroide):
    # maximum distance between the old and the new centroids
    distancias = []
    for i in range(3):
        distancias.append(euclidian(centroide_anterior[i], centroide[i]))
    maxdist = max(distancias)
    return maxdist
def subs(centroide):
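    # Recompute each centroid as the mean of its current cluster, then re-assign the points to the new centroids.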
def summ(v):
soma = sum(np.array(v))
#print(soma)
return soma
for m in [cluster1]:
n_elem = len(cluster1)
somacluster1 = summ(m)
#print(somacluster1)
mediacluster1 = somacluster1/n_elem
#print('mediacluster1',mediacluster1)
for n in [cluster2]:
n_elem = len(cluster2)
somacluster2 = summ(n)
mediacluster2 = somacluster2/n_elem
#print('mediacluster2',mediacluster2)
for n in [cluster3]:
n_elem = len(cluster3)
somacluster3 = summ(n)
mediacluster3 = somacluster3/n_elem
#print('mediacluster3',mediacluster3)
centroide = [mediacluster1,mediacluster2,mediacluster3]
group(centroide)
print('cluster1',cluster1)
print('cluster2',cluster2)
print('cluster3',cluster3)
return centroide
cluster1 = []
cluster2 = []
cluster3 = []
group(centroide)
print('cluster1',cluster1)
print('cluster2',cluster2)
print('cluster3',cluster3)
b = 0
while b < 6:
b+=1
centroide_anterior = centroide[:]
centroide = subs(centroide)
#print('centroide anterior',centroide_anterior)
#print('centroide novo',centroide)
#epsilon = err(centroide_anterior,centroide)
#print('epsilon',epsilon)
print('cluster1 resultante',cluster1)
print('cluster2 resultante',cluster2)
print('cluster3 resultante',cluster3)
| UTF-8 | Python | false | false | 3,481 | py | 5 | Kmeans7.py | 4 | 0.593318 | 0.550979 | 0 | 173 | 19.052023 | 96 |
tstrait/Unit1Project | 6,073,083,781,324 | 5daaf022d1d58a396cb47da44b4d058a3dbcbff7 | 1fd587d717c75965765ba935809238493e44ac4f | /stringcompression.py | d344a3cc6295ada589a342766aa04da29c6986f3 | [] | no_license | https://github.com/tstrait/Unit1Project | 85ea53b9dc12e9847ee075a08592683b89e52966 | 99688af2973b67f8f26bafd19d46c34d51b26457 | refs/heads/master | "2020-03-13T03:05:03.385296" | "2018-04-28T21:47:24" | "2018-04-28T21:47:24" | 130,937,195 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | mystring = 'abbbcccdddde'
def string_compression(mystring):
print("Original string:", mystring)
a=[]
compressed=[]
#for each character in string, check if next character is the same or not
for i in range(0,len(mystring)-1):
#if next character is the same, add to a temporary string
if mystring[i] == mystring[i+1]:
a.append(mystring[i+1])
#if not, add current character and count the length of group of same characters; add that to the compressedstring list
else:
a.append(mystring[i])
compresseditem = a[0] + str(len(a))
compressed.append(compresseditem)
a=[]
#add the last character
a.append(mystring[-1:])
compresseditem = a[0] + str(len(a))
compressed.append(compresseditem)
#join list elements into a compressed string
compressedstring = ''.join(compressed)
#only return compressed version if it's shorter than original string
if len(compressedstring) >= len(mystring):
print("Original string is shorter than compressed:", mystring)
else:
print("Compressed string:", compressedstring)
string_compression(mystring) | UTF-8 | Python | false | false | 1,188 | py | 6 | stringcompression.py | 5 | 0.657407 | 0.651515 | 0 | 30 | 38.633333 | 126 |
pgunnink/magnons-python | 10,797,547,825,884 | f4baa87274ff6d5051b1c93c2e2b3b03085e3f0f | 3416d6a60032f32f2da0512152a36dc18b1b3a9f | /magnons/plot.py | d8be912c8b9d6a3940f819f4367a68eed44b0b4c | [] | no_license | https://github.com/pgunnink/magnons-python | 5e6d533554aa4efcf759b09586001d50f35ea0bf | 42a1a4139e3ab403a62cee0670dd590669aa27e7 | refs/heads/master | "2020-08-19T02:36:20.089621" | "2019-12-02T14:08:14" | "2019-12-02T14:08:14" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from magnons.spin import get_spincurrent_total
from tqdm import tqdm
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
def create_edges(x, upper_lim=None, lower_lim=0):
N = len(x)
new_x = []
for i in range(N):
if i == 0:
diff = x[1] - x[0]
else:
diff = x[i] - x[i - 1]
new_x.append(x[i] - .5 * diff)
new_x.append(x[-1] + .5 * diff)
if lower_lim is not None and new_x[0] < lower_lim:
new_x[0] = 0
if upper_lim is not None and new_x[-1] > upper_lim:
new_x[-1] = upper_lim
return new_x
def add_colorbar_subplots(fig, cmap, norm=None):
if norm is None:
norm = plt.Normalize(0, 1)
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
ax=fig.get_axes())
def add_colorbar_oneplot(ax, cmap, norm):
divider = make_axes_locatable(ax)
ax_cb = divider.append_axes('right', size='5%', pad=0.05)
mpl.colorbar.ColorbarBase(ax_cb,
cmap=cmap,
norm=norm,
orientation='vertical')
def plot_colored_dispersion(k, E, colors, ax=None, norm=None, cmap='jet'):
if isinstance(cmap, str):
cmap = mpl.cm.get_cmap(cmap)
if norm is None:
norm = plt.Normalize(colors.min(), colors.max())
if ax is None:
# create axes and add a colorbar
fig, ax = plt.subplots()
add_colorbar_oneplot(ax, cmap, norm)
for i in range(E.shape[-1]):
y_points = E[:, i]
c = colors[:, i]
points = np.array([k, y_points]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = mpl.collections.LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(c)
ax.add_collection(lc)
ax.set_xlim(k.min(), k.max())
ax.set_ylim(E.min(), E.max())
return ax
def plot_totalspincurrent(process, S=1, J=1):
values = list(process.get_all())
current = [get_spincurrent_total(x[2], J=J, S=S) for x in tqdm(values)]
current = np.array(current)
current /= np.max(np.abs(current))
alpha = np.array([x[3]['alpha'] for x in values])
phi = np.array([x[3]['phi'] for x in values])
kvalues = np.array([x[0] for x in values])
kvalues *= 10**(-4) # convert to micrometer^-1
kabs = np.sqrt(np.sum(kvalues[0]**2, axis=1))
sort_idx = np.argsort(phi)
alpha = alpha[sort_idx]
phi = phi[sort_idx]
current = np.array(current)[sort_idx]
alpha = create_edges(alpha, upper_lim=90)
phi = create_edges(phi, upper_lim=90)
kabs = create_edges(kabs)
X, Y = np.meshgrid(kabs, phi)
plt.pcolormesh(X, Y, current, cmap='jet')
plt.colorbar()
plt.xlabel('|k|')
plt.ylabel('Internal Magnetization angle')
plt.show()
| UTF-8 | Python | false | false | 2,884 | py | 49 | plot.py | 35 | 0.57975 | 0.564494 | 0 | 91 | 30.692308 | 75 |
Janluke0/DSBDA-Project | 2,619,930,098,419 | 87322ee6fb1223bd4f7004ba9489ebf6652e2d12 | 1d4a027a06375372f9554613fd4d7c7ec570efa6 | /hadoop/join_works_hadoop/join_oscar_people/mapper_reducer/cast_crew_people/mymapper_join2.py | f45f94ba45182490e18150daa01a6f615ff592e1 | [] | no_license | https://github.com/Janluke0/DSBDA-Project | 09d748c51431a07d45b7d471f84d34cc9fb5aa6a | 6bce1dbb744cc28a24710155e4356c267bcccc46 | refs/heads/master | "2023-02-09T21:33:26.985716" | "2021-01-09T17:38:48" | "2021-01-09T17:38:48" | 292,589,236 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
import logging
import sys
import re
from pprint import pprint
log = logging.getLogger(__name__)
from pymongo_hadoop import BSONMapper
def mapper(documents):
try:
#print(documents, file=sys.stderr)
#f=open("doc_formatted.txt","a+")
for doc in documents:
doc_formatted = {"_id": ""}
keys = [key for key in doc.keys()]
keys.remove("_id")
collection_name = doc["coll_name"]
if collection_name == "people":
doc_formatted["_id"] = int(doc["_id"]["_id"])
elif collection_name == "cast" or collection_name == "crew" :
doc_formatted["_id"] = int(doc["person_id"])
for key in keys:
doc_formatted[key] = doc[key]
yield doc_formatted
#f.close()
except Exception as e:
print("End Mapper\n" + str(e), file=sys.stderr)
#log.exception(e)
BSONMapper(mapper)
| UTF-8 | Python | false | false | 971 | py | 47 | mymapper_join2.py | 25 | 0.549949 | 0.548919 | 0 | 35 | 26.742857 | 73 |
dangxiaobin123/leetcodeAC | 5,952,824,674,116 | 1136a9d9a5a557815377b9a1880bcd76270c07fd | 9f144992eb5f41f624d2327967ba1498b36a7d05 | /leetcode/0004. ๅฏปๆพไธคไธชๆๅบๆฐ็ป็ไธญไฝๆฐ/python/median-of-two-sorted-arrays.py | 206fcea607ec2b584b2b9f62a74357b966498bdd | [] | no_license | https://github.com/dangxiaobin123/leetcodeAC | 6c93729bd2cc7edd4ed80387688555fb4718af26 | 64d26abe124267abf08afaf28fcbcd16be4f6ca4 | refs/heads/master | "2021-01-03T08:31:14.224703" | "2020-10-28T16:08:36" | "2020-10-28T16:08:36" | 240,001,477 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
a, b = len(nums1), len(nums2)
l, r = (a+b+1)>>1, (a+b+2)>>1
return (self.findKthNum(nums1, nums2, l)+self.findKthNum(nums1, nums2, r))/2
def findKthNum(self, nums1, nums2, k):
if len(nums1) == 0:
return nums2[k-1]
if len(nums2) == 0:
return nums1[k-1]
if k == 1:
return min(nums1[0], nums2[0])
m = k>>1
l = nums1[m-1] if m-1<len(nums1) else sys.maxsize
r = nums2[m-1] if m-1<len(nums2) else sys.maxsize
return self.findKthNum(nums1[m:], nums2, k-m) if l<r else self.findKthNum(nums1, nums2[m:], k-m) | UTF-8 | Python | false | false | 727 | py | 76 | median-of-two-sorted-arrays.py | 59 | 0.539202 | 0.482806 | 0 | 21 | 33.619048 | 104 |
blandinw/wit_ros | 4,930,622,483,958 | d1d07f2c27c3900d5ccfc684a7b3ffcf40d3c611 | e9e4d588368b22e4b70f2279513c5ca18d3b820a | /src/wit_ros/wit_ros.py | 8de39eb745c89fddc06cb4acdffdb3e8b19b4b97 | [
"MIT"
] | permissive | https://github.com/blandinw/wit_ros | 5ecffa3f5d566eb3ae60a5f15a8de3216bed6cda | 8fe0b1d2132956f1f3c0114e815e6a3d1fadb959 | refs/heads/master | "2021-01-24T00:09:09.756688" | "2013-09-15T21:06:59" | "2013-09-15T21:06:59" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""ROS node for the Wit.ai API"""
import roslib
roslib.load_manifest('wit_ros')
global APIKEY
APIKEY = None
import rospy
import requests
from wit_ros.srv import Interpret, InterpretResponse
from wit_ros.msg import Outcome, Entity
def interpret(rosrequest):
rospy.logdebug("Interpreting {0}".format(rosrequest.sentence))
httpresponse = requests.get('https://api.wit.ai/message?q={sentence}'.format(sentence=rosrequest.sentence),
headers={"Authorization":"Bearer {key}".format(key=APIKEY)})
data = httpresponse.json()
rospy.logdebug("Data: {0}".format(data))
entities = []
for name, json in data["outcome"]["entities"].iteritems():
entity = Entity(name = str(name),
body = str(json["body"]),
start = int(json["start"]),
end = int(json["end"]),
value = str(json["value"]))
entities += [entity]
outcome = Outcome( confidence = float(data["outcome"]["confidence"]),
entities = entities,
intent = str(data["outcome"]["intent"]))
response = InterpretResponse( msg_body = str(data["msg_body"]),
msg_id = str(data["msg_id"]),
outcome = outcome)
return response
if __name__ == "__main__":
rospy.init_node("wit_ros")
if rospy.has_param('~api_key'):
APIKEY = rospy.get_param("~api_key")
rospy.Service('wit/interpret', Interpret, interpret)
rospy.spin()
else:
rospy.logerr("No API key set (via parameter server). Please set one. " +
"API keys can be obtained via the http://www.wit.ai") | UTF-8 | Python | false | false | 1,804 | py | 3 | wit_ros.py | 2 | 0.551552 | 0.550443 | 0 | 53 | 33.056604 | 112 |
huilongan/Python-for-data-processing | 16,277,926,088,796 | d1a86ac7f6bb675749ca0fdd2d1380c4f9ca7805 | ea7043301d765717e3a09df68ccc160a356933eb | /emailsent2_2.py | 19f19cac935b47dbc520c7156e89683310a8b370 | [] | no_license | https://github.com/huilongan/Python-for-data-processing | b31ed0aea62ba2ea9865ef3e2441f5b664185707 | cacb187632d84718cda47c3551a0779a6af55f04 | refs/heads/master | "2020-06-22T07:20:31.442702" | "2018-02-13T23:22:49" | "2018-02-13T23:22:49" | 74,599,467 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 10 10:58:50 2016
@author: Andy
"""
from email.message import EmailMessage
from email.headerregistry import Address
from email.utils import make_msgid
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
from smtplib import SMTPException
from email.headerregistry import Address
import argparse
import smtplib
import urllib.request
import bs4
import re
import csv
# get the message
def get_info(url):
html = urllib.request.urlopen(url)
bsobj = bs4.BeautifulSoup(html,'xml')
body = bsobj.find('div',{'id':'body'})
name = bsobj.find('div',{'id':'name'})
sign = bsobj.find('div',{'id':'sign'})
return body,name,sign
# for 29 categories
#divs = bsobj.findAll('div',id = re.compile('Category [0-9]+'))
#
#names = []
#bodys = []
## bodys
#for i in divs:
# body = i.find('div',{'id':'message'})
# name = i.find('p',{'id':'name'})
# bodys.append(body)
# names.append(name)
# Create the base text message.
#fussellolivia@yahoo.com
#: ofussell@cincs.com
'''
Get the result
'''
def get_names(address):
result = []
with open(address,'r',encoding='ISO-8859-1') as f:
fieldnames = ['ind','full_name','F_name','L_name','Email']
csvreader = csv.DictReader(f,fieldnames = fieldnames)
for reader in csvreader:
full_name = reader['full_name']
F_name = reader['F_name']
L_name = reader['L_name']
Emaila = reader['Email']
result.append((full_name,F_name,L_name,Emaila))
f.close()
result.pop(0)
return result
def send_message(Toname,Toadd,Todomain,Tofirst,emailAd,emailPs):
msg = EmailMessage()
msg['Subject'] = "How can we protect our Properties and Communities from Climate Risk?"
msg['From'] = Address("Olivia Fussell", "ofussell", "cincs.com")
msg['To'] = Address(Toname, Toadd, Todomain)
# Add the html version. This converts the message into a multipart/alternative
# container, with the original text message as the first part and the new html
# message as the second part.
asparagus_cid = make_msgid()
msg.add_alternative("""<p> Dear {name},</p><br>{content}<br>{signiture}
""".format(name=Tofirst,content = body,signiture = sign), subtype='html')
# note that we needed to peel the <> off the msgid for use in the html.
# Send the message via local SMTP server.
try:
server = smtplib.SMTP("smtp.mail.yahoo.com",587)
server.ehlo()
server.starttls()
server.login(emailAd,emailPs)
server.send_message(msg)
base = '/users/andy/desktop/api/records/'
nameForMSG = 'To_'+ Toname+'.msg'
with open(base + nameForMSG, 'wb') as f:
f.write(bytes(msg))
f.close()
print('Successed to send email to '+Toname)
except Exception:
print('failed to send email to '+Toname)
def main():
print('{}'.format('********************************'))
print('{}'.format('Auto email-send machine'))
print('{}'.format('By Andy An'))
print('{}'.format('********************************'))
urlEmail = str(input('Where is your email HTML? for example:\'file:///Users/Andy/Desktop/demo_files/email.html\' '))
body,name,sign = get_info(urlEmail)
emailAd = str(input('eamilAddress: '))
emailPs = str(input('emailPassword: '))
address = str(input('Your email list? for example: \'/users/andy/desktop/api/emailS.csv\' '))
lists = get_names(address)
for i in lists:
if '@' in i[3]:
Toadd,Todomain = i[3].split('@')
send_message(i[0],Toadd,Todomain,i[1],emailAd,emailPs)
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,801 | py | 8 | emailsent2_2.py | 4 | 0.623257 | 0.614838 | 0 | 114 | 32.342105 | 120 |
idancie/PersonalCoach | 12,567,074,321,900 | 3cb0a5aa997c790dc3d68488a3b417a74b339324 | 72effb6b9b3252ed348291c56e9050f4af5e0e83 | /personal_coach/trainings/models/exercise.py | 195cc6af32333f7f33220fed556d7053aff28ae4 | [] | no_license | https://github.com/idancie/PersonalCoach | 301831a20d962293afc088326aebd5e530a4982a | a6ed5c9f9eda97ea6158feb83a5510b9e1ec7660 | refs/heads/master | "2020-05-18T01:26:44.602486" | "2014-03-02T19:47:05" | "2014-03-02T19:47:05" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
from datetime import datetime
from pytz import utc
class Exercise(models.Model):
'''Excercise that someone has performed
'''
plan = models.ForeignKey('trainings.ExercisePlan',
null=True,
blank=False,
)
type = models.ForeignKey('trainings.TrainingType',
null=False,
blank=False,
on_delete=models.PROTECT,
)
times = models.FloatField(
null=False,
blank=False,
default=.0,
)
breaks = models.IntegerField(
null=False,
blank=False,
default=0,
)
class Meta:
app_label = 'trainings'
def __unicode__(self):
return u'{} from {} performed with {}/{}'.format(
self.type.name,
self.plan,
self.times,
self.breaks,
)
def add_approach(self, times):
total = self.breaks * self.times + times
self.breaks += 1
self.times = total / self.breaks
self.save()
from django.contrib import admin
admin.site.register(Exercise)
| UTF-8 | Python | false | false | 1,180 | py | 14 | exercise.py | 11 | 0.524576 | 0.522034 | 0 | 48 | 23.583333 | 57 |
gkoren/TempMonitor | 1,949,915,154,938 | 879762c7da7db38564d16a15bff144a3ad115ccb | 1e7b7a336dbb37a8624e7aca7553530d1ac9f237 | /python/plots_area.py | 762eaaf7830cc64ea0eee259dcfee214c397887a | [] | no_license | https://github.com/gkoren/TempMonitor | d8d8160fff193b78d922794f4b4e09953244134e | 155ac66d5b3e43fbcd0e96407d10f1923cc4949b | refs/heads/master | "2021-07-06T21:06:58.094487" | "2021-05-27T10:33:05" | "2021-05-27T10:33:05" | 100,924,134 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
#from PyQt4.uic import loadUiType
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PlotsWindow import Ui_PlotsWindow
from matplotlib.lines import Line2D
#import matplotlib.pyplot as plt
import numpy as np
#Ui_MainWindow, QMainWindow = loadUiType('plots_area.ui')
class plots_window(QMainWindow, Ui_PlotsWindow):
def __init__(self, parent=None):
super(plots_window, self).__init__(parent)
self.setupUi(self)
self.canvas = LiveCanvas()
self.plots_Layout.addWidget(self.canvas)
self.canvas.draw()
self.toolbar = NavigationToolbar(self.canvas, self.plots_container, coordinates=True)
#self.plots_Layout.addWidget(self.canvas)
self.plots_Layout.addWidget(self.toolbar)
self.on_show = False
self.plots_dict = {}
self.current_plot = 0 #Default is all sensors
self.plots_list.itemClicked.connect(self.change_figure)
#self.plots_list.addItem(name)
#plot.plot([],[]])
#plot.plot(self.data[0][-20:],self.data[1][-20:])
#self.plot.set_title("Temp1")
def add_list_entry(self,index,name):
self.plots_list.addItem(name)
self.plots_dict[name] = index
#Temp for controlling the figure"
def clear_plots_list(self):
self.plots_list.clear()
def change_figure(self,item):
text = item.text()
self.current_plot = self.plots_dict[str(text)]
self.canvas.current_plot = self.plots_dict[str(text)]
#self.canvas.update_canvas(xdata,ydata)
class LiveCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.plot = self.fig.add_subplot(111)
self.xdata = []
self.ydata = []
self.current_plot = 1
FigureCanvas.__init__(self,self.fig)
def update_canvas(self,xdata,ydata):
self.plot.clear()
self.plot.set_xlabel("Time [s]")
self.plot.set_ylabel("Temperature [C]")
if self.current_plot == 0 and isinstace(ydata[0],list):
self.plot.set_title("All Sensors")
all_data = []
colors = ["blue", "red","black", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "purple", "silver", "teal", "yellow"]
self.plot.grid(True)
for i,iList in enumerate(ydata):
self.plot.plot(xdata,iList,label="Sensor "+str(i+1),color=colors[i])
else:
self.plot.set_title("Sensor "+str(self.current_plot))
self.plot.set_autoscaley_on(True)
self.plot.plot(xdata,ydata)
self.plot.grid(True)
self.plot.set_ylim(min(ydata)*0.95,max(ydata)*1.05)
self.draw()
def draw_all_sensors(self,xdata,all_ydata):
self.plot.clear()
self.plot.set_xlabel("Time [s]")
self.plot.set_ylabel("Temperature [C]")
self.plot.set_title("All Sensors")
#self.plot.plot(xdata,ydata)
self.plot.grid(True)
all_temps = []
#max_val = 0
#min_val = 1000
colors = ["blue", "red","black", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "purple", "silver", "teal", "yellow"]
for i,iList in enumerate(all_ydata):
all_temps.append(max(iList))
all_temps.append(min(iList))
self.plot.plot(xdata[-20:],iList[-20:],label="Sensor "+str(i+1),color=colors[i])
self.plot.set_ylim(min(all_temps)*0.95,max(all_temps)*1.05)
legend = self.plot.legend()
self.draw()
#self.plot.set_ylim(min(ydata)*0.95,max(ydata)*1.05)
#self.show()
#plt.figure()
#self.canvas = FigureCanvas(self.figure)
| UTF-8 | Python | false | false | 3,884 | py | 16 | plots_area.py | 11 | 0.611998 | 0.599125 | 0 | 107 | 35.299065 | 145 |
shaily99/Fake-News-Detection | 19,439,022,008,504 | b6afdb7fbdb95d7d812898bfd5f85a1b7f21e15e | d82f7439c2fbb78cbc5bcf6a34068029ffc654b9 | /make_natlang_csv.py | fb60b48b60aa54cbbee676eace1b6d293267a551 | [] | no_license | https://github.com/shaily99/Fake-News-Detection | f29f25f54f6c69bc29fa326857a732d012b44bdc | f770b81dfb50f5543c98ce30770bfc7841632a1c | refs/heads/master | "2020-05-31T22:30:23.586125" | "2019-06-06T06:13:20" | "2019-06-06T06:13:20" | 190,521,611 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pprint import pprint
import os
import json
import csv
name_ = 'keyw_celeb_fake.csv'
f = open(name_, 'w+')
outWrite = csv.writer(f)
path_ = './natlang_responses/celebrityDataset/fake'
for file_ in os.listdir(path_):
with open(os.path.join(path_, file_), 'r+') as f:
res = json.load(f)
row10 = [file_, res['categories'][0]['label'], res['categories'][0]['score']]
for concept in res['concepts']:
row10.append(concept['text'])
row10.append(concept['relevance'])
if len(row10) != 12:
print('LACKS CONCEPT')
else:
print(row10)
outWrite.writerow(row10)
f.close()
| UTF-8 | Python | false | false | 634 | py | 12 | make_natlang_csv.py | 11 | 0.615142 | 0.589905 | 0 | 29 | 20.862069 | 81 |
Eomys/pyleecan | 5,480,378,297,105 | bf9203f049a9f157fe104c92daf33111a4d26247 | 3e9e7a0fc3c3cb4af50360be4f1f917f94dce3e2 | /pyleecan/Methods/Machine/LamHoleNS/comp_periodicity_geo.py | 640df34f6b87f569abc3c665b607e9efdca83af2 | [
"Apache-2.0"
] | permissive | https://github.com/Eomys/pyleecan | ba3bc98cddc2a1d014fd3a95e84d671f5ea3dcd7 | 2140c1532632b67c1a3a10441de52aa0713d562b | refs/heads/master | "2023-08-17T10:53:50.867613" | "2023-06-22T15:18:31" | "2023-06-22T15:18:31" | 110,973,589 | 144 | 111 | Apache-2.0 | false | "2023-09-14T15:25:34" | "2017-11-16T13:17:16" | "2023-08-31T12:26:58" | "2023-09-14T15:25:33" | 348,050 | 130 | 111 | 92 | Jupyter Notebook | false | false | def comp_periodicity_geo(self):
"""Compute the geometric periodicity factor of the lamination
Parameters
----------
self : LamHoleNS
A LamHoleNS object
Returns
-------
per_a : int
Number of spatial periodicities of the lamination
is_antiper_a : bool
True if an spatial anti-periodicity is possible after the periodicities
"""
return self.comp_periodicity_spatial()
| UTF-8 | Python | false | false | 432 | py | 2,074 | comp_periodicity_geo.py | 1,750 | 0.645833 | 0.645833 | 0 | 17 | 24.411765 | 79 |
BalterNotz/huntjob | 326,417,558,618 | f4494a98b0f0dc37c67be35aa18ca475337bc71b | f83df26ec704ec5091ac8665b84869e768827c84 | /main.py | f61627b690a2d46b7968f5e0a5b959f77c3ccef9 | [] | no_license | https://github.com/BalterNotz/huntjob | 31d1c50f55b1b922510e94e798bc7e8445975574 | 5c4816a39b8e0f1c79dfcf0ec4b51c8bd064b279 | refs/heads/master | "2021-04-30T07:29:04.031471" | "2018-03-31T13:47:38" | "2018-03-31T13:47:38" | 121,397,913 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# Python3
'''
'''
import time
from selenium import webdriver
liepinurl = "http://www.liepin.com/"
liepinzhaopinurl = "https://www.liepin.com/zhaopin"
try:
fireFoxOptions = webdriver.FirefoxOptions()
# fireFoxOptions.set_headless()
driver = webdriver.Firefox(firefox_options=fireFoxOptions)
# webdriver.FirefoxOptions
driver.get(liepinzhaopinurl)
select_city = driver.find_element_by_xpath("/html/body/div[1]/form/div[1]/div/div/ul/li[1]/span/em")
select_city.click()
shenzhen_city = driver.find_element_by_xpath("/html/body/div[9]/div[2]/div[2]/div/div[3]/div/div[1]/ul/li[4]/a")
shenzhen_city.click()
confirm_shenzhen = driver.find_element_by_xpath("//div[@class = 'vd-footer vd-footer-ltr']/a[@data-name = 'ok' and text() = '็กฎๅฎ']")
confirm_shenzhen.click()
seach = driver.find_element_by_name("key")
print(str(seach))
seach.send_keys("Python")
seach.submit()
# button = driver.find_element_by_class_name("search-btn float-right")
# print(str(button))
input("Press any key to continue...")
except Exception as e:
print(e)
finally:
try:
driver.close()
except:
pass
| UTF-8 | Python | false | false | 1,197 | py | 7 | main.py | 7 | 0.665549 | 0.657167 | 0 | 40 | 28.825 | 135 |
pars-linux/pardus-1.0 | 19,009,525,275,546 | c3a5d497d5aa23155ea90421d7b32e349ecdd37c | 884112003b7bd1e200325bba3e3ac9a2f9d3ea8b | /programming/languages/blackdown-jre/actions.py | b005fa370c331536e67332bd6d308a74cdf22936 | [] | no_license | https://github.com/pars-linux/pardus-1.0 | e1a4049c17ac9f2fbc2ae50c61d81c15e03e5823 | 4d2196b7558b3870908e799e02568ee9a6eee419 | refs/heads/master | "2021-01-24T19:12:02.809085" | "2006-06-14T07:07:39" | "2006-06-14T07:07:39" | 82,460,626 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
#
# S.รaฤlar Onur <caglar@pardus.org.tr>
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "."
NoStrip = "/"
def setup():
# Magic
shelltools.system("agreed=1 sh j2re-1.4.2-03-linux-i586.bin")
# go into work directory
shelltools.cd("j2re1.4.2")
shelltools.chmod("lib/unpack")
PACKED_JARS=("lib/tools.jar", "lib/rt.jar", "lib/jsse.jar", \
"lib/charsets.jar", "lib/ext/localedata.jar", "lib/plugin.jar", \
"lib/javaws.jar")
# Unpack all packed jars and remove pack files...
for jar in PACKED_JARS:
PACK_FILE = jar.replace("jar", "") + "pack"
if shelltools.can_access_file(PACK_FILE):
shelltools.system("lib/unpack %s %s" % (PACK_FILE, jar))
shelltools.unlink(PACK_FILE)
shelltools.unlink("lib/unpack")
def install():
shelltools.cd("j2re1.4.2")
pisitools.dodir("/opt/blackdown-jre")
INSTALL_DIRS = ("bin", "plugin", "lib", "man")
for dir in INSTALL_DIRS:
shelltools.copytree(dir, "%s/opt/blackdown-jre/%s" % (get.installDIR(), dir))
pisitools.dodoc("COPYRIGHT", "LICENSE", "README.html")
# Install mozilla plugin
pisitools.dodir("/usr/lib/nsbrowser/plugins")
pisitools.dosym("/opt/blackdown-jre/plugin/i386/mozilla/libjavaplugin_oji.so", "/usr/lib/nsbrowser/plugins/javaplugin.so")
pisitools.dosed("%s/opt/blackdown-jre/lib/font.properties" % get.installDIR(), "standard symbols l", "symbol")
| UTF-8 | Python | false | false | 1,715 | py | 1,370 | actions.py | 319 | 0.652072 | 0.636311 | 0 | 53 | 31.320755 | 126 |
fluffynuts/scripts | 3,418,794,002,780 | cc7626bc02a1308c9f7b5a2ca08f46f417b23cf4 | 6e46ad114b6a542b3fede3e511dbe2ffc8d34176 | /DropboxPushbulletPusher.py | 52c753b2c0450475a7164dff8be2a30bc25a2205 | [
"BSD-2-Clause"
] | permissive | https://github.com/fluffynuts/scripts | dffa978fd1ff360fe5cf630407f2eb5805fcf02b | 8fc9493ac6c3b43f885742b504385bb74c8b7374 | refs/heads/master | "2023-09-04T11:02:17.356787" | "2023-08-23T14:36:40" | "2023-08-23T14:38:28" | 139,478,534 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
import sys
import os
from pushbullet import Pushbullet
import ConfigParser
import dropbox
class Pusher:
def __init__(self):
self._loadConfig()
def _loadConfig(self):
home = os.path.expanduser('~')
self._secretsFile = os.path.join(home, 'secrets.ini')
self._loadConfigFile()
def _loadConfigFile(self):
cfg = ConfigParser.ConfigParser()
cfg.read(self._secretsFile)
self._initDropbox(cfg)
self._initPushbullet(cfg)
def _initPushbullet(self, cfg):
section = 'pushbullet'
self._pbToken = cfg.get(section, 'token')
self._pbDevice = int(cfg.get(section, 'device'))
def _initDropbox(self, cfg):
section = 'dropbox'
self._key = cfg.get(section, 'key')
self._secret = cfg.get(section, 'secret')
if cfg.has_option(section, 'token'):
self._token = cfg.get(section, 'token')
else:
self._token = self._authorize()
self._setPersistentToken(cfg, section, self._token)
def _setPersistentToken(self, cfg, section, token):
cfg.set(section, 'token', token)
fp = open(self._secretsFile, 'w')
fp.truncate()
cfg.write(fp)
def _authorize(self):
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(self._key, self._secret)
authorize_url = flow.start()
print '1. Go to: ' + authorize_url
print '2. Click "Allow" (you might have to log in first)'
print '3. Copy the authorization code.'
auth_token = raw_input("Enter the authorization code here: ").strip()
access_token, user_id = flow.finish(auth_token)
print(access_token)
print(user_id)
return access_token
def push(self, path):
url = self._pushToDropbox(path)
self._pushToPushbullet(url)
def _pushToDropbox(self, path):
client = dropbox.client.DropboxClient(self._token)
print('uploading %s...' % (path))
uploadName = os.path.basename(path)
with open(path, 'r') as fp:
response = client.put_file(uploadName, fp, overwrite=True)
print('upload complete!')
m = client.metadata(uploadName)
return client.share(m['path'])['url']
def _pushToPushbullet(self, url):
pb = Pushbullet(self._pbToken)
device = pb.devices[self._pbDevice]
device.push_note('New ROM!', url)
if __name__ == '__main__':
pusher = Pusher()
for arg in sys.argv[1:]:
print('%s:%s' % (arg, pusher.push(arg)))
| UTF-8 | Python | false | false | 2,394 | py | 238 | DropboxPushbulletPusher.py | 221 | 0.639933 | 0.637845 | 0 | 82 | 28.195122 | 78 |
jkpubsrc/python-module-jk-jsoncfghelper2 | 1,159,641,205,896 | 3d4708362a9a7d51e06c5b2dc5f17b7d9f545e2f | 004646388f3a40d4e6ee306730dad104b20a237b | /testing/test-compile-from-jdef.py | fc0c540e49c07b293e8716cb74114d0be5fff93f | [
"Apache-2.0"
] | permissive | https://github.com/jkpubsrc/python-module-jk-jsoncfghelper2 | 326e68391aac92f5782b5b54388580d3030285de | 2c584c946db5d290525ff49f5c809cec14616247 | refs/heads/master | "2022-09-28T20:15:32.674328" | "2020-01-20T18:33:11" | "2020-01-20T18:33:11" | 235,169,634 | 0 | 1 | null | false | "2022-09-03T13:57:00" | "2020-01-20T18:29:07" | "2020-01-20T18:34:17" | "2020-01-20T18:34:14" | 175 | 0 | 1 | 1 | Python | false | false | #!/usr/bin/python3
from jk_jsoncfghelper2 import *
MAIN_DEF = JDefStructure("authData", structure=[
JDef("authMethod", dataType="str", required=True),
JDef("authLogin", dataType="str", required=True),
JDef("authToken", dataType="str", required=True),
JDef("info", dataType=JDefStructure("authDataClientInfo", structure=[
JDef("uid", dataType="int", required=True),
JDef("uname", dataType="str", required=True),
JDef("gid", dataType="int", required=True),
JDef("gname", dataType="str", required=True),
JDef("pid", dataType="int", required=True),
JDef("subsystem", dataType="str", required=True),
JDef("version", dataType="str", required=True),
JDef("commands", dataType="list", elementTypes=[
JDefStructure("cmdDef", structure=[
JDef("description", dataType="str", required=True),
JDef("cmd", dataType="str", required=True),
JDef("msgType", dataType="str", required=True),
JDef("dataResponses", dataType="list", required=False),
JDef("errorResponses", dataType="list", required=False),
JDef("errID", dataType="str", required=False),
JDef("forwardSubsystem", dataType="str", required=False),
JDef("isForward", dataType="bool", required=False),
])
], required=True),
]), required=True)
])
scmgr = compileFromDefs(MAIN_DEF)
print("Elements:")
for name in scmgr:
print("\t" + name)
print()
checker = scmgr.get("authData")
print("Errors:")
n = 0
for path, message in checker.check({
"authMethod": "plaintext",
"authLogin": "somebody",
"authToken": "abc123ghj789",
"info": {
"uid": 0,
"uname": "root",
"gid": 0,
"gname": "root",
"pid": 12345,
"subsystem": "abc",
"version": "0.2019.11.1",
"commands": [
]
}
}):
print("\t" + path + ": " + message)
n += 1
if n == 0:
print("\t(no errors)")
| UTF-8 | Python | false | false | 1,789 | py | 77 | test-compile-from-jdef.py | 8 | 0.645612 | 0.631079 | 0 | 72 | 23.791667 | 70 |
foothillsgeek/imgFTPur | 6,167,573,058,842 | f3ee5607e8725834d12d186beebcf9cc2633edd1 | 7cf8bcdf866c28022b088faa32490766dcd364f7 | /imgFTPur.py | acc6c4510527d1ba0a61d44e85cc167e375372ba | [] | no_license | https://github.com/foothillsgeek/imgFTPur | c9d29c595e6112b3c027cd459c8a951701170728 | 8165f7014beca187ec737e964b39ddd55274c416 | refs/heads/master | "2021-01-19T00:46:13.282548" | "2015-06-17T01:55:37" | "2015-06-17T01:55:37" | 37,226,198 | 2 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #/usr/bin/python env
import argparse
import ConfigParser
import sys
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--list_config', action="store_true")
return parser.parse_args()
def list_config(config):
for section in config.sections():
print "[" + section + "]"
for option in config.options(section):
print option + ": " + config.get(section, option)
def get_config(conf_file):
config = ConfigParser.ConfigParser()
config.read(conf_file)
if len(config.sections()) == 0:
sys.exit("Configuration file missing or not readable")
else:
return config
def ftpd(config):
authorizer = DummyAuthorizer()
authorizer.add_user(
str(config.get('FTPSettings', 'username')),
config.get('FTPSettings', 'password'),
config.get('FTPSettings', 'incomingfolder'),
perm="elradfmw")
handler = FTPHandler
handler.authorizer = authorizer
server = FTPServer((config.get('FTPSettings', 'ipaddress'), config.get('FTPSettings', 'port')), handler)
server.serve_forever()
config_file = 'imgFTPur.conf'
config = get_config(config_file)
args = arg_parser()
if args.list_config:
list_config(config)
sys.exit(1)
ftpd(config)
| UTF-8 | Python | false | false | 1,404 | py | 2 | imgFTPur.py | 1 | 0.678063 | 0.676638 | 0 | 51 | 26.529412 | 108 |
azmweb/tyty | 3,573,412,824,931 | 64a16c9ffbf9f15ceb5cd322713cb79c4f96b318 | 77e55c43ccfd96402ce9d10595a2ee563e7d6f96 | /tyty/migrations/0003_auto_20190708_1755.py | a79cea58071bf2b2f142a8aaf66df345a85cc017 | [] | no_license | https://github.com/azmweb/tyty | 7677d40ae9498491490dbfd9a11a484e49e1c8e9 | 2d1ec308a0d8e878938bd92ee8408da5f9b9fae3 | refs/heads/master | "2020-06-21T15:44:41.163899" | "2019-07-18T02:54:14" | "2019-07-18T02:54:14" | 197,493,870 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.2 on 2019-07-08 08:55
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tyty', '0002_auto_20190705_1044'),
]
operations = [
migrations.AddField(
model_name='rank',
name='pt',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(100)]),
),
migrations.AlterField(
model_name='rank',
name='cs',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(50)]),
),
migrations.AlterField(
model_name='rank',
name='ht',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(50)]),
),
migrations.AlterField(
model_name='rank',
name='jq',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(50)]),
),
migrations.AlterField(
model_name='rank',
name='js',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(50)]),
),
migrations.AlterField(
model_name='rank',
name='name',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='rank',
name='sex',
field=models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)]),
),
]
| UTF-8 | Python | false | false | 1,969 | py | 34 | 0003_auto_20190708_1755.py | 22 | 0.606907 | 0.580498 | 0 | 49 | 38.183673 | 154 |
rhanmar/python_explore | 4,604,204,958,911 | a19e7ecac9dbb2b74046be4fe3d6d5e49220ad1e | f0dec8189259791f4ccca11b9b372989a25d0b24 | /tasks_basics/tuple_rotation.py | 07d003b400609ae1edfbbc3f1873d7c68c1c4101 | [] | no_license | https://github.com/rhanmar/python_explore | ff536ecfd0b7f7691c0c9612c3395184fc30c081 | 92ce62118180dcb6a44c964e27d2772bbb915165 | refs/heads/master | "2020-11-28T22:58:51.994509" | "2020-01-06T16:12:54" | "2020-01-06T16:12:54" | 229,943,175 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def rotate_left(triple):
(a, b, c) = triple
return (b, c, a)
def rotate_right(triple):
(a, b, c) = triple
return (c, a, b)
triple = ('A', 'B', 'C')
print rotate_left(triple) # ('B', 'C', 'A')
print rotate_right(triple) # ('C', 'A', 'B') | UTF-8 | Python | false | false | 256 | py | 44 | tuple_rotation.py | 44 | 0.519531 | 0.519531 | 0 | 13 | 18.769231 | 44 |
Afra-intellisoft/Musa-Trading | 3,719,441,711,728 | 21398a37ad3d6c0f9079d2b3f1fc3c00a23cdd3a | b8760f70f2fa9a7714b890eb5b8794ef1ddd2d98 | /check_followups/report/all_check_report.py | bf96413d2b72f1cc8920e2cceb9fc22c85803df1 | [] | no_license | https://github.com/Afra-intellisoft/Musa-Trading | f87b98fed4478b040a744a9a86669f53611916bf | e07b0cbc1283d790db7c9d1c226d37ba81bae10d | refs/heads/master | "2022-10-15T10:52:55.415361" | "2020-06-13T15:21:06" | "2020-06-13T15:21:06" | 272,035,372 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from odoo import api, models, fields
#
# from openerp import api, models, fields
# from openerp.tools import float_round
# from openerp.exceptions import UserError, Warning
class all_check_report(models.AbstractModel):
_name = 'report.check_followups.all_check_report'
def get_check(self):
res = []
check_list = []
num_line =0
total_employee=0
records = False
active_ids = self._context.get('active_ids')
checks = self.env['all.check.report.wizard'].browse(active_ids)
for check in checks:
date_from=check.date_from
date_to=check.date_to
type=check.type
if type == 'check_recivce':
check_ids =self.env['check_followups.check_followups'].search(
[('Date', '>=',date_from), ('Date', '<=', date_to),
('state', 'in', ('withdrawal','donec')),
])
if check_ids:
records = self.env['check_followups.check_followups'].browse(check_ids)
for che in records:
num_line+=1
check_list.append({
'num_line': num_line,
'account_holder': che.id.account_holder.name,
'check_no': che.id.check_no,
'date': che.id.Date,
'amount': che.id.amount,
'notes': che.id.notes,
'communication': che.id.communication,
})
total_employee=num_line
return check_list
def get_check_return(self):
res = []
check_list_return = []
num_line =0
total_employee=0
records = False
active_ids = self._context.get('active_ids')
return_checks = self.env['all.check.report.wizard'].browse(active_ids)
for checks in return_checks:
date_from=checks.date_from
date_to=checks.date_to
type=checks.type
if type == 'check_return':
check_return_ids =self.env['check_followups.check_followups'].search(
[('Date', '>=',date_from), ('Date', '<=', date_to),
('state', '=', 'rdc'),
])
if check_return_ids:
records = self.env['check_followups.check_followups'].browse(check_return_ids)
for ches in records:
num_line+=1
check_list_return.append({
'num_line': num_line,
'account_holder': ches.id.account_holder.name,
'check_no': ches.id.check_no,
'date': ches.id.Date,
'amount': ches.id.amount,
'notes': ches.id.notes,
'communication': ches.id.communication,
})
total_employee=num_line
return check_list_return
def get_check_waiting(self):
res = []
check_list_waiting = []
num_line =0
total_employee=0
records = False
active_ids = self._context.get('active_ids')
return_checks = self.env['all.check.report.wizard'].browse(active_ids)
for checks in return_checks:
date_from=checks.date_from
date_to=checks.date_to
type=checks.type
if type == 'check_waiting':
check_waiting_ids =self.env['check_followups.check_followups'].search(
[('Date', '>=',date_from), ('Date', '<=', date_to),
('state', '=', 'wait_bank'),
])
if check_waiting_ids:
records = self.env['check_followups.check_followups'].browse(check_waiting_ids)
for wait in records:
num_line+=1
check_list_waiting.append({
'num_line': num_line,
'account_holder': wait.id.account_holder.name,
'check_no': wait.id.check_no,
'date': wait.id.Date,
'amount': wait.id.amount,
'notes': wait.id.notes,
'communication': wait.id.communication,
})
total_employee=num_line
return check_list_waiting
def get_check_reject(self):
res = []
check_list_reject = []
num_line = 0
total_employee = 0
records = False
active_ids = self._context.get('active_ids')
return_checks = self.env['all.check.report.wizard'].browse(active_ids)
for checks in return_checks:
date_from = checks.date_from
date_to = checks.date_to
type = checks.type
if type == 'check_reject':
check_reject_ids = self.env['check_followups.check_followups'].search(
[('Date', '>=', date_from), ('Date', '<=', date_to),
('state', '=', 'rdc'),
])
if check_reject_ids:
records = self.env['check_followups.check_followups'].browse(check_reject_ids)
for reject in records:
num_line += 1
check_list_reject.append({
'num_line': num_line,
'account_holder': reject.id.account_holder.name,
'check_no': reject.id.check_no,
'date': reject.id.Date,
'amount': reject.id.amount,
'notes': reject.id.notes,
'communication': reject.id.communication,
})
total_employee = num_line
return check_list_reject
def get_check_collection(self):
res = []
check_list_collection = []
num_line = 0
total_employee = 0
records = False
active_ids = self._context.get('active_ids')
return_checks = self.env['all.check.report.wizard'].browse(active_ids)
for checks in return_checks:
date_from = checks.date_from
date_to = checks.date_to
type = checks.type
print("lllllll")
if type == 'check_collection':
check_collection_ids = self.env['check_followups.check_followups'].search(
[('Date', '>=', date_from), ('Date', '<=', date_to),
('state', '=', 'under_collection'),
])
print(check_collection_ids,';;')
if check_collection_ids:
records = self.env['check_followups.check_followups'].browse(check_collection_ids)
for collect in records:
num_line += 1
check_list_collection.append({
'num_line': num_line,
'account_holder': collect.id.account_holder.name,
'check_no': collect.id.check_no,
'date': collect.id.Date,
'amount': collect.id.amount,
'notes': collect.id.notes,
'communication': collect.id.communication,
})
total_employee = num_line
return check_list_collection
@api.model
def render_html(self, docids, data):
self.model = self.env.context.get('active_model')
docargs = {
'doc_ids': self.ids,
'doc_model': self,
'docs': self,
'data': data['form'],
'type': data['form'][0]['type'],
'date_from': data['form'][0]['date_from'],
'date_to': data['form'][0]['date_to'], }
if data['form'][0]['type']:
if data['form'][0]['type'] == 'check_recivce':
docargs['check_ids'] = self.get_check()
elif data['form'][0]['type'] == 'check_return':
docargs['check_return_ids'] = self.get_check_return()
elif data['form'][0]['type'] == 'check_waiting':
docargs['check_waiting_ids'] = self.get_check_waiting()
elif data['form'][0]['type'] == 'check_reject':
docargs['check_reject_ids'] = self.get_check_reject()
elif data['form'][0]['type'] == 'check_collection':
docargs['check_collection_ids'] = self.get_check_collection()
return self.env['report'].render('check_followups.all_check_report', docargs)
| UTF-8 | Python | false | false | 8,742 | py | 238 | all_check_report.py | 101 | 0.475635 | 0.472775 | 0 | 221 | 38.556561 | 98 |
Soumithri/coding_problems | 2,637,109,927,478 | b1b1d475c4a4c6b624616d63ab7aae35bf74e849 | e470d8d230d96c1c2bba2b3b6ace84e640dbb2e5 | /src/hashes/answers/two_sum.py | 7ac0d98a5494c2a3afd5c9d4f1a42f2a615de0e1 | [
"MIT"
] | permissive | https://github.com/Soumithri/coding_problems | a4877b83ddbb4ef7e6f94d820b14c2ab41853b81 | b10820d81677ef0edc9a5f2b310720d8e1df6c76 | refs/heads/master | "2023-04-13T18:06:26.457593" | "2020-08-10T07:04:37" | "2020-08-10T07:04:37" | 284,794,694 | 0 | 0 | MIT | false | "2021-04-20T20:46:26" | "2020-08-03T19:58:52" | "2020-08-10T07:04:41" | "2021-04-20T20:46:26" | 68 | 0 | 0 | 1 | Python | false | false |
def twoSum(nums, target):
nums_set = set()
for i in nums:
diff = target - i
if diff in nums_set:
return True
else:
nums_set.add(i)
return False
| UTF-8 | Python | false | false | 205 | py | 34 | two_sum.py | 25 | 0.492683 | 0.492683 | 0 | 9 | 21.666667 | 28 |
neekin/djangoapp-rest | 1,752,346,697,724 | 9d1877919800760cd9fd9bd2652f4bda588ff548 | 28c9b4437817e4f849b4a1098855776278fa184a | /apps/details/migrations/0001_initial.py | 34771601e4a7c81d5f640797f90f2a65e5dc167f | [] | no_license | https://github.com/neekin/djangoapp-rest | 9b0e1cefd24413384b995694f3832eaaac3fa1c3 | f0e76794b098a6ff6f60e226dc4e9749fb282041 | refs/heads/master | "2021-07-23T11:55:46.845571" | "2017-11-04T01:30:02" | "2017-11-04T01:30:02" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 15:35
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Detail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='ๆ ้ข')),
('content', models.CharField(max_length=1000, verbose_name='ๆญฃๆ')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='ๆทปๅ ๆถ้ด')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='็จๆท')),
],
options={
'verbose_name': 'ๅ
ๅฎน',
'verbose_name_plural': 'ๅ
ๅฎน',
},
),
migrations.AlterUniqueTogether(
name='detail',
unique_together=set([('user',)]),
),
]
| UTF-8 | Python | false | false | 1,317 | py | 13 | 0001_initial.py | 13 | 0.588053 | 0.569434 | 0 | 38 | 32.921053 | 137 |
mbraak/django-file-form | 2,241,972,960,536 | db0c6a494fda9632e09f1698e9a6831f03f7c632 | a629162409ae8b7e42d90dd98cfeeda3036a559a | /django_file_form/migrations/0007_auto_20210119_0104.py | f7346c299948070a8b602bfe7035fed5598063f3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | https://github.com/mbraak/django-file-form | c0422237167961df97d2d6a9585e2554e0e38c3b | 672651166c0107c20f5191ce481f5049f88444d5 | refs/heads/master | "2023-08-28T16:25:25.981926" | "2023-08-28T06:13:11" | "2023-08-28T06:13:11" | 9,933,602 | 153 | 47 | NOASSERTION | false | "2023-09-11T06:16:57" | "2013-05-08T10:06:20" | "2023-09-02T16:10:00" | "2023-09-11T06:16:56" | 18,073 | 146 | 40 | 5 | JavaScript | false | false | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("django_file_form", "0006_auto_20200501_0908"),
]
operations = [
migrations.RenameModel(
new_name="TemporaryUploadedFile",
old_name="UploadedFile",
),
migrations.AlterModelTable(
name="TemporaryUploadedFile",
table="django_file_form_uploadedfile",
),
]
| UTF-8 | Python | false | false | 450 | py | 113 | 0007_auto_20210119_0104.py | 74 | 0.591111 | 0.555556 | 0 | 18 | 24 | 56 |
justagist/tf_notebook_playground | 8,126,078,135,755 | fcedb5018dd60355034093189f0539c1b163ce29 | 68814f1f35cfcec0361afe2df6c61a8f6e4710ee | /code/autoencoder_mnist.py | e43823ce6653d40ea61c268de11e0c6710d5521b | [] | no_license | https://github.com/justagist/tf_notebook_playground | 01b21a4eb873137f17b5392477b142194c0bbb7b | a290f8bc37f1b7de5f6e10b6ecb3ea9f65d02caf | refs/heads/master | "2021-04-28T10:49:17.958703" | "2018-02-20T23:46:45" | "2018-02-20T23:46:45" | 122,075,116 | 0 | 0 | null | true | "2018-02-19T14:37:34" | "2018-02-19T14:37:34" | "2017-01-22T14:16:29" | "2017-01-22T14:26:06" | 520 | 0 | 0 | 0 | null | false | null | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
# %matplotlib inline
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
'''
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
'''
device_string = "/cpu:0"
with tf.device(device_string):
x = tf.placeholder(tf.float32, [None, 784])
## Encoder
with tf.variable_scope('encoder'):
W_fc1 = tf.Variable(tf.random_uniform([784,50], dtype=tf.float32))
b_fc1 = tf.Variable(tf.random_uniform([50], dtype=tf.float32))
## Bottleneck
W_fc2 = tf.Variable(tf.random_uniform([50,2], dtype=tf.float32))
b_fc2 = tf.Variable(tf.random_uniform([2], dtype=tf.float32))
h1_enc = tf.nn.tanh(tf.matmul(x, W_fc1) + b_fc1)
encoder_op = tf.nn.tanh(tf.matmul(h1_enc, W_fc2) + b_fc2)
with tf.variable_scope('decoder'):
code_in = tf.placeholder(tf.float32,[None,2])
W_fc1 = tf.Variable(tf.random_uniform([2,50], dtype=tf.float32))
b_fc1 = tf.Variable(tf.random_uniform([50], dtype=tf.float32))
W_fc2 = tf.Variable(tf.random_uniform([50,784], dtype=tf.float32))
b_fc2 = tf.Variable(tf.random_uniform([784], dtype=tf.float32))
h1_dec = tf.nn.tanh(tf.matmul(encoder_op, W_fc1) + b_fc1)
decode = tf.nn.tanh(tf.matmul(h1_dec, W_fc2) + b_fc2)
h1_dec = tf.nn.tanh(tf.matmul(code_in, W_fc1) + b_fc1)
decoder = tf.nn.tanh(tf.matmul(h1_dec, W_fc2) + b_fc2)
with tf.device(device_string):
y_ = tf.placeholder(tf.float32, [None, 784]) # Correct answer
pv = tf.placeholder(tf.float32, [1, 2]) # Sparsity prob
beta = tf.placeholder(tf.float32, [1, 1]) # Sparsity penalty (lagrange multiplier)
# Aditional loss for penalising high activations (http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/)
# with tf.device(device_string):
# p = tf.nn.softmax(encoder_op)
# kl_divergence = tf.reduce_mean(tf.mul(pv,tf.log(tf.div(pv,p))))
# sparsity_loss = tf.mul(beta,kl_divergence)
with tf.device(device_string):
weight_decay_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
squared_loss = tf.reduce_sum(tf.square(decode - y_))
with tf.device(device_string):
loss_op = tf.reduce_mean(squared_loss) + 0.1*weight_decay_loss #+ sparsity_loss
with tf.device(device_string):
train_op = tf.train.AdadeltaOptimizer(learning_rate=0.1, rho=0.1, epsilon=0.0001).minimize(loss_op)
init_op = tf.initialize_all_variables()
| UTF-8 | Python | false | false | 2,785 | py | 1 | autoencoder_mnist.py | 1 | 0.656373 | 0.617594 | 0 | 74 | 36.635135 | 112 |
aCoffeeYin/pyreco | 2,216,203,173,392 | 74917e702ddadb2a7333425c874e7bdc3826bef9 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/numba-numba/allPythonContent.py | ea59e539dc7d783472605598f66472d87c929e16 | [] | no_license | https://github.com/aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | "2020-12-14T14:10:05.763693" | "2016-06-27T05:15:15" | "2016-06-27T05:15:15" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __FILENAME__ = bm_euler
# Modified from a stackoverflow post by Hyperboreus:
# http://stackoverflow.com/questions/6964392/speed-comparison-with-project-euler-c-vs-python-vs-erlang-vs-haskell
from __future__ import print_function, division, absolute_import
import math
from numba import jit
from numba.utils import benchmark
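# Both implementations below count the divisors of n by trial division up to
# sqrt(n): every divisor found at or below the square root pairs with one above
# it (hence "count += 2"), and the count starts at -1 for perfect squares so
# that the square root itself is not counted twice.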
def py_factorCount(n):
square = math.sqrt(n)
isquare = int (square)
count = -1 if isquare == square else 0
for candidate in range(1, isquare + 1):
if not n % candidate:
count += 2
return count
def py_euler():
triangle = 1
index = 1
while py_factorCount(triangle) < 1001:
index += 1
triangle += index
return triangle
@jit("intp(intp)", nopython=True)
def factorCount(n):
square = math.sqrt(n)
isquare = int (square)
count = -1 if isquare == square else 0
for candidate in range(1, isquare + 1):
if not n % candidate:
count += 2
return count
@jit("intp()", nopython=True)
def euler():
triangle = 1
index = 1
while factorCount(triangle) < 1001:
index += 1
triangle += index
return triangle
answer = 842161320
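# euler() grows the triangle number until factorCount reports at least 1001
# divisors; `answer` is the expected result both variants are checked against.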
def numba_main():
result = euler()
assert result == answer
def python_main():
result = py_euler()
assert result == answer
if __name__ == '__main__':
print(benchmark(python_main))
print(benchmark(numba_main))
########NEW FILE########
__FILENAME__ = bm_laplace2d
from __future__ import absolute_import, print_function, division
import numpy as np
from numba import jit
from numba.utils import benchmark
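# One Jacobi relaxation sweep over the interior of the grid: every interior
# point of Anew becomes the average of its four neighbours in A, and the
# largest absolute change seen during the sweep is returned as the error.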
def jacobi_relax_core(A, Anew):
error = 0.0
n = A.shape[0]
m = A.shape[1]
for j in range(1, n - 1):
for i in range(1, m - 1):
Anew[j, i] = 0.25 * ( A[j, i + 1] + A[j, i - 1] \
+ A[j - 1, i] + A[j + 1, i])
error = max(error, abs(Anew[j, i] - A[j, i]))
return error
numba_jacobi_relax_core = jit("float64[:,::1], float64[:,::1]", nopython=True)\
    (jacobi_relax_core)
def run(fn):
NN = 1024
NM = 1024
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
m = NM
iter_max = 10
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
it = 0
while error > tol and it < iter_max:
error = fn(A, Anew)
# swap A and Anew
tmp = A
A = Anew
Anew = tmp
it += 1
def python_main():
    run(jacobi_relax_core)
def numba_main():
    run(numba_jacobi_relax_core)
if __name__ == '__main__':
print(benchmark(python_main))
print(benchmark(numba_main))
########NEW FILE########
__FILENAME__ = runall
#! /usr/bin/env python
from __future__ import print_function, division, absolute_import
import os
import numpy as np
from matplotlib import pyplot
from numba.utils import benchmark
BENCHMARK_PREFIX = 'bm_'
def discover_files(startdir=os.curdir):
for root, dirs, files in os.walk(startdir):
for path in files:
if path.startswith(BENCHMARK_PREFIX):
fullpath = os.path.join(root, path)
yield fullpath
try:
from importlib import import_module
except ImportError:
# Approximative fallback for Python < 2.7
def import_module(modulename):
module = __import__(modulename)
for comp in modulename.split('.')[:-1]:
module = getattr(module, comp)
return module
def discover_modules():
for fullpath in discover_files():
path = os.path.relpath(fullpath)
root, ext = os.path.splitext(path)
if ext != '.py':
continue
modulename = root.replace(os.path.sep, '.')
yield import_module(modulename)
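# For instance, a discovered "./bm_euler.py" becomes the relative path
# "bm_euler.py", whose root "bm_euler" is imported as a module; a nested
# (hypothetical) "sub/bm_foo.py" would map to the dotted name "sub.bm_foo".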
def discover():
for m in discover_modules():
yield m.main
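# Note: discover() is not used by main() below, which walks discover_modules()
# directly and calls each module's python_main/numba_main pair via run().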
def run(mod):
name = mod.__name__[len(BENCHMARK_PREFIX):]
print('running', name, end=' ...\n')
bmr = benchmark(mod.python_main)
python_best = bmr.best
print('\tpython', python_best, 'seconds')
bmr = benchmark(mod.numba_main)
numba_best = bmr.best
print('\tnumba', numba_best, 'seconds')
print('\tspeedup', python_best / numba_best)
return name, numba_best / python_best
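# The returned score is the numba runtime normalized to CPython, so values
# below 1.0 mean numba was faster; this matches the y=1 reference line and the
# "Normalized to CPython" axis label used in main() below.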
def main():
# Generate timings
labels = []
scores = []
for mod in discover_modules():
label, result = run(mod)
labels.append(label)
scores.append(result)
# Plot
width = 0.8
ind = np.arange(len(labels))
fig, ax = pyplot.subplots()
ax.bar(ind, scores, width)
# Draw horizontal line at y=1
ax.axhline(y=1, xmax=ind[-1], color='r')
ax.set_ylabel('Normalized to CPython')
ax.set_title('Numba Benchmark')
ax.set_xticks(ind + (width/2))
ax.set_xticklabels(labels)
pyplot.show()
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = mandel
from numba import autojit
import numpy as np
#from pylab import imshow, jet, show, ion
@autojit
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return 255
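# Illustrative behaviour (not part of the original script): mandel(0.0, 0.0, 20)
# never escapes and returns the 255 sentinel, while mandel(2.0, 2.0, 20) escapes
# on the first iteration and returns 0.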
@autojit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image
image = np.zeros((500, 750), dtype=np.uint8)
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
#jet()
#ion()
#show()
print("mandel OK")
########NEW FILE########
__FILENAME__ = run_test
import sys
import numba
if not numba.test():
print("Test failed")
sys.exit(1)
print('numba.__version__: %s' % numba.__version__)
########NEW FILE########
__FILENAME__ = run_test
import os
import numba.testing
if int(os.environ.get("NUMBA_MULTITEST", 1)):
testfn = numba.testing.multitest
else:
testfn = numba.testing.test
if not testfn():
raise RuntimeError("Test failed")
print('numba.__version__: %s' % numba.__version__)
########NEW FILE########
__FILENAME__ = condatestall
"""
Uses conda to run and test all supported python + numpy versions.
"""
from __future__ import print_function
import itertools
import subprocess
import os
import sys
NPY = '16', '17'
PY = '26', '27', '33'
RECIPE_DIR = "./buildscripts/condarecipe.local"
def main():
failfast = '-v' in sys.argv[1:]
args = "conda build %s --no-binstar-upload" % RECIPE_DIR
failures = []
for py, npy in itertools.product(PY, NPY):
if py == '33' and npy == '16':
# Skip python3 + numpy16
continue
os.environ['CONDA_PY'] = py
os.environ['CONDA_NPY'] = npy
try:
subprocess.check_call(args.split())
except subprocess.CalledProcessError as e:
failures.append((py, npy, e))
if failfast:
break
print("=" * 80)
if failures:
for py, npy, err in failures:
print("Test failed for python %s numpy %s" % (py, npy))
print(err)
else:
print("All Passed")
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = run
import provider
import consumer
class ProviderSubclassOnHeap(provider.Provider):
pass
print consumer.sum_baseline(provider.Provider(), 4)
print consumer.sum_baseline(object(), 4)
print consumer.sum_baseline(ProviderSubclassOnHeap(), 4)
########NEW FILE########
__FILENAME__ = test_interning
from .. import intern
def test_global_interning():
# Can't really test for this with nose...
# try:
# intern.global_intern("hello")
# except AssertionError as e:
# pass
# else:
# raise Exception("Expects complaint about uninitialized table")
intern.global_intern_initialize()
id1 = intern.global_intern("hello")
id2 = intern.global_intern("hello")
id3 = intern.global_intern("hallo")
assert id1 == id2
assert id1 != id3
def test_interning():
table = intern.InternTable()
id1 = intern.global_intern("hello")
id2 = intern.global_intern("hello")
id3 = intern.global_intern("hallo")
assert id1 == id2
assert id1 != id3
def test_intern_many():
table = intern.InternTable()
itoid = {}
for i in range(1000000):
id = table.intern("my randrom string %d" % i)
itoid[i] = id
id1 = table.intern("my randrom string %d" % (i // 2))
id2 = table.intern("my randrom string %d" % (i // 4))
assert id1 == itoid[i//2]
assert id2 == itoid[i//4]
if __name__ == '__main__':
test_intern_many()
########NEW FILE########
__FILENAME__ = test_perfecthashing
import time
import itertools
from nose.tools import eq_, ok_
import numpy as np
from .. import extensibletype, methodtable
def test_binsort():
nbins = 64
p = np.zeros(nbins, dtype=np.uint16)
binsizes = np.random.randint(0, 7, size=nbins).astype(np.uint8)
num_by_size = np.zeros(8, dtype=np.uint16)
x = np.bincount(binsizes).astype(np.uint16)
num_by_size[:x.shape[0]] = x
extensibletype.bucket_argsort(p, binsizes, num_by_size)
assert np.all(sorted(binsizes) == binsizes[p][::-1])
def test_basic():
n=64
prehashes = extensibletype.draw_hashes(np.random, n)
assert len(prehashes) == len(set(prehashes))
p, r, m_f, m_g, d = extensibletype.perfect_hash(prehashes, repeat=10**5)
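    # Recompute the two-level hash for every prehash: shift by r and mask with m_f,
    # then XOR with the displacement entry selected by (prehash & m_g). For a valid
    # perfect hash all results must be distinct, which is asserted below.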
hashes = ((prehashes >> r) & m_f) ^ d[prehashes & m_g]
print(p)
print(d)
hashes.sort()
print(hashes)
assert len(hashes) == len(np.unique(hashes))
# ---
# Test methodtable
def make_signature(type_permutation):
return "".join(type_permutation[:-1]) + '->' + type_permutation[-1]
def make_ids():
types = ['f', 'd', 'i', 'l', 'O']
power = 5
    return list(map(make_signature, itertools.product(*(types,) * power)))
def build_and_verify_methodtable(ids, flags, funcs):
table = methodtable.PerfectHashMethodTable(methodtable.Hasher())
table.generate_table(len(ids), ids, flags, funcs)
for (signature, flag, func) in zip(ids, flags, funcs):
result = table.find_method(signature)
assert result is not None
got_func, got_flag = result
assert func == got_func, (func, got_func)
# assert flag == got_flag, (flag, got_flag)
def test_methodtable():
# ids = ["ff->f", "dd->d", "ii->i", "ll->l", "OO->O"]
ids = make_ids()
flags = range(1, len(ids) + 1)
funcs = range(len(ids))
step = 100
i = len(ids)
for i in range(1, len(ids), step):
t = time.time()
build_and_verify_methodtable(ids[:i], flags[:i], funcs[:i])
t = time.time() - t
        print("%d table building took %s seconds." % (i, t))
if __name__ == '__main__':
test_methodtable()
########NEW FILE########
__FILENAME__ = test_pstdint
from . import pstdint
def test_pstdint():
pstdint.test_pstdint()
########NEW FILE########
__FILENAME__ = linregr_bench
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import linregr_python, linregr_numba, linregr_numbapro
import numpy as np
import pylab
from timeit import default_timer as timer
def populate_data(N):
noise = np.random.random(N).astype(np.float64)
X = np.arange(N, dtype=np.float64)
slope = 3
Y = noise * (slope * X)
return X, Y
def run(gradient_descent, X, Y, iterations=1000, alpha=1e-6):
theta = np.empty(2, dtype=X.dtype)
ts = timer()
gradient_descent(X, Y, theta, alpha, iterations)
te = timer()
timing = te - ts
print("x-offset = {} slope = {}".format(*theta))
print("time elapsed: {} s".format(timing))
return theta, timing
def plot(X, theta, c='r'):
result = theta[0] + theta[1] * X
pylab.plot(X, result, c=c, linewidth=2)
def main():
N = 50
X, Y = populate_data(N)
pylab.scatter(X, Y, marker='o', c='b')
pylab.title('Linear Regression')
print('Python'.center(80, '-'))
theta_python, time_python = run(linregr_python.gradient_descent, X, Y)
print('Numba'.center(80, '-'))
theta_numba, time_numba = run(linregr_numba.gradient_descent, X, Y)
print('NumbaPro'.center(80, '-'))
theta_numbapro, time_numbapro = run(linregr_numbapro.gradient_descent, X, Y)
    # make sure all methods yield the same result
assert np.allclose(theta_python, theta_numba)
assert np.allclose(theta_python, theta_numbapro)
print('Summary'.center(80, '='))
print('Numba speedup %.1fx' % (time_python / time_numba))
print('NumbaPro speedup %.1fx' % (time_python / time_numbapro))
plot(X, theta_numba, c='r')
pylab.show()
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = linregr_numba
# -*- coding: utf-8 -*-
'''
Numba does not support array expressions.
Expand the array-expression into loops.
'''
from __future__ import print_function, division, absolute_import
from numba import autojit, jit, f8, int32, void
@jit(void(f8[:], f8[:], f8[:], f8, int32))
def gradient_descent(X, Y, theta, alpha, num_iters):
m = Y.shape[0]
theta_x = 0.0
theta_y = 0.0
for i in range(num_iters):
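        # Accumulate both gradient components element by element; this is the
        # NumPy array expression from the pure-Python version expanded into an
        # explicit loop (see the module docstring).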
err_acc_x = 0.0
err_acc_y = 0.0
for j in range(X.shape[0]):
predict = theta_x + theta_y * X[j]
err_acc_x += predict - Y[j]
err_acc_y += (predict - Y[j]) * X[j]
theta_x = theta_x - alpha * (1.0 / m) * err_acc_x
theta_y = theta_y - alpha * (1.0 / m) * err_acc_y
theta[0] = theta_x
theta[1] = theta_y
########NEW FILE########
__FILENAME__ = linregr_numbapro
# -*- coding: utf-8 -*-
'''
Only added decorators to the linregr_python.py implementation.
'''
from __future__ import print_function, division, absolute_import
import numbapro
from numba import autojit, jit, f8, int32, void
@jit(void(f8[:], f8[:], f8[:], f8, int32))
def gradient_descent(X, Y, theta, alpha, num_iters):
m = Y.shape[0]
theta_x = 0.0
theta_y = 0.0
for i in range(num_iters):
predict = theta_x + theta_y * X
err_x = (predict - Y)
err_y = (predict - Y) * X
theta_x = theta_x - alpha * (1.0 / m) * err_x.sum()
theta_y = theta_y - alpha * (1.0 / m) * err_y.sum()
theta[0] = theta_x
theta[1] = theta_y
########NEW FILE########
__FILENAME__ = linregr_numbapro_cuda
# -*- coding: utf-8 -*-
'''
NumbaPro CUDA implementation
'''
from __future__ import print_function, division, absolute_import
from numbapro import cuda
from numba import autojit, jit, f8, int32, void
import numpy as np
import pylab
from timeit import default_timer as timer
@cuda.jit(void(f8[:], f8[:], f8[:], f8[:], f8, f8))
def cu_compute_error(X, Y, Ex, Ey, theta_x, theta_y):
# Compute error for each element and store in the shared-memory
Exsm = cuda.shared.array((1024,), dtype=f8)
Eysm = cuda.shared.array((1024,), dtype=f8)
tid = cuda.threadIdx.x
base = cuda.blockIdx.x * cuda.blockDim.x
i = base + tid
x = X[i]
y = Y[i]
predict = theta_x + theta_y * x
Exsm[tid] = predict - y
Eysm[tid] = (predict - y) * x
# Sum-reduce errors in the shared-memory
n = cuda.blockDim.x
while n > 1:
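        # Tree reduction: each pass folds the upper half of the shared buffers onto
        # the lower half, and syncthreads() keeps all threads of the block in step.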
cuda.syncthreads()
half = n // 2
if tid < half:
Exsm[tid] += Exsm[tid + half]
Eysm[tid] += Eysm[tid + half]
n = half
if tid == 0: # First of a block?
# Store result
Ex[cuda.blockIdx.x] = Exsm[0]
Ey[cuda.blockIdx.x] = Eysm[0]
def gradient_descent(X, Y, theta, alpha, num_iters):
N = X.size
NTID = 1024
NBLK = N // NTID
assert NBLK * NTID == N
Ex = np.empty(NBLK, dtype=X.dtype)
Ey = np.empty(NBLK, dtype=X.dtype)
theta_x, theta_y = 0, 0
# -----------------
# GPU work
dX = cuda.to_device(X)
dY = cuda.to_device(Y)
dEx = cuda.to_device(Ex, copy=False)
dEy = cuda.to_device(Ey, copy=False)
griddim = NBLK,
blockdim = NTID,
    for _ in range(num_iters):
cu_compute_error[griddim, blockdim](dX, dY, dEx, dEy, theta_x, theta_y)
dEx.to_host()
dEy.to_host()
# -----------------
# CPU work
error_x = Ex.sum()
error_y = Ey.sum()
theta_x = theta_x - alpha * (1.0 / N) * error_x
theta_y = theta_y - alpha * (1.0 / N) * error_y
theta[0] = theta_x
theta[1] = theta_y
def populate_data(N):
noise = np.random.random(N).astype(np.float64)
X = np.arange(N, dtype=np.float64)
slope = 3
Y = noise * (slope * X)
return X, Y
def plot(X, theta, c='r'):
result = theta[0] + theta[1] * X
pylab.plot(X, result, c=c, linewidth=2)
def main():
NBLK = 10
NTID = 1024
N = NBLK * NTID
    print('N = %d' % N)
X, Y = populate_data(N)
theta = np.zeros(2, dtype=X.dtype)
ts = timer()
gradient_descent(X, Y, theta, 1e-10, 1000)
te = timer()
timing = te - ts
    print('Time elapsed: %s' % timing)
pylab.scatter(X, Y, marker='o', c='b')
pylab.title('Linear Regression')
plot(X, theta)
pylab.show()
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = linregr_python
# -*- coding: utf-8 -*-
'''
The following implementation references:
http://aimotion.blogspot.com/2011/10/machine-learning-with-python-linear.html
'''
from __future__ import print_function, division, absolute_import
def gradient_descent(X, Y, theta, alpha, num_iters):
m = Y.shape[0]
theta_x = 0.0
theta_y = 0.0
for i in range(num_iters):
predict = theta_x + theta_y * X
err_x = (predict - Y)
err_y = (predict - Y) * X
theta_x = theta_x - alpha * (1.0 / m) * err_x.sum()
theta_y = theta_y - alpha * (1.0 / m) * err_y.sum()
theta[0] = theta_x
theta[1] = theta_y
########NEW FILE########
__FILENAME__ = gh-pages
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to commit the doc build outputs into the github-pages repo.
Use:
gh-pages.py [tag]
If no tag is given, the current output of 'git describe' is used. If given,
that is how the resulting directory will be named.
In practice, you should use either actual clean tags from a current build or
something like 'current' as a stable URL for the most current version of the docs."""
from __future__ import print_function, division, absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import shutil
import sys
from os import chdir as cd
from os.path import join as pjoin
from subprocess import Popen, PIPE, CalledProcessError, check_call
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
pages_dir = 'gh-pages'
html_dir = '_build/html'
pdf_dir = '_build/latex'
pages_repo = 'git@github.com:numba/numba-doc.git'
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def sh(cmd):
"""Execute command in a subshell, return status code."""
return check_call(cmd, shell=True)
def sh2(cmd):
"""Execute command in a subshell, return stdout.
    Stderr is unbuffered from the subshell."""
p = Popen(cmd, stdout=PIPE, shell=True)
out = p.communicate()[0]
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip()
def sh3(cmd):
"""Execute command in a subshell, return stdout, stderr
If anything appears in stderr, print it out to sys.stderr"""
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
out, err = p.communicate()
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip(), err.rstrip()
def init_repo(path):
"""clone the gh-pages repo if we haven't already."""
sh("git clone %s %s"%(pages_repo, path))
here = os.getcwdu()
cd(path)
sh('git checkout gh-pages')
cd(here)
#-----------------------------------------------------------------------------
# Script starts
#-----------------------------------------------------------------------------
if __name__ == '__main__':
# The tag can be given as a positional argument
try:
tag = sys.argv[1]
except IndexError:
try:
tag = sh2('git describe --exact-match')
except CalledProcessError:
tag = "dev" # Fallback
print("Using dev")
startdir = os.getcwdu()
if not os.path.exists(pages_dir):
# init the repo
init_repo(pages_dir)
else:
# ensure up-to-date before operating
cd(pages_dir)
sh('git checkout gh-pages')
sh('git pull')
cd(startdir)
dest = pjoin(pages_dir, tag)
# don't `make html` here, because gh-pages already depends on html in Makefile
# sh('make html')
if tag != 'dev':
# only build pdf for non-dev targets
#sh2('make pdf')
pass
# This is pretty unforgiving: we unconditionally nuke the destination
# directory, and then copy the html tree in there
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree(html_dir, dest)
if tag != 'dev':
#shutil.copy(pjoin(pdf_dir, 'ipython.pdf'), pjoin(dest, 'ipython.pdf'))
pass
try:
cd(pages_dir)
status = sh2('git status | head -1')
        branch = re.match(r'# On branch (.*)$', status).group(1)
if branch != 'gh-pages':
e = 'On %r, git branch is %r, MUST be "gh-pages"' % (pages_dir,
branch)
raise RuntimeError(e)
sh('git add -A %s' % tag)
sh('git commit -m"Updated doc release: %s"' % tag)
print()
print('Most recent 3 commits:')
sys.stdout.flush()
sh('git --no-pager log --oneline HEAD~3..')
finally:
cd(startdir)
print()
print('Now verify the build in: %r' % dest)
print("If everything looks good, 'git push'")
########NEW FILE########
__FILENAME__ = make_toc
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import glob
modules_dir = os.path.join("source", "modules")
# mods = [os.path.splitext(fn)[0]
# for fn in os.listdir(modules_dir) if fn.endswith(".rst")]
mods = [fn for fn in os.listdir(modules_dir) if fn.endswith(".rst")]
f = open(os.path.join(modules_dir, "modules.rst"), "w")
f.write("""
**********************
Numba Module Reference
**********************
Contents:
.. toctree::
:titlesonly:
:maxdepth: 2
""")
for mod in sorted(mods):
f.write(" %s\n" % mod)
########NEW FILE########
__FILENAME__ = conf
from __future__ import print_function, division, absolute_import
# -*- coding: utf-8 -*-
#
# llvmpy documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 8 17:33:58 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import numba
# pip install sphinxjp.themes.basicstrap
html_theme = 'basicstrap'
#html_theme = 'trstyle'
#html_theme = 'dotted'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinxjp.themecore',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'numba'
copyright = u'2012-2014, Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(numba.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = numba.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'numbadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'numba.tex', u'numba Documentation',
u'Continuum Analytics (2012)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'numba', u'numba Documentation',
[u'Continuum Analytics (2012)'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'numba', u'numba Documentation',
u'Continuum Analytics (2012)', 'numba', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
########NEW FILE########
__FILENAME__ = update-release-notes
# -*- coding: utf-8 -*-
'''
Run me to update the release notes in the documentation.
I will pull the content from ../CHANGE_LOG and insert that into
./source/releases.rst
'''
from __future__ import print_function, division, absolute_import
title_template = '''
.. DO NOT EDIT THIS FILE.
This file is automatically generated by %(this_script)s.
======================
Release Notes
======================
'''
def main():
from os.path import dirname, join, isfile
curdir = dirname(__file__)
changelog_path = join(curdir, '..', 'CHANGE_LOG')
assert isfile(changelog_path), ("%s does not exist" % changelog_path)
doc_releases = join(curdir, 'source', 'releases.rst')
with open(changelog_path) as fin:
content = fin.read()
with open(doc_releases, 'w') as fout:
fout.write(title_template % dict(this_script=__file__))
fout.write(content)
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = bubblesort
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
import numpy as np
from timeit import default_timer as timer
def bubblesort(X, doprint):
N = X.shape[0]
for end in range(N, 1, -1):
for i in range(end - 1):
cur = X[i]
if cur > X[i + 1]:
tmp = X[i]
X[i] = X[i + 1]
X[i + 1] = tmp
# if doprint:
# print("Iteration: %d" % X)
# bubblesort_fast = autojit(bubblesort)
bubblesort_fast = jit(void(int64[::1], boolean))(bubblesort)
dtype = np.int64
def main():
Xtest = np.array(list(reversed(range(8))), dtype=dtype)
print('== Test Pure-Python ==')
X0 = Xtest.copy()
bubblesort(X0, True)
print('== Test Numba == ')
X1 = Xtest.copy()
bubblesort_fast(X1, True)
# return
print(X0)
print(X1)
assert all(X0 == X1)
REP = 10
N = 1400
Xorig = np.array(list(reversed(range(N))), dtype=dtype)
t0 = timer()
for t in range(REP):
X0 = Xorig.copy()
bubblesort(X0, False)
tpython = (timer() - t0) / REP
t1 = timer()
for t in range(REP):
X1 = Xorig.copy()
bubblesort_fast(X1, False)
tnumba = (timer() - t1) / REP
assert all(X0 == X1)
print('Python', tpython)
print('Numba', tnumba)
print('Speedup', tpython / tnumba, 'x')
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = cffi_example
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import cffi_support, jit, double
from cffi import FFI
from math import pi
ffi = FFI()
ffi.cdef('double sin(double x);')
# loads the entire libm namespace
libm = ffi.dlopen("m")
c_sin = libm.sin
@jit(nopython=True)
def cffi_sin_example(x):
return c_sin(x)
print(cffi_sin_example(pi))
########NEW FILE########
__FILENAME__ = closure
# -*- coding: utf-8 -*-
"""
Example of closures. Closures may be of arbitrary depth, and they keep
the enclosing scope alive as long as the closure is alive. Only variables that are
closed over (cell variables in the defining function, free variables in the
closure) are kept alive. See also numba/tests/closures/test_closure.py
"""
from __future__ import print_function, division, absolute_import
from numba import autojit, jit, float_
from numpy import linspace
@jit
def generate_power_func(n):
@jit(float_(float_))
def nth_power(x):
return x ** n
# This is a native call
print(nth_power(10))
# Return closure and keep all cell variables alive
return nth_power
for n in range(2, 5):
func = generate_power_func(n)
    print([func(x) for x in linspace(1., 2., 10)])
########NEW FILE########
__FILENAME__ = compile_with_pycc
from numba import exportmany, export
def mult(a, b):
return a * b
export('multi i4(i4, i4)')(mult)
exportmany(['multf f4(f4, f4)', 'mult f8(f8, f8)'])(mult)
########NEW FILE########
__FILENAME__ = ctypes_example
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from ctypes import *
import sys
from math import pi
from numba import jit, double
is_windows = sys.platform.startswith('win32')
if is_windows:
raise OSError('Example does not work on Windows platforms yet.')
proc = CDLL(None)
c_sin = proc.sin
c_sin.argtypes = [c_double]
c_sin.restype = c_double
def use_c_sin(x):
return c_sin(x)
ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin)
def use_ctype_wrapping(x):
return ctype_wrapping(x)
cfunc = jit(double(double))(use_c_sin)
print(cfunc(pi))
cfunc = jit(double(double))(use_ctype_wrapping)
print(cfunc(pi))
########NEW FILE########
__FILENAME__ = example
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from scipy.misc import lena
from numpy import ones
import numpy
from numba.decorators import jit
from numba import int32, int64
# The original approach will be slower for now due to the object-mode fallback
# for numpy.zeros_like
#
# @jit(argtypes=[int32[:,:], int32[:,:]], restype=int32[:,:])
# def filter2d(image, filt):
# M, N = image.shape
# Mf, Nf = filt.shape
# Mf2 = Mf // 2
# Nf2 = Nf // 2
# result = numpy.zeros_like(image)
# for i in range(Mf2, M - Mf2):
# for j in range(Nf2, N - Nf2):
# num = 0.0
# for ii in range(Mf):
# for jj in range(Nf):
# num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
# result[i, j] = num
# return result
@jit((int64[:,::1], int32[:,::1], int64[:,::1]), nopython=True)
def filter2d_core(image, filt, result):
M, N = image.shape
Mf, Nf = filt.shape
Mf2 = Mf // 2
Nf2 = Nf // 2
for i in range(Mf2, M - Mf2):
for j in range(Nf2, N - Nf2):
num = 0
for ii in range(Mf):
for jj in range(Nf):
num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii,j-Nf2+jj])
result[i, j] = num
@jit
def filter2d(image, filt):
result = numpy.zeros_like(image)
filter2d_core(image, filt, result)
return result
image = lena()
filter = ones((7,7), dtype='int32')
result = filter2d(image, filter) # warm up
import time
start = time.time()
result = filter2d(image, filter)
duration = time.time() - start
from scipy.ndimage import convolve
start = time.time()
result2 = convolve(image, filter)
duration2 = time.time() - start
print("Time for LLVM code = %f\nTime for convolve = %f" % (duration, duration2))
from pylab import subplot, imshow, show, title, gray
subplot(1,2,1)
imshow(image)
title('Original Image')
gray()
subplot(1,2,2)
imshow(result)
title('Filtered Image')
gray()
show()
########NEW FILE########
__FILENAME__ = fbcorr
# -*- coding: utf-8 -*-
"""
This file demonstrates a filterbank correlation loop.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import numba
from numba.utils import IS_PY3
from numba.decorators import jit
nd4type = numba.double[:,:,:,:]
if IS_PY3:
xrange = range
@jit(argtypes=(nd4type, nd4type, nd4type))
def fbcorr(imgs, filters, output):
n_imgs, n_rows, n_cols, n_channels = imgs.shape
n_filters, height, width, n_ch2 = filters.shape
for ii in range(n_imgs):
for rr in range(n_rows - height + 1):
for cc in range(n_cols - width + 1):
for hh in xrange(height):
for ww in xrange(width):
for jj in range(n_channels):
for ff in range(n_filters):
imgval = imgs[ii, rr + hh, cc + ww, jj]
filterval = filters[ff, hh, ww, jj]
output[ii, ff, rr, cc] += imgval * filterval
def main():
imgs = np.random.randn(10, 64, 64, 3)
filt = np.random.randn(6, 5, 5, 3)
output = np.zeros((10, 60, 60, 6))
import time
t0 = time.time()
fbcorr(imgs, filt, output)
print(time.time() - t0)
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = findmulti
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import jit
@jit('i4(i4,f8,i4,f8[:])')
def FindMult(i,u,p,U):
s = 0
for j in range(0-p, p+2):
if U[i+j] == u:
s = s + 1
return s
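# Example usage (illustrative only -- this particular knot vector is made up):
#   import numpy as np
#   U = np.array([0., 0., 0., 0.5, 1., 1., 1.])
#   FindMult(3, 0.5, 2, U)  # counts occurrences of u == 0.5 around index 3 -> 1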
########NEW FILE########
__FILENAME__ = mandel
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import jit
import numpy as np
from pylab import imshow, jet, show, ion
@jit
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return 255
@jit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image
image = np.zeros((500, 750), dtype=np.uint8)
imshow(create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20))
jet()
ion()
show()
########NEW FILE########
__FILENAME__ = multithread
# -*- coding: utf-8 -*-
"""
Example of multithreading by releasing the GIL through ctypes.
"""
from __future__ import print_function, division, absolute_import
from timeit import repeat
import threading
from ctypes import pythonapi, c_void_p
from math import exp
import numpy as np
from numba import jit, void, double
nthreads = 2
size = 10 ** 6
def timefunc(correct, s, func, *args, **kwargs):
print(s.ljust(20), end=" ")
# Make sure the function is compiled before we start the benchmark
res = func(*args, **kwargs)
if correct is not None:
assert np.allclose(res, correct)
# time it
print('{:>5.0f} ms'.format(min(repeat(lambda: func(*args, **kwargs),
number=5, repeat=2)) * 1000))
return res
def make_singlethread(inner_func):
def func(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
inner_func(result, *args)
return result
return func
def make_multithread(inner_func, numthreads):
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
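        # Split every argument array (including the result buffer) into
        # `numthreads` contiguous chunks; each chunk is handed to one thread below.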
chunklen = (length + 1) // numthreads
chunks = [[arg[i * chunklen:(i + 1) * chunklen] for arg in args]
for i in range(numthreads)]
# You should make sure inner_func is compiled at this point, because
# the compilation must happen on the main thread. This is the case
# in this example because we use jit().
threads = [threading.Thread(target=inner_func, args=chunk)
for chunk in chunks[:-1]]
for thread in threads:
thread.start()
# the main thread handles the last chunk
inner_func(*chunks[-1])
for thread in threads:
thread.join()
return result
return func_mt
savethread = pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = c_void_p
restorethread = pythonapi.PyEval_RestoreThread
restorethread.argtypes = [c_void_p]
restorethread.restype = None
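# PyEval_SaveThread releases the GIL and returns the thread state;
# PyEval_RestoreThread re-acquires it. Calling this pair around the nopython
# loop below is what allows the worker threads to run concurrently.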
def inner_func(result, a, b):
threadstate = savethread()
for i in range(len(result)):
result[i] = exp(2.1 * a[i] + 3.2 * b[i])
restorethread(threadstate)
signature = void(double[:], double[:], double[:])
inner_func_nb = jit(signature, nopython=True)(inner_func)
func_nb = make_singlethread(inner_func_nb)
func_nb_mt = make_multithread(inner_func_nb, nthreads)
def func_np(a, b):
return np.exp(2.1 * a + 3.2 * b)
a = np.random.rand(size)
b = np.random.rand(size)
c = np.random.rand(size)
correct = timefunc(None, "numpy (1 thread)", func_np, a, b)
timefunc(correct, "numba (1 thread)", func_nb, a, b)
timefunc(correct, "numba (%d threads)" % nthreads, func_nb_mt, a, b)
########NEW FILE########
__FILENAME__ = LinearRegr
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Linear Regression with Gradient Descent Algorithm
#
# This notebook demonstrates the implementation of linear regression with the gradient descent algorithm.
#
# Consider the following implementation of the gradient descent loop with NumPy arrays based upon [1]:
# <codecell>
def gradient_descent_numpy(X, Y, theta, alpha, num_iters):
m = Y.shape[0]
theta_x = 0.0
theta_y = 0.0
for i in range(num_iters):
predict = theta_x + theta_y * X
err_x = (predict - Y)
err_y = (predict - Y) * X
theta_x = theta_x - alpha * (1.0 / m) * err_x.sum()
theta_y = theta_y - alpha * (1.0 / m) * err_y.sum()
theta[0] = theta_x
theta[1] = theta_y
# <markdowncell>
# To speed up this implementation with Numba, we add the `@jit` decorator to annotate the function signature and then expand the NumPy array expressions into explicit loops. The resulting code is shown below:
# <codecell>
from numba import autojit, jit, f8, int32, void
@jit(void(f8[:], f8[:], f8[:], f8, int32))
def gradient_descent_numba(X, Y, theta, alpha, num_iters):
m = Y.shape[0]
theta_x = 0.0
theta_y = 0.0
for i in range(num_iters):
err_acc_x = 0.0
err_acc_y = 0.0
for j in range(X.shape[0]):
predict = theta_x + theta_y * X[j]
err_acc_x += predict - Y[j]
err_acc_y += (predict - Y[j]) * X[j]
theta_x = theta_x - alpha * (1.0 / m) * err_acc_x
theta_y = theta_y - alpha * (1.0 / m) * err_acc_y
theta[0] = theta_x
theta[1] = theta_y
# <markdowncell>
# The rest of the code generates some artificial data to test our linear regression algorithm.
# <codecell>
import numpy as np
import pylab
from timeit import default_timer as timer
def populate_data(N):
noise = np.random.random(N).astype(np.float64)
X = np.arange(N, dtype=np.float64)
slope = 3
Y = noise * (slope * X)
return X, Y
def run(gradient_descent, X, Y, iterations=1000, alpha=1e-6):
theta = np.empty(2, dtype=X.dtype)
ts = timer()
gradient_descent(X, Y, theta, alpha, iterations)
te = timer()
timing = te - ts
print "x-offset = {} slope = {}".format(*theta)
print "time elapsed: {} s".format(timing)
return theta, timing
def plot(X, theta, c='r'):
result = theta[0] + theta[1] * X
pylab.plot(X, result, c=c, linewidth=2)
# <markdowncell>
# We will run a benchmark with 50 elements to compare the pure Python version against the Numba version.
# <codecell>
N = 50
X, Y = populate_data(N)
pylab.scatter(X, Y, marker='o', c='b')
pylab.title('Linear Regression')
print 'Python'.center(30, '-')
theta_python, time_python = run(gradient_descent_numpy, X, Y)
print 'Numba'.center(30, '-')
theta_numba, time_numba = run(gradient_descent_numba, X, Y)
# make sure all methods yield the same result
assert np.allclose(theta_python, theta_numba)
print 'Summary'.center(30, '=')
print 'Numba speedup %.1fx' % (time_python / time_numba)
plot(X, theta_numba, c='r')
pylab.show()
# <markdowncell>
#
# ## References
#
# [1] http://aimotion.blogspot.com/2011/10/machine-learning-with-python-linear.html
########NEW FILE########
__FILENAME__ = numba_decimal
from ctypes import CDLL, Structure, POINTER, c_longlong, c_uint, c_int, \
c_ubyte, c_char_p, byref, pointer
from numba import autojit, jit, void, c_string_type, int_, typeof, object_
import cdecimal
import random
class mpd_context(Structure):
_fields_ = [('prec', c_longlong),
('emax', c_longlong),
('emin', c_longlong),
('trap', c_uint),
('status', c_uint),
('newtrap', c_uint),
('round', c_int),
('clamp', c_int),
('allcr', c_int)]
class mpd_t(Structure):
_fields_ = [('flags', c_ubyte),
('exp', c_longlong),
('digits', c_longlong),
('len', c_longlong),
('alloc', c_longlong),
('data', POINTER(c_uint))]
dll = CDLL('/usr/local/lib/libmpdec.so.2.3')
dll.mpd_new.argtypes = []
dll.mpd_new.restype = POINTER(mpd_t)
dll.mpd_set_string.argtypes = [POINTER(mpd_t), c_char_p, POINTER(mpd_context)]
dll.mpd_set_string.restype = None
dll.mpd_to_sci.argtypes = [POINTER(mpd_t), c_int]
dll.mpd_to_sci.restype = c_char_p
dll.mpd_add.argtypes = [POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_context)]
dll.mpd_add.restype = None
dll.mpd_sub.argtypes = [POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_context)]
dll.mpd_sub.restype = None
dll.mpd_mul.argtypes = [POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_context)]
dll.mpd_mul.restype = None
dll.mpd_div.argtypes = [POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_t), POINTER(mpd_context)]
dll.mpd_div.restype = None
mpd_new_func = dll.mpd_new
mpd_set_string_func = dll.mpd_set_string
mpd_to_sci_func = dll.mpd_to_sci
mpd_add_func = dll.mpd_add
mpd_sub_func = dll.mpd_sub
mpd_mul_func = dll.mpd_mul
mpd_div_func = dll.mpd_div
context = mpd_context()
context_ref = pointer(context)
dll.mpd_init(byref(context))
@jit
class NumbaDecimal(object):
@void(c_string_type)
def __init__(self, value):
with nopython:
self.mpd = mpd_new_func()
mpd_set_string_func(self.mpd, value, context_ref)
@c_string_type()
def __repr__(self):
with nopython:
result = mpd_to_sci_func(self.mpd, 0)
return result
@jit(NumbaDecimal.exttype(NumbaDecimal.exttype, NumbaDecimal.exttype))
def add(left, right):
with nopython:
mpd_result = mpd_new_func()
mpd_add_func(mpd_result, left.mpd, right.mpd, context_ref)
return NumbaDecimal(mpd_to_sci_func(mpd_result, 0))
@jit(NumbaDecimal.exttype(NumbaDecimal.exttype, NumbaDecimal.exttype))
def sub(left, right):
with nopython:
mpd_result = mpd_new_func()
mpd_sub_func(mpd_result, left.mpd, right.mpd, context_ref)
return NumbaDecimal(mpd_to_sci_func(mpd_result, 0))
@jit(NumbaDecimal.exttype(NumbaDecimal.exttype, NumbaDecimal.exttype))
def mul(left, right):
with nopython:
mpd_result = mpd_new_func()
mpd_mul_func(mpd_result, left.mpd, right.mpd, context_ref)
return NumbaDecimal(mpd_to_sci_func(mpd_result, 0))
@jit(NumbaDecimal.exttype(NumbaDecimal.exttype, NumbaDecimal.exttype))
def div(left, right):
with nopython:
mpd_result = mpd_new_func()
mpd_div_func(mpd_result, left.mpd, right.mpd, context_ref)
return NumbaDecimal(mpd_to_sci_func(mpd_result, 0))
@autojit
def numba_test(num1, num2):
x = NumbaDecimal(num1)
y = NumbaDecimal(num2)
return mul(x, y)
@autojit
def python_test(num1, num2):
x = cdecimal.Decimal(num1)
y = cdecimal.Decimal(num2)
return x * y
def benchmark_numba():
numba_test(str(random.random()), str(random.random()))
def benchmark_python():
python_test(str(random.random()), str(random.random()))
import timeit
timer = timeit.Timer("benchmark_numba()", "from __main__ import benchmark_numba")
print 'Numba extension type times:', timer.repeat(repeat=3, number=100000)
timer = timeit.Timer("benchmark_python()", "from __main__ import benchmark_python")
print 'Python cdecimal times:', timer.repeat(repeat=3, number=100000)
########NEW FILE########
__FILENAME__ = objects
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import jit
class MyClass(object):
def mymethod(self, arg):
return arg * 2
@jit
def call_method(obj):
print(obj.mymethod("hello")) # object result
mydouble = obj.mymethod(10.2) # native double
print(mydouble * 2) # native multiplication
call_method(MyClass())
########NEW FILE########
__FILENAME__ = pointers
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import *
import numpy as np
int32p = int32.pointer()
voidp = void.pointer()
@jit
def test_pointer_arithmetic():
"""
>>> test_pointer_arithmetic()
48L
"""
p = int32p(Py_uintptr_t(0))
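    # Integer arithmetic on a typed pointer advances by whole elements,
    # so the 12 int32-sized steps below amount to 48 bytes.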
p = p + 10
p += 2
return Py_uintptr_t(p) # 0 + 4 * 12
@jit(locals={"pointer_value": Py_uintptr_t})
def test_pointer_indexing(pointer_value, type_p):
"""
>>> a = np.array([1, 2, 3, 4], dtype=np.float32)
>>> test_pointer_indexing(a.ctypes.data, float32.pointer())
(1.0, 2.0, 3.0, 4.0)
>>> a = np.array([1, 2, 3, 4], dtype=np.int64)
>>> test_pointer_indexing(a.ctypes.data, int64.pointer())
(1L, 2L, 3L, 4L)
"""
p = type_p(pointer_value)
return p[0], p[1], p[2], p[3]
@jit
def test_compare_null():
"""
>>> test_compare_null()
True
"""
return voidp(Py_uintptr_t(0)) == numba.NULL
test_pointer_arithmetic()
########NEW FILE########
__FILENAME__ = pycc_example
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import tempfile
import sys
from ctypes import *
from numba.pycc import find_shared_ending, main
is_windows = sys.platform.startswith('win32')
if is_windows:
raise OSError('Example does not work on Windows platforms yet.')
base_path = os.path.dirname(os.path.abspath(__file__))
modulename = os.path.join(base_path, 'compile_with_pycc')
cdll_modulename = modulename + find_shared_ending()
if os.path.exists(cdll_modulename):
os.unlink(cdll_modulename)
# Compile python module to library
main(args=[modulename + '.py'])
lib = CDLL(cdll_modulename)
# Load library with ctypes and call mult function
try:
lib.mult.argtypes = [POINTER(c_double), c_double, c_double]
lib.mult.restype = c_int
lib.multf.argtypes = [POINTER(c_float), c_float, c_float]
lib.multf.restype = c_int
res = c_double()
lib.mult(byref(res), 123, 321)
print('lib.mult(123, 321) = %f' % res.value)
res = c_float()
lib.multf(byref(res), 987, 321)
print('lib.multf(987, 321) = %f' % res.value)
finally:
del lib
if os.path.exists(cdll_modulename):
os.unlink(cdll_modulename)
modulename = os.path.join(base_path, 'compile_with_pycc')
tmpdir = tempfile.gettempdir()
print('tmpdir: %s' % tmpdir)
out_modulename = (os.path.join(tmpdir, 'compiled_with_pycc')
+ find_shared_ending())
# Compile python module to python extension
main(args=['--python', '-o', out_modulename, modulename + '.py'])
# Load compiled extension and call mult function
sys.path.append(tmpdir)
try:
import compiled_with_pycc as lib
try:
res = lib.mult(123, 321)
print('lib.mult(123, 321) = %f' % res)
assert res == 123 * 321
res = lib.multf(987, 321)
print('lib.multf(987, 321) = %f' % res)
assert res == 987 * 321
finally:
del lib
finally:
if os.path.exists(out_modulename):
os.unlink(out_modulename)
########NEW FILE########
__FILENAME__ = queens
# -*- coding: utf-8 -*-
# The best speedup is to decorate only hits(), but this example demonstrates
# the support for untyped objects.
from __future__ import print_function, division, absolute_import
from numba import *
# Support for typedlist is removed in this release
# ListInt = nb.typeof(nb.typedlist(int_)) # Define List[int] type
@jit(boolean(int32, int32, int32, int32))
def hits(x1, y1, x2, y2):
"Check whether a queen positioned at (x1, y1) will hit a queen at position (x2, y2)"
return x1 == x2 or y1 == y2 or abs(x1 - x2) == abs(y1 - y2)
@jit
def hitsany(x, y, queens_x, queens_y):
"Check whether a queen positioned at (x1, y1) will hit any other queen"
for i in range(len(queens_x)):
if hits(x, y, queens_x[i], queens_y[i]):
return True
return False
@jit
def _solve(n, queens_x, queens_y):
"Solve the queens puzzle"
if n == 0:
return True
for x in range(1, 9):
for y in range(1, 9):
if not hitsany(x, y, queens_x, queens_y):
queens_x.append(x)
queens_y.append(y)
if _solve(n - 1, queens_x, queens_y):
return True
queens_x.pop()
queens_y.pop()
return False
@jit
def solve(n):
queens_x = []
queens_y = []
if _solve(n, queens_x, queens_y):
return queens_x, queens_y
else:
return None, None
solve.disable_compile()
_solve.disable_compile()
hitsany.disable_compile()
print(solve(8))
# %timeit solve(8)
# Comment out @jit/@autojit
# print(solve(8))
# %timeit solve(8)
########NEW FILE########
__FILENAME__ = ra24
from numba import jit
import numpy as np
import math
import time
@jit('f4[:,:](i2,f4[:,:])')
def ra_numba(doy, lat):
M, N = lat.shape
ra = np.zeros_like(lat)
Gsc = 0.0820
    # math.pi doesn't work?
# NumbaError: 11:31: Binary operations mul on values typed object_ and object_ not (yet) supported)
pi = math.pi
#pi = 3.1415926535897932384626433832795
dr = 1 + 0.033 * math.cos( 2 * pi / 365 * doy)
decl = 0.409 * math.sin( 2 * pi / 365 * doy - 1.39 )
for i in range(M):
for j in range(N):
# it crashes without the float() wrapped around the array slicing?!
ws = math.acos(-1 * math.tan(float(lat[i,j])) * math.tan(decl))
ra[i,j] = 24 * 60 / pi * Gsc * dr * ( ws * math.sin(float(lat[i,j])) * math.sin(decl) + math.cos(float(lat[i,j])) * math.cos(decl) * math.sin(ws)) * 11.6
return ra
def ra_numpy(doy, lat):
Gsc = 0.0820
pi = math.pi
dr = 1 + 0.033 * np.cos( 2 * pi / 365 * doy)
decl = 0.409 * np.sin( 2 * pi / 365 * doy - 1.39 )
ws = np.arccos(-np.tan(lat) * np.tan(decl))
ra = 24 * 60 / pi * Gsc * dr * ( ws * np.sin(lat) * np.sin(decl) + np.cos(lat) * np.cos(decl) * np.sin(ws)) * 11.6
return ra
ra_python = ra_numba.py_func
doy = 120 # day of year
py = []
nump = []
numb = []
dims = []
for dim in [25,50,100,200,400,800,1600]:
dims.append(dim)
lat = np.deg2rad(np.ones((dim,dim), dtype=np.float32) * 45.) # array of 45 degrees latitude converted to rad
tic = time.clock()
ra_nb = ra_numba(doy, lat)
numb.append(time.clock() - tic)
tic = time.clock()
ra_np = ra_numpy(doy, lat)
nump.append(time.clock() - tic)
tic = time.clock()
ra_py = ra_python(doy, lat)
py.append(time.clock() - tic)
dims = np.array(dims)**2
py = np.array(py)
numb = np.array(numb)
nump = np.array(nump)
########NEW FILE########
__FILENAME__ = simpleadd
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import jit, autojit
@jit('f8(f8,f8)')
def addv(a,b): return a+b
@jit('f8(f8[:])')
def sum1d(A):
n = A.size
s = 0.0
for i in range(n):
a = A[i]
s = addv(s, A[i])
return s
########NEW FILE########
__FILENAME__ = strings
# -*- coding: utf-8 -*-
"""
Example of using strings with numba using libc and some basic string
functionality.
"""
from __future__ import division, absolute_import
import struct
import socket
import numba as nb
import cffi
ffi = cffi.FFI()
ffi.cdef("""
void abort(void);
char *strstr(const char *s1, const char *s2);
int atoi(const char *str);
char *strtok(char *restrict str, const char *restrict sep);
""")
lib = ffi.dlopen(None)
# For now, we need to make these globals so numba will recognize them
abort, strstr, atoi, strtok = lib.abort, lib.strstr, lib.atoi, lib.strtok
int8_p = nb.int8.pointer()
int_p = nb.int_.pointer()
@nb.autojit(nopython=True)
def parse_int_strtok(s):
"""
Convert an IP address given as a string to an int, similar to
socket.inet_aton(). Performs no error checking!
"""
result = nb.uint32(0)
current = strtok(s, ".")
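    # Each strtok() call yields the next dot-separated token; atoi() converts it
    # and the octet is shifted into its byte position, most significant octet first.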
for i in range(4):
byte = atoi(current)
shift = (3 - i) * 8
result |= byte << shift
current = strtok(int_p(nb.NULL), ".")
return result
@nb.autojit(nopython=True)
def parse_int_manual(s):
"""
Convert an IP address given as a string to an int, similar to
socket.inet_aton(). Performs no error checking!
"""
result = nb.uint32(0)
end = len(s)
start = 0
shift = 3
for i in range(end):
if s[i] == '.'[0] or i == end - 1:
byte = atoi(int8_p(s) + start)
result |= byte << (shift * 8)
shift -= 1
start = i + 1
return result
result1 = parse_int_strtok('192.168.1.2')
result2 = parse_int_manual('1.2.3.4')
print(socket.inet_ntoa(struct.pack('>I', result1)))
print(socket.inet_ntoa(struct.pack('>I', result2)))
########NEW FILE########
__FILENAME__ = structures
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
# You can also index by field name or field index:
# data[i].x == data[i]['x'] == data[i][0]
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print(hypot(a))
# Notice inferred return type
print(hypot.signature)
# Notice native sqrt calls and for.body direct access to memory...
#print(hypot.lfunc)
########NEW FILE########
__FILENAME__ = sum
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import double
from numba.decorators import jit as jit
def sum2d(arr):
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
jitsum2d = jit(sum2d)
csum2d = jitsum2d.compile(double(double[:,::1]))
from numpy import random
arr = random.randn(100, 100)
import time
start = time.time()
res = sum2d(arr)
duration = time.time() - start
print("Result from python is %s in %s (msec)" % (res, duration*1000))
csum2d(arr) # warm up
start = time.time()
res = csum2d(arr)
duration2 = time.time() - start
print("Result from compiled is %s in %s (msec)" % (res, duration2*1000))
print("Speed up is %s" % (duration / duration2))
########NEW FILE########
__FILENAME__ = ufuncs
from numba import vectorize
from numba import autojit, double, jit
import math
import numpy as np
@vectorize(['f8(f8)','f4(f4)'])
def sinc(x):
if x == 0:
return 1.0
else:
return math.sin(x*math.pi) / (x*math.pi)
@vectorize(['int8(int8,int8)',
'int16(int16,int16)',
'int32(int32,int32)',
'int64(int64,int64)',
'f4(f4,f4)',
'f8(f8,f8)'])
def add(x,y):
return x + y
@vectorize(['f8(f8)','f4(f4)'])
def logit(x):
return math.log(x / (1-x))
@vectorize(['f8(f8)','f4(f4)'])
def expit(x):
if x > 0:
x = math.exp(x)
return x / (1 + x)
else:
return 1 / (1 + math.exp(-x))
@jit('f8(f8,f8[:])')
def polevl(x, coef):
N = len(coef)
ans = coef[0]
i = 1
while i < N:
ans = ans * x + coef[i]
i += 1
return ans
@jit('f8(f8,f8[:])')
def p1evl(x, coef):
N = len(coef)
ans = x + coef[0]
i = 1
while i < N:
ans = ans * x + coef[i]
i += 1
return ans
PP = np.array([
7.96936729297347051624E-4,
8.28352392107440799803E-2,
1.23953371646414299388E0,
5.44725003058768775090E0,
8.74716500199817011941E0,
5.30324038235394892183E0,
9.99999999999999997821E-1], 'd')
PQ = np.array([
9.24408810558863637013E-4,
8.56288474354474431428E-2,
1.25352743901058953537E0,
5.47097740330417105182E0,
8.76190883237069594232E0,
5.30605288235394617618E0,
1.00000000000000000218E0], 'd')
DR1 = 5.783185962946784521175995758455807035071
DR2 = 30.47126234366208639907816317502275584842
RP = np.array([
-4.79443220978201773821E9,
1.95617491946556577543E12,
-2.49248344360967716204E14,
9.70862251047306323952E15], 'd')
RQ = np.array([
# 1.00000000000000000000E0,
4.99563147152651017219E2,
1.73785401676374683123E5,
4.84409658339962045305E7,
1.11855537045356834862E10,
2.11277520115489217587E12,
3.10518229857422583814E14,
3.18121955943204943306E16,
1.71086294081043136091E18], 'd')
QP = np.array([
-1.13663838898469149931E-2,
-1.28252718670509318512E0,
-1.95539544257735972385E1,
-9.32060152123768231369E1,
-1.77681167980488050595E2,
-1.47077505154951170175E2,
-5.14105326766599330220E1,
-6.05014350600728481186E0], 'd')
QQ = np.array([
# 1.00000000000000000000E0,
6.43178256118178023184E1,
8.56430025976980587198E2,
3.88240183605401609683E3,
7.24046774195652478189E3,
5.93072701187316984827E3,
2.06209331660327847417E3,
2.42005740240291393179E2], 'd')
NPY_PI_4 = .78539816339744830962
SQ2OPI = .79788456080286535587989
@jit('f8(f8)')
def j0(x):
if (x < 0):
x = -x
if (x <= 5.0):
z = x * x
if (x < 1.0e-5):
return (1.0 - z / 4.0)
p = (z-DR1) * (z-DR2)
p = p * polevl(z, RP) / polevl(z, RQ)
return p
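    # Large-argument branch (x > 5): asymptotic form built from the PP/PQ and
    # QP/QQ rational approximations, scaled by sqrt(2 / (pi * x)) (SQ2OPI).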
w = 5.0 / x
q = 25.0 / (x*x)
p = polevl(q, PP) / polevl(q, PQ)
q = polevl(q, QP) / p1evl(q, QQ)
xn = x - NPY_PI_4
p = p*math.cos(xn) - w * q * math.sin(xn)
return p * SQ2OPI / math.sqrt(x)
x = np.arange(10000, dtype='i8')
y = np.arange(10000, dtype='i8')
print(add(x, y))
########NEW FILE########
__FILENAME__ = gen_type_conversion
# -*- coding: utf-8 -*-
"""
Generate generated_conversions.c
Utilities adjusted from Cython/Compiler/PyrexTypes.pyx
"""
from __future__ import print_function, division, absolute_import
import os
func_name = "__Numba_PyInt_As%(SignWord)s%(TypeName)s"
header = "static %%(type)s %(FuncName)s(PyObject* x)" % { 'FuncName' : func_name}
c_int_from_py_function = """
%(Header)s {
const %(type)s neg_one = (%(type)s)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(%(type)s) < sizeof(long)) {
long val = __Numba_PyInt_AsSignedLong(x);
if (unlikely(val != (long)(%(type)s)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to %(type)s" :
"value too large to convert to %(type)s");
}
return (%(type)s)-1;
}
return (%(type)s)val;
}
return (%(type)s)__Numba_PyInt_As%(SignWord)sLong(x);
}
"""
c_long_from_py_function = """
%(Header)s {
const %(type)s neg_one = (%(type)s)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to %(type)s");
return (%(type)s)-1;
}
return (%(type)s)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to %(type)s");
return (%(type)s)-1;
}
return (%(type)s)PyLong_AsUnsigned%(TypeName)s(x);
} else {
return (%(type)s)PyLong_As%(TypeName)s(x);
}
} else {
%(type)s val;
PyObject *tmp = __Numba_PyNumber_Int(x);
if (!tmp) return (%(type)s)-1;
val = __Numba_PyInt_As%(SignWord)s%(TypeName)s(tmp);
Py_DECREF(tmp);
return val;
}
}
"""
def rank(types):
types = [type for name, type in types]
    return dict(zip(types, range(len(types))))
# Builtin C types that we know how to convert to/from objects
exact_types = (
("Char", "char"),
("Short", "short"),
("Int", "int"),
("Long", "long"),
("LongLong", "PY_LONG_LONG"),
)
rank_exact = rank(exact_types)
# Types for which we don't know the mapping to exact_types
inexact_types = (
("Py_ssize_t", "Py_ssize_t"),
("size_t", "size_t"),
("npy_intp", "npy_intp"),
)
rank_inexact = rank(inexact_types)
signednesses = (
"signed",
"unsigned",
)
def write_utility(exact_type, exact_type_name, out_c, out_h, signedness):
# Select utility template
if rank_exact[exact_type] < rank_exact["long"]:
utility = c_int_from_py_function
else:
utility = c_long_from_py_function
# Build argument dict
fmtargs = { 'TypeName' : exact_type_name,
'SignWord' : signedness.title(),
'type' : signedness + " " + exact_type }
fmtargs.update(FuncName=func_name % fmtargs,
Header=header % fmtargs)
# Write results
conversion = utility % fmtargs
out_c.write(conversion)
out_h.write(header % fmtargs + ';\n')
# print_export(fmtargs)
# print_utility_load(fmtargs, signedness)
def generate_conversions(out_c, out_h):
"Generate numba/external/utilities/generated_conversions.c"
out_c.write("/* This file is generated by %s, do not edit */\n" %
__file__)
out_c.write('#include "generated_conversions.h"\n\n')
for exact_type_name, exact_type in exact_types:
for signedness in signednesses:
write_utility(exact_type, exact_type_name, out_c, out_h, signedness)
# write_utility("char", "Char", out_c, out_h, "signed")
print("Wrote %s and %s" % (out_c.name, out_h.name))
def print_export(fmtargs):
"Code to put in type_conversion.c"
print("EXPORT_FUNCTION(%(FuncName)s, module, error)" % fmtargs)
def print_utility_load(fmtargs, signedness):
"Code to put in numba.external.utility"
typename = fmtargs['TypeName'].lower()
if signedness == "unsigned":
typename = "u" + typename
print('%-10s : load("%s", %s(object_)),' % (typename, fmtargs['FuncName'],
typename))
def open_files():
numba_root = os.path.dirname(os.path.abspath(__file__))
root = os.path.join(numba_root, "numba", "external", "utilities")
out_c = open(os.path.join(root, "generated_conversions.c"), "w")
out_h = open(os.path.join(root, "generated_conversions.h"), "w")
return out_c, out_h
def run():
generate_conversions(*open_files())
if __name__ == "__main__":
run()
########NEW FILE########
__FILENAME__ = getfailed
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import pickle, sys, os
noseid_file = 'numba/tests/.noseids'
if len(sys.argv) > 1 and sys.argv[1] == '-reset':
os.remove(noseid_file)
else:
with open(noseid_file) as fin:
noseids = pickle.load(fin)
failed = map(int, noseids['failed'])
ids = noseids['ids']
for i in failed:
        print(ids[i])
########NEW FILE########
__FILENAME__ = hello
from __future__ import print_function
import numpy
from numba import jit, int8, int16, int32, float64, complex128
@jit((float64[:], float64), nopython=True)
def foo(a, b):
c = 0
for i in range(a.shape[0]):
c += a[i] + b
a[i] += b
return c
cfoo = foo.jit((int32[:], int32), nopython=True)
print(foo.inspect_types())
@jit((int32,))
def bar(a):
return str(a) + " is a number"
class Haha: pass
@jit
def ambiguous(x):
return x
ambiguous.jit((int16,))
ambiguous.jit((int8,))
ambiguous.jit((complex128,))
ambiguous.jit((float64,))
def main():
a = numpy.arange(100, dtype='int32')
b = 2
c = cfoo(a, b)
print(a)
print(c)
a = numpy.arange(100, dtype='float64')
b = 2.
foo.disable_compile()
c = foo(a, b)
print(foo.overloads.keys())
print(a)
print(c)
print(bar(2))
print(bar(Haha()))
bar.inspect_types()
print(ambiguous(numpy.int8(1)))
print(ambiguous(numpy.array(1, dtype='int16')))
print(ambiguous(numpy.float64(1)))
print(ambiguous(numpy.complex128(1)))
ambiguous(numpy.int32(1))
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = assume
"""
A place to store all assumptions made in various part of the code base.
This allow us to do a usage analysis to discover all code that is assuming
something.
Each assumption is defined as a global variable. Its value is the
description of the assumption. Code that makes the assumption should
`assert the_assumption`
"""
return_argument_array_only = '''Only array passed into the function as
argument can be returned'''
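# Illustrative sketch: code that makes an assumption asserts the (always
# truthy) module-level string, so the dependency is easy to grep for.  For
# example, compiler.legalize_return_type does:
#
#     assert assume.return_argument_array_only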
########NEW FILE########
__FILENAME__ = bytecode
"""
From NumbaPro
"""
from __future__ import print_function, division, absolute_import
import dis
import sys
import inspect
from collections import namedtuple
from numba import utils, targets
from numba.config import PYVERSION
opcode_info = namedtuple('opcode_info', ['argsize'])
def get_function_object(obj):
"""
    Objects that wrap a function should provide a "__numba__" magic attribute
    whose value names the attribute that holds the actual python function
    object.
"""
attr = getattr(obj, "__numba__", None)
if attr:
return getattr(obj, attr)
return obj
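# Minimal sketch (hypothetical wrapper class, not part of numba) of the
# "__numba__" protocol handled above:
#
#     class Wrapped(object):
#         __numba__ = "py_func"
#         def __init__(self, py_func):
#             self.py_func = py_func
#
# get_function_object(Wrapped(f)) returns f; plain functions pass through
# unchanged.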
def get_code_object(obj):
"Shamelessly borrowed from llpython"
return getattr(obj, '__code__', getattr(obj, 'func_code', None))
def _make_bytecode_table():
if sys.version_info[:2] == (2, 6): # python 2.6
version_specific = [
('JUMP_IF_FALSE', 2),
('JUMP_IF_TRUE', 2),
]
elif sys.version_info[:2] >= (2, 7): # python 2.7+
version_specific = [
('POP_JUMP_IF_FALSE', 2),
('POP_JUMP_IF_TRUE', 2),
('JUMP_IF_TRUE_OR_POP', 2),
('JUMP_IF_FALSE_OR_POP', 2),
]
if sys.version_info[0] == 2:
version_specific += [
('BINARY_DIVIDE', 0),
('DUP_TOPX', 2),
('INPLACE_DIVIDE', 0),
('PRINT_ITEM', 0),
('PRINT_NEWLINE', 0),
('SLICE+0', 0),
('SLICE+1', 0),
('SLICE+2', 0),
('SLICE+3', 0),
('STORE_SLICE+0', 0),
('STORE_SLICE+1', 0),
('STORE_SLICE+2', 0),
('STORE_SLICE+3', 0),
]
elif sys.version_info[0] == 3:
version_specific += [
('DUP_TOP_TWO', 0)
]
bytecodes = [
# opname, operandlen
('BINARY_ADD', 0),
('BINARY_TRUE_DIVIDE', 0),
('BINARY_MULTIPLY', 0),
('BINARY_SUBSCR', 0),
('BINARY_SUBTRACT', 0),
('BINARY_FLOOR_DIVIDE', 0),
('BINARY_MODULO', 0),
('BINARY_POWER', 0),
('BINARY_AND', 0),
('BINARY_OR', 0),
('BINARY_XOR', 0),
('BINARY_LSHIFT', 0),
('BINARY_RSHIFT', 0),
('BREAK_LOOP', 0),
('BUILD_LIST', 2),
('BUILD_SLICE', 2),
('BUILD_TUPLE', 2),
('CALL_FUNCTION', 2),
('COMPARE_OP', 2),
('DUP_TOP', 0),
('FOR_ITER', 2),
('GET_ITER', 0),
('INPLACE_ADD', 0),
('INPLACE_SUBTRACT', 0),
('INPLACE_MULTIPLY', 0),
('INPLACE_TRUE_DIVIDE', 0),
('INPLACE_FLOOR_DIVIDE', 0),
('INPLACE_MODULO', 0),
('INPLACE_POWER', 0),
('INPLACE_AND', 0),
('INPLACE_OR', 0),
('INPLACE_XOR', 0),
('INPLACE_LSHIFT', 0),
('INPLACE_RSHIFT', 0),
('JUMP_ABSOLUTE', 2),
('JUMP_FORWARD', 2),
('LOAD_ATTR', 2),
('LOAD_CONST', 2),
('LOAD_FAST', 2),
('LOAD_GLOBAL', 2),
('POP_BLOCK', 0),
('POP_TOP', 0),
('RAISE_VARARGS', 2),
('RETURN_VALUE', 0),
('ROT_THREE', 0),
('ROT_TWO', 0),
('SETUP_LOOP', 2),
('STORE_FAST', 2),
# ('STORE_ATTR', 2), # not supported
('STORE_SUBSCR', 0),
('UNARY_POSITIVE', 0),
('UNARY_NEGATIVE', 0),
('UNARY_INVERT', 0),
('UNARY_NOT', 0),
('UNPACK_SEQUENCE', 2),
] + version_specific
return dict((dis.opmap[opname], opcode_info(argsize=argsize))
for opname, argsize in bytecodes)
def _as_opcodes(seq):
lst = []
for s in seq:
c = dis.opmap.get(s)
if c is not None:
lst.append(c)
return lst
BYTECODE_TABLE = _make_bytecode_table()
JREL_OPS = frozenset(dis.hasjrel)
JABS_OPS = frozenset(dis.hasjabs)
JUMP_OPS = JREL_OPS | JABS_OPS
TERM_OPS = frozenset(_as_opcodes(['RETURN_VALUE', 'RAISE_VARARGS']))
class ByteCodeInst(object):
'''
Attributes
----------
- offset:
byte offset of opcode
- opcode:
opcode integer value
- arg:
instruction arg
- lineno:
-1 means unknown
'''
__slots__ = 'offset', 'next', 'opcode', 'opname', 'arg', 'lineno'
def __init__(self, offset, opcode, arg):
self.offset = offset
self.next = offset + BYTECODE_TABLE[opcode].argsize + 1
self.opcode = opcode
self.opname = dis.opname[opcode]
self.arg = arg
self.lineno = -1 # unknown line number
@classmethod
def get(cls, offset, opname, arg):
return cls(offset, dis.opmap[opname], arg)
@property
def is_jump(self):
return self.opcode in JUMP_OPS
@property
def is_terminator(self):
return self.opcode in TERM_OPS
def get_jump_target(self):
assert self.is_jump
if self.opcode in JREL_OPS:
return self.next + self.arg
else:
assert self.opcode in JABS_OPS
return self.arg
def __repr__(self):
return '%s(arg=%s, lineno=%d)' % (self.opname, self.arg, self.lineno)
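# Worked example (sketch): for a relative jump such as
# ByteCodeInst.get(0, 'JUMP_FORWARD', 4), `next` is 0 + 2 + 1 == 3, so
# get_jump_target() returns next + arg == 7; absolute jumps return `arg` as-is.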
class ByteCodeIter(object):
def __init__(self, code):
self.code = code
if PYVERSION > (3, 0):
self.iter = enumerate(self.code.co_code)
else:
self.iter = ((i, ord(x)) for i, x in enumerate(self.code.co_code))
def __iter__(self):
return self
def next(self):
offset, opcode = next(self.iter)
try:
info = BYTECODE_TABLE[opcode]
except KeyError:
ts = "offset=%d opcode=%x opname=%s"
tv = offset, opcode, dis.opname[opcode]
raise NotImplementedError(ts % tv)
if info.argsize:
arg = self.read_arg(info.argsize)
else:
arg = None
return offset, ByteCodeInst(offset=offset, opcode=opcode, arg=arg)
__next__ = next
def read_arg(self, size):
buf = 0
for i in range(size):
_offset, byte = utils.iter_next(self.iter)
buf |= byte << (8 * i)
return buf
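# Note: read_arg assembles the operand little-endian; e.g. for a 2-byte
# argument the bytes 0x34, 0x12 yield 0x34 | (0x12 << 8) == 0x1234.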
class ByteCodeOperation(object):
def __init__(self, inst, args):
self.inst = inst
self.args = args
class ByteCodeSupportError(Exception):
pass
class ByteCodeBase(object):
__slots__ = 'func', 'func_name', 'argspec', 'filename', 'co_names', \
'co_varnames', 'co_consts', 'table', 'labels'
def __init__(self, func, func_name, argspec, filename, co_names,
co_varnames, co_consts, table, labels):
self.func = func
self.module = inspect.getmodule(func)
self.func_name = func_name
self.argspec = argspec
self.filename = filename
self.co_names = co_names
self.co_varnames = co_varnames
self.co_consts = co_consts
self.table = table
self.labels = labels
self.firstlineno = min(inst.lineno for inst in self.table.values())
def __iter__(self):
return utils.dict_itervalues(self.table)
def __getitem__(self, offset):
return self.table[offset]
def __contains__(self, offset):
return offset in self.table
def dump(self):
def label_marker(i):
if i[1].offset in self.labels:
return '>'
else:
return ' '
return '\n'.join('%s %10d\t%s' % ((label_marker(i),) + i)
for i in utils.dict_iteritems(self.table))
class CustomByteCode(ByteCodeBase):
pass
class ByteCode(ByteCodeBase):
def __init__(self, func):
func = get_function_object(func)
code = get_code_object(func)
if not code:
raise ByteCodeSupportError("%s does not provide its bytecode" %
func)
if code.co_freevars:
raise ByteCodeSupportError("does not support freevars")
if code.co_cellvars:
raise ByteCodeSupportError("does not support cellvars")
table = utils.SortedMap(ByteCodeIter(code))
labels = set(dis.findlabels(code.co_code))
labels.add(0)
self._mark_lineno(table, code)
super(ByteCode, self).__init__(func=func,
func_name=func.__name__,
argspec=inspect.getargspec(func),
filename=code.co_filename,
co_names=code.co_names,
co_varnames=code.co_varnames,
co_consts=code.co_consts,
table=table,
labels=list(sorted(labels)))
@classmethod
def _mark_lineno(cls, table, code):
'''Fill the lineno info for all bytecode inst
'''
for offset, lineno in dis.findlinestarts(code):
if offset in table:
table[offset].lineno = lineno
known = -1
for inst in table.values():
if inst.lineno >= 0:
known = inst.lineno
else:
inst.lineno = known
########NEW FILE########
__FILENAME__ = callwrapper
from __future__ import print_function, division, absolute_import
from llvm.core import Type, Builder, Constant
import llvm.core as lc
from numba import types, cgutils
class PyCallWrapper(object):
def __init__(self, context, module, func, fndesc):
self.context = context
self.module = module
self.func = func
self.fndesc = fndesc
def build(self):
wrapname = "wrapper.%s" % self.func.name
pyobj = self.context.get_argument_type(types.pyobject)
fnty = Type.function(pyobj, [pyobj, pyobj, pyobj])
wrapper = self.module.add_function(fnty, name=wrapname)
builder = Builder.new(wrapper.append_basic_block('entry'))
# builder = cgutils.VerboseProxy(builder)
_, args, kws = wrapper.args
api = self.context.get_python_api(builder)
self.build_wrapper(api, builder, args, kws)
wrapper.verify()
return wrapper, api
def build_wrapper(self, api, builder, args, kws):
nargs = len(self.fndesc.args)
keywords = self.make_keywords(self.fndesc.args)
fmt = self.make_const_string("O" * nargs)
objs = [api.alloca_obj() for _ in range(nargs)]
parseok = api.parse_tuple_and_keywords(args, kws, fmt, keywords, *objs)
pred = builder.icmp(lc.ICMP_EQ, parseok, Constant.null(parseok.type))
with cgutils.if_unlikely(builder, pred):
builder.ret(api.get_null_object())
innerargs = []
for obj, ty in zip(objs, self.fndesc.argtypes):
#api.context.debug_print(builder, "%s -> %s" % (obj, ty))
#api.print_object(builder.load(obj))
val = api.to_native_arg(builder.load(obj), ty)
innerargs.append(val)
status, res = self.context.call_function(builder, self.func,
self.fndesc.restype,
self.fndesc.argtypes,
innerargs)
with cgutils.if_likely(builder, status.ok):
with cgutils.ifthen(builder, status.none):
api.return_none()
retval = api.from_native_return(res, self.fndesc.restype)
builder.ret(retval)
with cgutils.ifthen(builder, builder.not_(status.exc)):
# User exception raised
# TODO we will just raise a RuntimeError for now.
api.raise_native_error("error in native function: %s" %
self.fndesc.mangled_name)
builder.ret(api.get_null_object())
def make_const_string(self, string):
return self.context.insert_const_string(self.module, string)
def make_keywords(self, kws):
strings = []
stringtype = Type.pointer(Type.int(8))
for k in kws:
strings.append(self.make_const_string(k))
strings.append(Constant.null(stringtype))
kwlist = Constant.array(stringtype, strings)
gv = self.module.add_global_variable(kwlist.type, name=".kwlist")
gv.global_constant = True
gv.initializer = kwlist
gv.linkage = lc.LINKAGE_INTERNAL
return Constant.bitcast(gv, Type.pointer(stringtype))
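# Note (sketch of the generated code): the wrapper follows the CPython calling
# convention PyObject *wrap(PyObject *self, PyObject *args, PyObject *kws).
# It parses the argument tuple/keywords, unboxes each argument to a native
# value, calls the jitted function, and either boxes the native return value
# or raises an error and returns NULL.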
########NEW FILE########
__FILENAME__ = cffi_support
# -*- coding: utf-8 -*-
"""
Support for CFFI. Allows checking whether objects are CFFI functions and
obtaining the pointer and numba signature.
"""
from __future__ import print_function, division, absolute_import
from numba import types, typing
try:
import cffi
ffi = cffi.FFI()
except ImportError:
ffi = None
SUPPORTED = ffi is not None
def is_cffi_func(obj):
"""Check whether the obj is a CFFI function"""
try:
return ffi.typeof(obj).kind == 'function'
except TypeError:
return False
def get_pointer(cffi_func):
"""
Get a pointer to the underlying function for a CFFI function as an
integer.
"""
return int(ffi.cast("uintptr_t", cffi_func))
def map_type(cffi_type):
"""Map CFFI type to numba type"""
kind = getattr(cffi_type, 'kind', '')
if kind in ('struct', 'union'):
raise TypeError("No support for struct or union")
elif kind == 'function':
if cffi_type.ellipsis:
raise TypeError("vararg function is not supported")
restype = map_type(cffi_type.result)
argtypes = [map_type(arg) for arg in cffi_type.args]
return typing.signature(restype, *argtypes)
else:
result = type_map.get(cffi_type)
if result is None:
raise TypeError(cffi_type)
return result
def make_function_type(cffi_func):
cffi_type = ffi.typeof(cffi_func)
signature = map_type(cffi_type)
cases = [signature]
template = typing.make_concrete_template("CFFIFuncPtr", cffi_func, cases)
result = types.FunctionPointer(template, get_pointer(cffi_func))
return result
if ffi is not None:
type_map = {
ffi.typeof('char') : types.int8,
ffi.typeof('short') : types.short,
ffi.typeof('int') : types.intc,
ffi.typeof('long') : types.long_,
ffi.typeof('long long') : types.longlong,
ffi.typeof('unsigned char') : types.uchar,
ffi.typeof('unsigned short') : types.ushort,
ffi.typeof('unsigned int') : types.uintc,
ffi.typeof('unsigned long') : types.ulong,
ffi.typeof('unsigned long long') : types.ulonglong,
ffi.typeof('int8_t') : types.char,
ffi.typeof('uint8_t') : types.uchar,
ffi.typeof('int16_t') : types.short,
ffi.typeof('uint16_t') : types.ushort,
ffi.typeof('int32_t') : types.intc,
ffi.typeof('uint32_t') : types.uintc,
ffi.typeof('int64_t') : types.longlong,
ffi.typeof('uint64_t') : types.ulonglong,
ffi.typeof('float') : types.float_,
ffi.typeof('double') : types.double,
# ffi.typeof('long double') : longdouble,
ffi.typeof('char *') : types.voidptr,
ffi.typeof('void *') : types.voidptr,
ffi.typeof('ssize_t') : types.intp,
ffi.typeof('size_t') : types.uintp,
ffi.typeof('void') : types.void,
}
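# Illustrative sketch (hypothetical library and declaration, assuming cffi is
# installed):
#
#     # ffi.cdef("double myfunc(double);")
#     # lib = ffi.dlopen("libexample.so")
#     # if is_cffi_func(lib.myfunc):
#     #     fnty = make_function_type(lib.myfunc)
#     #     # -> types.FunctionPointer with signature double(double)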
########NEW FILE########
__FILENAME__ = cgutils
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
import functools
from llvm.core import Constant, Type
import llvm.core as lc
true_bit = Constant.int(Type.int(1), 1)
false_bit = Constant.int(Type.int(1), 0)
true_byte = Constant.int(Type.int(8), 1)
false_byte = Constant.int(Type.int(8), 0)
def as_bool_byte(builder, value):
return builder.zext(value, Type.int(8))
class Structure(object):
def __init__(self, context, builder, value=None, ref=None):
self._type = context.get_struct_type(self)
self._builder = builder
if ref is None:
self._value = alloca_once(builder, self._type)
if value is not None:
assert not is_pointer(value.type)
assert value.type == self._type, (value.type, self._type)
builder.store(value, self._value)
else:
assert value is None
assert self._type == ref.type.pointee
self._value = ref
self._fdmap = {}
base = Constant.int(Type.int(), 0)
for i, (k, _) in enumerate(self._fields):
self._fdmap[k] = (base, Constant.int(Type.int(), i))
def __getattr__(self, field):
if not field.startswith('_'):
offset = self._fdmap[field]
ptr = self._builder.gep(self._value, offset)
return self._builder.load(ptr)
else:
raise AttributeError(field)
def __setattr__(self, field, value):
if field.startswith('_'):
return super(Structure, self).__setattr__(field, value)
offset = self._fdmap[field]
ptr = self._builder.gep(self._value, offset)
assert ptr.type.pointee == value.type, (str(ptr.type.pointee),
str(value.type))
self._builder.store(value, ptr)
def _getpointer(self):
return self._value
def _getvalue(self):
return self._builder.load(self._value)
def __iter__(self):
def iterator():
for field, _ in self._fields:
yield getattr(self, field)
return iter(iterator())
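# Minimal sketch (hypothetical subclass, assuming the field types are numba
# types understood by context.get_struct_type): a Structure maps named fields
# onto an alloca'd LLVM struct, so attribute access becomes a GEP plus
# load/store.
#
#     # class Pair(Structure):
#     #     _fields = [('first',  types.int32),
#     #                ('second', types.int32)]
#     #
#     # pair = Pair(context, builder)
#     # pair.first = some_i32_value    # store through a GEP
#     # value = pair.second            # load through a GEP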
def get_function(builder):
return builder.basic_block.function
def get_module(builder):
return builder.basic_block.function.module
def append_basic_block(builder, name=''):
return get_function(builder).append_basic_block(name)
@contextmanager
def goto_block(builder, bb):
bbold = builder.basic_block
if bb.instructions and bb.instructions[-1].is_terminator:
builder.position_before(bb.instructions[-1])
else:
builder.position_at_end(bb)
yield
builder.position_at_end(bbold)
@contextmanager
def goto_entry_block(builder):
fn = get_function(builder)
with goto_block(builder, fn.entry_basic_block):
yield
def alloca_once(builder, ty, name=''):
with goto_entry_block(builder):
return builder.alloca(ty, name=name)
def terminate(builder, bbend):
bb = builder.basic_block
instr = bb.instructions
if not instr or not instr[-1].is_terminator:
builder.branch(bbend)
def get_null_value(ltype):
return Constant.null(ltype)
def is_null(builder, val):
null = get_null_value(val.type)
return builder.icmp(lc.ICMP_EQ, null, val)
def is_not_null(builder, val):
null = get_null_value(val.type)
return builder.icmp(lc.ICMP_NE, null, val)
is_true = is_not_null
is_false = is_null
def set_branch_weight(builder, brinst, trueweight, falseweight):
module = get_module(builder)
mdid = lc.MetaDataString.get(module, "branch_weights")
trueweight = lc.Constant.int(Type.int(), trueweight)
falseweight = lc.Constant.int(Type.int(), falseweight)
md = lc.MetaData.get(module, [mdid, trueweight, falseweight])
brinst.set_metadata("prof", md)
@contextmanager
def if_unlikely(builder, pred):
bb = builder.basic_block
with ifthen(builder, pred):
yield
brinst = bb.instructions[-1]
set_branch_weight(builder, brinst, trueweight=1, falseweight=99)
@contextmanager
def if_likely(builder, pred):
bb = builder.basic_block
with ifthen(builder, pred):
yield
brinst = bb.instructions[-1]
set_branch_weight(builder, brinst, trueweight=99, falseweight=1)
@contextmanager
def ifthen(builder, pred):
bb = builder.basic_block
bbif = append_basic_block(builder, bb.name + '.if')
bbend = append_basic_block(builder, bb.name + '.endif')
builder.cbranch(pred, bbif, bbend)
with goto_block(builder, bbif):
yield bbend
terminate(builder, bbend)
builder.position_at_end(bbend)
@contextmanager
def ifnot(builder, pred):
with ifthen(builder, builder.not_(pred)):
yield
@contextmanager
def ifelse(builder, pred, expect=None):
bbtrue = append_basic_block(builder, 'if.true')
bbfalse = append_basic_block(builder, 'if.false')
bbendif = append_basic_block(builder, 'endif')
br = builder.cbranch(pred, bbtrue, bbfalse)
if expect is not None:
if expect:
set_branch_weight(builder, br, trueweight=99, falseweight=1)
else:
set_branch_weight(builder, br, trueweight=1, falseweight=99)
then = IfBranchObj(builder, bbtrue, bbendif)
otherwise = IfBranchObj(builder, bbfalse, bbendif)
yield then, otherwise
builder.position_at_end(bbendif)
class IfBranchObj(object):
def __init__(self, builder, bbenter, bbend):
self.builder = builder
self.bbenter = bbenter
self.bbend = bbend
def __enter__(self):
self.builder.position_at_end(self.bbenter)
def __exit__(self, exc_type, exc_val, exc_tb):
terminate(self.builder, self.bbend)
@contextmanager
def for_range(builder, count, intp):
start = Constant.int(intp, 0)
stop = count
bbcond = append_basic_block(builder, "for.cond")
bbbody = append_basic_block(builder, "for.body")
bbend = append_basic_block(builder, "for.end")
bbstart = builder.basic_block
builder.branch(bbcond)
ONE = Constant.int(intp, 1)
with goto_block(builder, bbcond):
index = builder.phi(intp, name="loop.index")
pred = builder.icmp(lc.ICMP_SLT, index, stop)
builder.cbranch(pred, bbbody, bbend)
with goto_block(builder, bbbody):
yield index
bbbody = builder.basic_block
incr = builder.add(index, ONE)
terminate(builder, bbcond)
index.add_incoming(start, bbstart)
index.add_incoming(incr, bbbody)
builder.position_at_end(bbend)
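# Usage sketch (assuming `builder` is an llvm.core.Builder positioned inside a
# function and `count` is a value of the given integer type):
#
#     # with for_range(builder, count, Type.int(64)) as index:
#     #     ...emit the loop body using `index`...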
@contextmanager
def loop_nest(builder, shape, intp):
with _loop_nest(builder, shape, intp) as indices:
assert len(indices) == len(shape)
yield indices
@contextmanager
def _loop_nest(builder, shape, intp):
with for_range(builder, shape[0], intp) as ind:
if len(shape) > 1:
with _loop_nest(builder, shape[1:], intp) as indices:
yield (ind,) + indices
else:
yield (ind,)
def pack_array(builder, values):
n = len(values)
ty = values[0].type
ary = Constant.undef(Type.array(ty, n))
for i, v in enumerate(values):
ary = builder.insert_value(ary, v, i)
return ary
def unpack_tuple(builder, tup, count):
vals = [builder.extract_value(tup, i)
for i in range(count)]
return vals
def get_item_pointer(builder, aryty, ary, inds, wraparound=False):
if wraparound:
# Wraparound
shapes = unpack_tuple(builder, ary.shape, count=aryty.ndim)
indices = []
for ind, dimlen in zip(inds, shapes):
ZERO = Constant.null(ind.type)
negative = builder.icmp(lc.ICMP_SLT, ind, ZERO)
wrapped = builder.add(dimlen, ind)
selected = builder.select(negative, wrapped, ind)
indices.append(selected)
else:
indices = inds
del inds
intp = indices[0].type
# Indexing code
if aryty.layout == 'C':
# C contiguous
shapes = unpack_tuple(builder, ary.shape, count=aryty.ndim)
steps = []
for i in range(len(shapes)):
last = Constant.int(intp, 1)
for j in shapes[i + 1:]:
last = builder.mul(last, j)
steps.append(last)
loc = Constant.int(intp, 0)
for i, s in zip(indices, steps):
tmp = builder.mul(i, s)
loc = builder.add(loc, tmp)
ptr = builder.gep(ary.data, [loc])
return ptr
else:
# Any layout
strides = unpack_tuple(builder, ary.strides, count=aryty.ndim)
dimoffs = [builder.mul(s, i) for s, i in zip(strides, indices)]
offset = functools.reduce(builder.add, dimoffs)
base = builder.ptrtoint(ary.data, offset.type)
where = builder.add(base, offset)
ptr = builder.inttoptr(where, ary.data.type)
return ptr
def get_item_pointer2(builder, data, shape, strides, layout, inds,
wraparound=False):
if wraparound:
# Wraparound
indices = []
for ind, dimlen in zip(inds, shape):
ZERO = Constant.null(ind.type)
negative = builder.icmp(lc.ICMP_SLT, ind, ZERO)
wrapped = builder.add(dimlen, ind)
selected = builder.select(negative, wrapped, ind)
indices.append(selected)
else:
indices = inds
del inds
intp = indices[0].type
# Indexing code
if layout in 'CF':
steps = []
# Compute steps for each dimension
if layout == 'C':
# C contiguous
for i in range(len(shape)):
last = Constant.int(intp, 1)
for j in shape[i + 1:]:
last = builder.mul(last, j)
steps.append(last)
elif layout == 'F':
# F contiguous
for i in range(len(shape)):
last = Constant.int(intp, 1)
for j in shape[:i]:
last = builder.mul(last, j)
steps.append(last)
else:
raise Exception("unreachable")
# Compute index
loc = Constant.int(intp, 0)
for i, s in zip(indices, steps):
tmp = builder.mul(i, s)
loc = builder.add(loc, tmp)
ptr = builder.gep(data, [loc])
return ptr
else:
# Any layout
dimoffs = [builder.mul(s, i) for s, i in zip(strides, indices)]
offset = functools.reduce(builder.add, dimoffs)
base = builder.ptrtoint(data, offset.type)
where = builder.add(base, offset)
ptr = builder.inttoptr(where, data.type)
return ptr
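# Worked example (sketch): for a C-contiguous 2-d array of shape (3, 4), the
# per-dimension steps are (4, 1), so index (i, j) maps to the flat element
# offset i*4 + j before the GEP into the data pointer.  For layout 'A' the
# byte offset is computed from the strides instead.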
def normalize_slice(builder, slice, length):
"""
    Clip the stop index of the slice so that it does not exceed the length.
"""
stop = slice.stop
doclip = builder.icmp(lc.ICMP_SGT, stop, length)
slice.stop = builder.select(doclip, length, stop)
def get_range_from_slice(builder, slicestruct):
diff = builder.sub(slicestruct.stop, slicestruct.start)
length = builder.sdiv(diff, slicestruct.step)
is_neg = is_neg_int(builder, length)
length = builder.select(is_neg, get_null_value(length.type), length)
return length
def get_strides_from_slice(builder, ndim, strides, slice, ax):
oldstrides = unpack_tuple(builder, strides, ndim)
return builder.mul(slice.step, oldstrides[ax])
class MetadataKeyStore(object):
def __init__(self, module, name):
self.module = module
self.key = name
self.nmd = self.module.get_or_insert_named_metadata("python.module")
def set(self, value):
"""
Add a string value
"""
md = lc.MetaData.get(self.module,
[lc.MetaDataString.get(self.module, value)])
self.nmd.add(md)
def get(self):
"""
Get string value
"""
node = self.nmd._ptr.getOperand(0)
return lc._make_value(node.getOperand(0)).string
def is_scalar_zero(builder, value):
nullval = Constant.null(value.type)
if value.type in (Type.float(), Type.double()):
isnull = builder.fcmp(lc.FCMP_OEQ, nullval, value)
else:
isnull = builder.icmp(lc.ICMP_EQ, nullval, value)
return isnull
def guard_null(context, builder, value):
with if_unlikely(builder, is_scalar_zero(builder, value)):
context.return_errcode(builder, 1)
guard_zero = guard_null
def is_struct(ltyp):
return ltyp.kind == lc.TYPE_STRUCT
def is_pointer(ltyp):
return ltyp.kind == lc.TYPE_POINTER
def is_struct_ptr(ltyp):
return is_pointer(ltyp) and is_struct(ltyp.pointee)
def is_neg_int(builder, val):
return builder.icmp(lc.ICMP_SLT, val, get_null_value(val.type))
# ------------------------------------------------------------------------------
# Debug
class VerboseProxy(object):
"""
Use to wrap llvm.core.Builder to track where segfault happens
"""
def __init__(self, obj):
self.__obj = obj
def __getattr__(self, key):
fn = getattr(self.__obj, key)
if callable(fn):
def wrapped(*args, **kws):
import traceback
traceback.print_stack()
print(key, args, kws)
try:
return fn(*args, **kws)
finally:
print("ok")
return wrapped
return fn
def printf(builder, format_string, *values):
str_const = Constant.stringz(format_string)
global_str_const = get_module(builder).add_global_variable(str_const.type, '')
global_str_const.initializer = str_const
idx = [Constant.int(Type.int(32), 0), Constant.int(Type.int(32), 0)]
str_addr = global_str_const.gep(idx)
args = []
for v in values:
if isinstance(v, int):
args.append(Constant.int(Type.int(), v))
elif isinstance(v, float):
args.append(Constant.real(Type.double(), v))
functype = Type.function(Type.int(32), [Type.pointer(Type.int(8))], True)
fn = get_module(builder).add_function(functype, 'printf')
builder.call(fn, [str_addr] + args)
########NEW FILE########
__FILENAME__ = compiler
from __future__ import print_function, division, absolute_import
from pprint import pprint
from contextlib import contextmanager
from collections import namedtuple, defaultdict
from numba import (bytecode, interpreter, typing, typeinfer, lowering,
irpasses, utils, config, type_annotations, types, ir,
assume, looplifting, macro)
from numba.targets import cpu
class Flags(utils.ConfigOptions):
OPTIONS = frozenset(['enable_looplift',
'enable_pyobject',
'force_pyobject',
'no_compile'])
DEFAULT_FLAGS = Flags()
CR_FIELDS = ["typing_context",
"target_context",
"entry_point",
"entry_point_addr",
"typing_error",
"type_annotation",
"llvm_module",
"llvm_func",
"signature",
"objectmode",
"lifted",
"fndesc"]
CompileResult = namedtuple("CompileResult", CR_FIELDS)
def compile_result(**kws):
keys = set(kws.keys())
fieldset = set(CR_FIELDS)
badnames = keys - fieldset
if badnames:
raise NameError(*badnames)
missing = fieldset - keys
for k in missing:
kws[k] = None
return CompileResult(**kws)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
locals={}):
"""
    Compile the function in an isolated environment.
Good for testing.
"""
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
return compile_extra(typingctx, targetctx, func, args, return_type, flags,
locals)
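# Usage sketch (mirrors how the test suite drives the compiler):
#
#     # from numba import types
#     # cres = compile_isolated(pyfunc, (types.int32, types.int32))
#     # result = cres.entry_point(1, 2)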
class _CompileStatus(object):
"""
Used like a C record
"""
__slots__ = 'fail_reason', 'use_python_mode', 'can_fallback'
@contextmanager
def _fallback_context(status):
try:
yield
except Exception as e:
if not status.can_fallback:
raise
status.fail_reason = e
status.use_python_mode = True
def compile_extra(typingctx, targetctx, func, args, return_type, flags,
locals):
"""
Args
----
- return_type
        Use ``None`` to indicate that the return type should be inferred.
"""
bc = bytecode.ByteCode(func=func)
if config.DEBUG:
print(bc.dump())
return compile_bytecode(typingctx, targetctx, bc, args,
return_type, flags, locals)
def compile_bytecode(typingctx, targetctx, bc, args, return_type, flags,
locals, lifted=()):
interp = translate_stage(bc)
nargs = len(interp.argspec.args)
if len(args) > nargs:
raise TypeError("Too many argument types")
status = _CompileStatus()
status.can_fallback = flags.enable_pyobject
status.fail_reason = None
status.use_python_mode = flags.force_pyobject
targetctx = targetctx.localized()
if not status.use_python_mode:
with _fallback_context(status):
legalize_given_types(args, return_type)
# Type inference
typemap, return_type, calltypes = type_inference_stage(typingctx,
interp,
args,
return_type,
locals)
if not status.use_python_mode:
with _fallback_context(status):
legalize_return_type(return_type, interp, targetctx)
if status.use_python_mode and flags.enable_looplift:
assert not lifted
# Try loop lifting
entry, loops = looplifting.lift_loop(bc)
if not loops:
# No extracted loops
pass
else:
loopflags = flags.copy()
# Do not recursively loop lift
loopflags.unset('enable_looplift')
loopdisp = looplifting.bind(loops, typingctx, targetctx, locals,
loopflags)
lifted = tuple(loopdisp)
cres = compile_bytecode(typingctx, targetctx, entry, args,
return_type, loopflags, locals,
lifted=lifted)
return cres._replace(lifted=lifted)
if status.use_python_mode:
# Object mode compilation
func, fnptr, lmod, lfunc, fndesc = py_lowering_stage(targetctx, interp,
flags.no_compile)
typemap = defaultdict(lambda: types.pyobject)
calltypes = defaultdict(lambda: types.pyobject)
return_type = types.pyobject
if len(args) != nargs:
# append missing
args = tuple(args) + (types.pyobject,) * (nargs - len(args))
else:
# Native mode compilation
func, fnptr, lmod, lfunc, fndesc = native_lowering_stage(targetctx,
interp,
typemap,
return_type,
calltypes,
flags.no_compile)
type_annotation = type_annotations.TypeAnnotation(interp=interp,
typemap=typemap,
calltypes=calltypes,
lifted=lifted)
if config.ANNOTATE:
print("ANNOTATION".center(80, '-'))
print(type_annotation)
print('=' * 80)
signature = typing.signature(return_type, *args)
assert lfunc.module is lmod
cr = compile_result(typing_context=typingctx,
target_context=targetctx,
entry_point=func,
entry_point_addr=fnptr,
typing_error=status.fail_reason,
type_annotation=type_annotation,
llvm_func=lfunc,
llvm_module=lmod,
signature=signature,
objectmode=status.use_python_mode,
lifted=lifted,
fndesc=fndesc,)
return cr
def _is_nopython_types(t):
return t != types.pyobject and not isinstance(t, types.Dummy)
def legalize_given_types(args, return_type):
# Filter argument types
for i, a in enumerate(args):
if not _is_nopython_types(a):
raise TypeError("Arg %d of %s is not legal in nopython "
"mode" % (i, a))
# Filter return type
if (return_type and return_type != types.none and
not _is_nopython_types(return_type)):
raise TypeError('Return type of %s is not legal in nopython '
'mode' % (return_type,))
def legalize_return_type(return_type, interp, targetctx):
"""
Only accept array return type iff it is passed into the function.
"""
assert assume.return_argument_array_only
if not isinstance(return_type, types.Array):
return
# Walk IR to discover all return statements
retstmts = []
for bid, blk in interp.blocks.items():
for inst in blk.body:
if isinstance(inst, ir.Return):
retstmts.append(inst)
    assert retstmts, "No return statements?"
# FIXME: In the future, we can return an array that is either a dynamically
# allocated array or an array that is passed as argument. This
# must be statically resolvable.
for ret in retstmts:
if ret.value.name not in interp.argspec.args:
raise ValueError("Only accept returning of array passed into the "
"function as argument")
# Legalized; tag return handling
targetctx.metadata['return.array'] = 'arg'
def translate_stage(bytecode):
interp = interpreter.Interpreter(bytecode=bytecode)
interp.interpret()
if config.DEBUG:
interp.dump()
for syn in interp.syntax_info:
print(syn)
interp.verify()
macro.expand_macros(interp.blocks)
if config.DEBUG:
interp.dump()
for syn in interp.syntax_info:
print(syn)
return interp
def type_inference_stage(typingctx, interp, args, return_type, locals={}):
if len(args) != len(interp.argspec.args):
raise TypeError("Mismatch number of argument types")
infer = typeinfer.TypeInferer(typingctx, interp.blocks)
# Seed argument types
for arg, ty in zip(interp.argspec.args, args):
infer.seed_type(arg, ty)
# Seed return type
if return_type is not None:
infer.seed_return(return_type)
# Seed local types
for k, v in locals.items():
infer.seed_type(k, v)
infer.build_constrain()
infer.propagate()
typemap, restype, calltypes = infer.unify()
if config.DEBUG:
pprint(typemap)
pprint(restype)
pprint(calltypes)
return typemap, restype, calltypes
def native_lowering_stage(targetctx, interp, typemap, restype, calltypes,
nocompile):
# Lowering
fndesc = lowering.describe_function(interp, typemap, restype, calltypes,
mangler=targetctx.mangler)
lower = lowering.Lower(targetctx, fndesc)
lower.lower()
if nocompile:
return None, 0, lower.module, lower.function, fndesc
else:
# Prepare for execution
cfunc, fnptr = targetctx.get_executable(lower.function, fndesc)
targetctx.insert_user_function(cfunc, fndesc)
return cfunc, fnptr, lower.module, lower.function, fndesc
def py_lowering_stage(targetctx, interp, nocompile):
# Optimize for python code
ir_optimize_for_py_stage(interp)
fndesc = lowering.describe_pyfunction(interp)
lower = lowering.PyLower(targetctx, fndesc)
lower.lower()
if nocompile:
return None, 0, lower.module, lower.function, fndesc
else:
cfunc, fnptr = targetctx.get_executable(lower.function, fndesc)
return cfunc, fnptr, lower.module, lower.function, fndesc
def ir_optimize_for_py_stage(interp):
"""
    These passes break semantics for the type inferer, but they reduce
    refcount calls for object mode.
"""
irpasses.RemoveRedundantAssign(interp).run()
if config.DEBUG:
print("ir optimize".center(80, '-'))
interp.dump()
########NEW FILE########
__FILENAME__ = config
from __future__ import print_function, division, absolute_import
import sys
import os
import warnings
def _readenv(name, ctor, default):
try:
res = os.environ[name]
except KeyError:
return default
else:
try:
return ctor(res)
except:
warnings.warn("environ %s defined but failed to parse '%s'" %
(name, res), RuntimeWarning)
return default
# Debug flag to control compiler debug print
DEBUG = _readenv("NUMBA_DEBUG", int, 0)
# JIT Debug flag to trigger IR instruction print
DEBUG_JIT = _readenv("NUMBA_DEBUG_JIT", int, 0)
# Optimization level
OPT = _readenv("NUMBA_OPT", int, 0)
# Force dump of LLVM IR
DUMP_LLVM = _readenv("NUMBA_DUMP_LLVM", int, DEBUG)
# Force dump of Optimized LLVM IR
DUMP_OPTIMIZED = _readenv("NUMBA_DUMP_OPTIMIZED", int, DEBUG)
# Force dump of generated assembly
DUMP_ASSEMBLY = _readenv("NUMBA_DUMP_ASSEMBLY", int, DEBUG)
# Force dump of type annotation
ANNOTATE = _readenv("NUMBA_DUMP_ANNOTATION", int, 0)
# Python version in (major, minor) tuple
PYVERSION = sys.version_info[:2]
########NEW FILE########
__FILENAME__ = controlflow
from __future__ import print_function, division, absolute_import
import functools
from numba import utils
NEW_BLOCKERS = frozenset(['SETUP_LOOP', 'FOR_ITER'])
class CFBlock(object):
def __init__(self, offset):
self.offset = offset
self.body = []
self.outgoing = set()
self.incoming = set()
self.terminating = False
def __repr__(self):
args = self.body, self.outgoing, self.incoming
return "block(body: %s, outgoing: %s, incoming: %s)" % args
def __iter__(self):
return iter(self.body)
class ControlFlowAnalysis(object):
"""
Attributes
----------
- bytecode
- blocks
- blockseq
- doms: dict of set
Dominators
- backbone: set of block offsets
        The set of blocks that are common to all possible code paths.
"""
def __init__(self, bytecode):
self.bytecode = bytecode
self.blocks = {}
self.liveblocks = {}
self.blockseq = []
self.doms = None
self.backbone = None
# Internal temp states
self._force_new_block = True
self._curblock = None
self._blockstack = []
self._loops = []
def iterblocks(self):
"""
Return all blocks in sequence of occurrence
"""
for i in self.blockseq:
yield self.blocks[i]
def iterliveblocks(self):
"""
Return all live blocks in sequence of occurrence
"""
for i in self.blockseq:
if i in self.liveblocks:
yield self.blocks[i]
def run(self):
for inst in self._iter_inst():
fname = "op_%s" % inst.opname
fn = getattr(self, fname, None)
if fn is not None:
fn(inst)
else:
assert not inst.is_jump, inst
# Close all blocks
for cur, nxt in zip(self.blockseq, self.blockseq[1:]):
blk = self.blocks[cur]
if not blk.outgoing and not blk.terminating:
blk.outgoing.add(nxt)
# Fill incoming
for b in utils.dict_itervalues(self.blocks):
for out in b.outgoing:
self.blocks[out].incoming.add(b.offset)
# Find liveblocks
self.dead_block_elimin()
# Find dominators
self.doms = find_dominators(self.liveblocks)
for lastblk in reversed(self.blockseq):
if lastblk in self.liveblocks:
break
else:
raise AssertionError("No live block that exits!?")
# Find backbone
backbone = set(self.doms[lastblk])
# Filter out in loop blocks (Assuming no other cyclic control blocks)
inloopblocks = set()
for b in self.blocks.keys():
for s, e in self._loops:
if s <= b < e:
inloopblocks.add(b)
self.backbone = backbone - inloopblocks
def dead_block_elimin(self):
firstblk = min(self.blocks.keys())
liveset = set([firstblk])
pending = set([firstblk])
finished = set()
while pending:
cur = pending.pop()
blk = self.blocks[cur]
outgoing = set(blk.outgoing)
liveset |= outgoing
pending |= outgoing - finished
finished.add(cur)
for offset in liveset:
self.liveblocks[offset] = self.blocks[offset]
def jump(self, target):
self._curblock.outgoing.add(target)
def _iter_inst(self):
for inst in self.bytecode:
if self._use_new_block(inst):
self._start_new_block(inst)
self._curblock.body.append(inst.offset)
yield inst
def _use_new_block(self, inst):
if inst.offset in self.bytecode.labels:
res = True
elif inst.opname in NEW_BLOCKERS:
res = True
else:
res = self._force_new_block
self._force_new_block = False
return res
def _start_new_block(self, inst):
self._curblock = CFBlock(inst.offset)
self.blocks[inst.offset] = self._curblock
self.blockseq.append(inst.offset)
def op_SETUP_LOOP(self, inst):
end = inst.get_jump_target()
self._blockstack.append(end)
self._loops.append((inst.offset, end))
def op_POP_BLOCK(self, inst):
self._blockstack.pop()
def op_FOR_ITER(self, inst):
self.jump(inst.get_jump_target())
self.jump(inst.next)
self._force_new_block = True
def _op_ABSOLUTE_JUMP_IF(self, inst):
self.jump(inst.get_jump_target())
self.jump(inst.next)
self._force_new_block = True
op_POP_JUMP_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_IF_TRUE = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_TRUE = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_FALSE_OR_POP = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_TRUE_OR_POP = _op_ABSOLUTE_JUMP_IF
def op_JUMP_ABSOLUTE(self, inst):
self.jump(inst.get_jump_target())
self._force_new_block = True
def op_JUMP_FORWARD(self, inst):
self.jump(inst.get_jump_target())
self._force_new_block = True
def op_RETURN_VALUE(self, inst):
self._curblock.terminating = True
self._force_new_block = True
def op_BREAK_LOOP(self, inst):
self.jump(self._blockstack[-1])
self._force_new_block = True
def find_dominators(blocks):
firstblk = min(blocks.keys())
doms = {}
for b in blocks:
doms[b] = set()
doms[firstblk].add(firstblk)
allblks = set(blocks)
remainblks = frozenset(blk.offset
for blk in utils.dict_values(blocks)
if blk.offset != firstblk)
for blk in remainblks:
doms[blk] |= allblks
changed = True
while changed:
changed = False
for blk in remainblks:
d = doms[blk]
ps = [doms[p] for p in blocks[blk].incoming if p in doms]
if not ps:
p = set()
else:
p = functools.reduce(set.intersection, ps)
new = set([blk]) | p
if new != d:
doms[blk] = new
changed = True
return doms
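# Note: this is the textbook iterative dataflow solution
#     Dom(entry) = {entry}
#     Dom(b)     = {b} | intersection(Dom(p) for p in incoming(b))
# repeated until no dominator set changes.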
########NEW FILE########
__FILENAME__ = ctypes_support
"""
This file fixes portability issues for ctypes
"""
from __future__ import absolute_import
from numba.config import PYVERSION
from ctypes import *
if PYVERSION <= (2, 7):
c_ssize_t = {
4: c_int32,
8: c_int64,
}[sizeof(c_size_t)]
########NEW FILE########
__FILENAME__ = ctypes_utils
"""
This file fixes portability issues for ctypes
"""
from __future__ import absolute_import
import ctypes
from numba import types, typing
CTYPES_MAP = {
None: types.none,
ctypes.c_int8: types.int8,
ctypes.c_int16: types.int16,
ctypes.c_int32: types.int32,
ctypes.c_int64: types.int64,
ctypes.c_uint8: types.uint8,
ctypes.c_uint16: types.uint16,
ctypes.c_uint32: types.uint32,
ctypes.c_uint64: types.uint64,
ctypes.c_float: types.float32,
ctypes.c_double: types.float64,
ctypes.c_void_p: types.voidptr,
}
def convert_ctypes(ctypeobj):
try:
return CTYPES_MAP[ctypeobj]
except KeyError:
raise TypeError("unhandled ctypes type: %s" % ctypeobj)
def is_ctypes_funcptr(obj):
try:
# Is it something of which we can get the address
ctypes.cast(obj, ctypes.c_void_p)
except ctypes.ArgumentError:
return False
else:
# Does it define argtypes and restype
return hasattr(obj, 'argtypes') and hasattr(obj, 'restype')
def make_function_type(cfnptr):
cargs = [convert_ctypes(a)
for a in cfnptr.argtypes]
cret = convert_ctypes(cfnptr.restype)
cases = [typing.signature(cret, *cargs)]
template = typing.make_concrete_template("CFuncPtr", cfnptr, cases)
pointer = ctypes.cast(cfnptr, ctypes.c_void_p).value
return types.FunctionPointer(template, pointer)
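# Illustrative sketch (hypothetical callback):
#
#     # proto = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
#     # cfunc = proto(lambda x: x * 2.0)
#     # if is_ctypes_funcptr(cfunc):
#     #     fnty = make_function_type(cfunc)   # FunctionPointer, float64(float64)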
########NEW FILE########
__FILENAME__ = api
"""
API that are reported to numba.cuda
"""
from __future__ import print_function, absolute_import
import contextlib
import numpy as np
from .cudadrv import devicearray, devices, driver
try:
long
except NameError:
long = int
# NDarray device helper
require_context = devices.require_context
current_context = devices.get_context
@require_context
def to_device(ary, stream=0, copy=True, to=None):
"""to_device(ary, stream=0, copy=True, to=None)
Allocate and transfer a numpy ndarray to the device.
To copy host->device a numpy array::
ary = numpy.arange(10)
d_ary = cuda.to_device(ary)
To enqueue the transfer to a stream::
stream = cuda.stream()
d_ary = cuda.to_device(ary, stream=stream)
The resulting ``d_ary`` is a ``DeviceNDArray``.
To copy device->host::
hary = d_ary.copy_to_host()
To copy device->host to an existing array::
ary = numpy.empty(shape=d_ary.shape, dtype=d_ary.dtype)
d_ary.copy_to_host(ary)
To enqueue the transfer to a stream::
hary = d_ary.copy_to_host(stream=stream)
"""
if to is None:
devarray = devicearray.from_array_like(ary, stream=stream)
else:
devarray = to
if copy:
devarray.copy_to_device(ary, stream=stream)
return devarray
@require_context
def device_array(shape, dtype=np.float, strides=None, order='C', stream=0):
"""device_array(shape, dtype=np.float, strides=None, order='C', stream=0)
Allocate an empty device ndarray. Similar to numpy.empty()
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype,
stream=stream)
@require_context
def pinned_array(shape, dtype=np.float, strides=None, order='C'):
"""pinned_array(shape, dtype=np.float, strides=None, order='C')
Allocate a numpy.ndarray with a buffer that is pinned (pagelocked).
Similar to numpy.empty().
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides,
dtype.itemsize)
buffer = current_context().memhostalloc(bytesize)
return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
@require_context
def mapped_array(shape, dtype=np.float, strides=None, order='C', stream=0,
portable=False, wc=False):
"""mapped_array(shape, dtype=np.float, strides=None, order='C', stream=0, portable=False, wc=False)
Allocate a mapped ndarray with a buffer that is pinned and mapped on
to the device. Similar to numpy.empty()
:param portable: a boolean flag to allow the allocated device memory to be
usable in multiple devices.
:param wc: a boolean flag to enable writecombined allocation which is faster
               to write by the host and to read by the device, but slower to
               read by the host.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
buffer = current_context().memhostalloc(bytesize, mapped=True)
npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
mappedview.device_setup(buffer, stream=stream)
return mappedview
def synchronize():
"Synchronize current context"
return current_context().synchronize()
def _prepare_shape_strides_dtype(shape, strides, dtype, order):
dtype = np.dtype(dtype)
if isinstance(shape, (int, long)):
shape = (shape,)
if isinstance(strides, (int, long)):
strides = (strides,)
else:
if shape == ():
shape = (1,)
strides = strides or _fill_stride_by_order(shape, dtype, order)
return shape, strides, dtype
def _fill_stride_by_order(shape, dtype, order):
nd = len(shape)
strides = [0] * nd
if order == 'C':
strides[-1] = dtype.itemsize
for d in reversed(range(nd - 1)):
strides[d] = strides[d + 1] * shape[d + 1]
elif order == 'F':
strides[0] = dtype.itemsize
for d in range(1, nd):
strides[d] = strides[d - 1] * shape[d - 1]
else:
raise ValueError('must be either C/F order')
return tuple(strides)
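# Worked example (sketch): shape (2, 3) with a float64 dtype (itemsize 8)
# yields strides (24, 8) for order 'C' and (8, 16) for order 'F'.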
def device_array_like(ary, stream=0):
"""Call cuda.devicearray() with information from the array.
"""
return device_array(shape=ary.shape, dtype=ary.dtype,
strides=ary.strides, stream=stream)
# Stream helper
@require_context
def stream():
"""stream()
Create a CUDA stream that represents a command queue for the device.
"""
return current_context().create_stream()
# Page lock
@require_context
@contextlib.contextmanager
def pinned(*arylist):
"""A context manager for temporary pinning a sequence of host ndarrays.
"""
pmlist = []
for ary in arylist:
pm = current_context().mempin(ary, driver.host_pointer(ary),
driver.host_memory_size(ary),
mapped=False)
pmlist.append(pm)
yield
del pmlist
@require_context
@contextlib.contextmanager
def mapped(*arylist, **kws):
"""A context manager for temporarily mapping a sequence of host ndarrays.
"""
assert not kws or 'stream' in kws, "Only accept 'stream' as keyword."
pmlist = []
stream = kws.get('stream', 0)
for ary in arylist:
        pm = current_context().mempin(ary, driver.host_pointer(ary),
                                      driver.host_memory_size(ary),
                                      mapped=True)
pmlist.append(pm)
devarylist = []
for ary, pm in zip(arylist, pmlist):
devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream)
devarylist.append(devary)
if len(devarylist) == 1:
yield devarylist[0]
else:
yield devarylist
def event(timing=True):
"""Create a CUDA event.
"""
evt = current_context().create_event(timing=timing)
return evt
# Device selection
def select_device(device_id):
"""Creates a new CUDA context with the selected device.
The context is associated with the current thread.
NumbaPro currently allows only one context per thread.
Returns a device instance
Raises exception on error.
"""
context = devices.get_context(device_id)
return context.device
def get_current_device():
"Get current device associated with the current thread"
return current_context().device
def list_devices():
"List all CUDA devices"
devices.init_gpus()
return devices.gpus
def close():
"""Explicitly closes the context.
Destroy the current context of the current thread
"""
devices.reset()
def _auto_device(ary, stream=0, copy=True):
return devicearray.auto_device(ary, stream=stream, copy=copy)
def detect():
"""Detect hardware support
"""
devlist = list_devices()
print('Found %d CUDA devices' % len(devlist))
supported_count = 0
for dev in devlist:
attrs = []
cc = dev.compute_capability
attrs += [('compute capability', '%d.%d' % cc)]
attrs += [('pci device id', dev.PCI_DEVICE_ID)]
attrs += [('pci bus id', dev.PCI_BUS_ID)]
if cc < (2, 0):
support = '[NOT SUPPORTED: CC < 2.0]'
else:
support = '[SUPPORTED]'
supported_count += 1
print('id %d %20s %40s' % (dev.id, dev.name, support))
for key, val in attrs:
print('%40s: %s' % (key, val))
print('Summary:')
print('\t%d/%d devices are supported' % (supported_count, len(devlist)))
return supported_count > 0
@contextlib.contextmanager
def defer_cleanup():
tserv = get_current_device().trashing
with tserv.defer_cleanup:
yield
# TODO
# Temporary entry-point to debug a failure for nvidia profiling tools to
# record any kind of events. Manual invocation of _profile_stop seems to be
# necessary only on windows.
# Should we make cuda.close() call _profile_stop()?
_profiling = require_context(driver.profiling)
_profile_start = require_context(driver.profile_start)
_profile_stop = require_context(driver.profile_stop)
########NEW FILE########
__FILENAME__ = compiler
from __future__ import absolute_import, print_function
import copy
import ctypes
from numba import compiler, types
from numba.typing.templates import ConcreteTemplate
from numba import typing, lowering, dispatcher
from .cudadrv.devices import get_context
from .cudadrv import nvvm, devicearray, driver
def compile_cuda(pyfunc, return_type, args, debug):
# First compilation will trigger the initialization of the CUDA backend.
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set('no_compile')
# Run compilation pipeline
cres = compiler.compile_extra(typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={})
# Linking depending libraries
targetctx.link_dependencies(cres.llvm_module, cres.target_context.linking)
# Fix global naming
for gv in cres.llvm_module.global_variables:
if '.' in gv.name:
gv.name = gv.name.replace('.', '_')
return cres
def compile_kernel(pyfunc, args, link, debug=False):
cres = compile_cuda(pyfunc, types.void, args, debug=debug)
kernel = cres.target_context.prepare_cuda_kernel(cres.llvm_func,
cres.signature.args)
cres = cres._replace(llvm_func=kernel)
cukern = CUDAKernel(llvm_module=cres.llvm_module,
name=cres.llvm_func.name,
argtypes=cres.signature.args,
link=link)
return cukern
def compile_device(pyfunc, return_type, args, inline=True, debug=False):
cres = compile_cuda(pyfunc, return_type, args, debug=debug)
devfn = DeviceFunction(cres)
class device_function_template(ConcreteTemplate):
key = devfn
cases = [cres.signature]
cres.typing_context.insert_user_function(devfn, device_function_template)
libs = [cres.llvm_module]
cres.target_context.insert_user_function(devfn, cres.fndesc, libs)
return devfn
def declare_device_function(name, restype, argtypes):
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = lowering.describe_external(name=name, restype=restype,
argtypes=argtypes)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
class DeviceFunction(object):
def __init__(self, cres):
self.cres = cres
class ExternFunction(object):
def __init__(self, name, sig):
self.name = name
self.sig = sig
class CUDARuntimeError(RuntimeError):
def __init__(self, exc, tx, ty, tz, bx, by):
self.tid = tx, ty, tz
self.ctaid = bx, by
self.exc = exc
t = ("An exception was raised in thread=%s block=%s\n"
"\t%s: %s")
msg = t % (self.tid, self.ctaid, type(self.exc).__name__, self.exc)
super(CUDARuntimeError, self).__init__(msg)
class CUDAKernelBase(object):
"""Define interface for configurable kernels
"""
def __init__(self):
self.griddim = (1, 1)
self.blockdim = (1, 1, 1)
self.sharedmem = 0
self.stream = 0
def copy(self):
return copy.copy(self)
def configure(self, griddim, blockdim, stream=0, sharedmem=0):
if not isinstance(griddim, (tuple, list)):
griddim = [griddim]
else:
griddim = list(griddim)
if len(griddim) > 2:
raise ValueError('griddim must be a tuple/list of two ints')
while len(griddim) < 2:
griddim.append(1)
if not isinstance(blockdim, (tuple, list)):
blockdim = [blockdim]
else:
blockdim = list(blockdim)
if len(blockdim) > 3:
raise ValueError('blockdim must be tuple/list of three ints')
while len(blockdim) < 3:
blockdim.append(1)
clone = self.copy()
clone.griddim = tuple(griddim)
clone.blockdim = tuple(blockdim)
clone.stream = stream
clone.sharedmem = sharedmem
return clone
def __getitem__(self, args):
if len(args) not in [2, 3, 4]:
raise ValueError('must specify at least the griddim and blockdim')
return self.configure(*args)
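# Usage sketch: kernels are configured with subscript syntax before launch;
# __getitem__ forwards to configure() and returns a configured copy:
#
#     # kernel[griddim, blockdim](*args)
#     # kernel[griddim, blockdim, stream, sharedmem](*args)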
class CachedPTX(object):
"""A PTX cache that uses compute capability as a cache key
"""
def __init__(self, llvmir):
self.llvmir = llvmir
self.cache = {}
def get(self):
"""
Get PTX for the current active context.
"""
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
ptx = self.cache.get(cc)
if ptx is None:
arch = nvvm.get_arch_option(*cc)
ptx = nvvm.llvm_to_ptx(self.llvmir, opt=3, arch=arch)
self.cache[cc] = ptx
return ptx
class CachedCUFunction(object):
"""
Get or compile CUDA function for the current active context
Uses device ID as key for cache.
"""
def __init__(self, entry_name, ptx, linking):
self.entry_name = entry_name
self.ptx = ptx
self.linking = linking
self.cache = {}
self.ccinfos = {}
def get(self):
cuctx = get_context()
device = cuctx.device
cufunc = self.cache.get(device.id)
if cufunc is None:
ptx = self.ptx.get()
# Link
linker = driver.Linker()
linker.add_ptx(ptx)
for path in self.linking:
linker.add_file_guess_ext(path)
cubin, _size = linker.complete()
compile_info = linker.info_log
module = cuctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self.entry_name)
self.cache[device.id] = cufunc
self.ccinfos[device.id] = compile_info
return cufunc
class CUDAKernel(CUDAKernelBase):
def __init__(self, llvm_module, name, argtypes, link=()):
super(CUDAKernel, self).__init__()
self.entry_name = name
self.argument_types = tuple(argtypes)
self.linking = tuple(link)
ptx = CachedPTX(str(llvm_module))
self._func = CachedCUFunction(self.entry_name, ptx, link)
def __call__(self, *args, **kwargs):
assert not kwargs
self._kernel_call(args=args,
griddim=self.griddim,
blockdim=self.blockdim,
stream=self.stream,
sharedmem=self.sharedmem)
def bind(self):
"""
Force binding to current CUDA context
"""
self._func.get()
@property
def ptx(self):
return self._func.ptx.get().decode('utf8')
def _kernel_call(self, args, griddim, blockdim, stream=0, sharedmem=0):
# Prepare kernel
cufunc = self._func.get()
# Prepare arguments
retr = [] # hold functors for writeback
args = [self._prepare_args(t, v, stream, retr)
for t, v in zip(self.argument_types, args)]
# Configure kernel
cu_func = cufunc.configure(griddim, blockdim,
stream=stream,
sharedmem=sharedmem)
# invoke kernel
cu_func(*args)
# retrieve auto converted arrays
for wb in retr:
wb()
def _prepare_args(self, ty, val, stream, retr):
if isinstance(ty, types.Array):
devary, conv = devicearray.auto_device(val, stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(val, stream=stream))
return devary.as_cuda_arg()
elif isinstance(ty, types.Integer):
return getattr(ctypes, "c_%s" % ty)(val)
elif ty == types.float64:
return ctypes.c_double(val)
elif ty == types.float32:
return ctypes.c_float(val)
elif ty == types.boolean:
return ctypes.c_uint8(int(val))
elif ty == types.complex64:
ctx = get_context()
size = ctypes.sizeof(Complex64)
dmem = ctx.memalloc(size)
cval = Complex64(val)
driver.host_to_device(dmem, ctypes.addressof(cval), size,
stream=stream)
return dmem
elif ty == types.complex128:
ctx = get_context()
size = ctypes.sizeof(Complex128)
dmem = ctx.memalloc(size)
cval = Complex128(val)
driver.host_to_device(dmem, ctypes.addressof(cval), size,
stream=stream)
return dmem
else:
raise NotImplementedError(ty, val)
class Complex(ctypes.Structure):
def __init__(self, val):
super(Complex, self).__init__()
if isinstance(val, complex):
self.real = val.real
self.imag = val.imag
else:
self.real = val
class Complex64(Complex):
_fields_ = [
('real', ctypes.c_float),
('imag', ctypes.c_float)
]
class Complex128(Complex):
_fields_ = [
('real', ctypes.c_double),
('imag', ctypes.c_double),
]
class AutoJitCUDAKernel(CUDAKernelBase):
def __init__(self, func, bind):
super(AutoJitCUDAKernel, self).__init__()
self.py_func = func
self.bind = bind
self.definitions = {}
def __call__(self, *args):
argtypes = tuple([dispatcher.typeof_pyval(a) for a in args])
kernel = self.definitions.get(argtypes)
if kernel is None:
kernel = compile_kernel(self.py_func, argtypes, link=())
self.definitions[argtypes] = kernel
if self.bind:
kernel.bind()
cfg = kernel[self.griddim, self.blockdim, self.stream, self.sharedmem]
cfg(*args)
########NEW FILE########
__FILENAME__ = cudadecl
from __future__ import print_function, division, absolute_import
from numba import types
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, MacroTemplate,
signature, Registry)
from numba import cuda
registry = Registry()
intrinsic = registry.register
intrinsic_attr = registry.register_attr
intrinsic_global = registry.register_global
class Cuda_grid(MacroTemplate):
key = cuda.grid
class Cuda_threadIdx_x(MacroTemplate):
key = cuda.threadIdx.x
class Cuda_threadIdx_y(MacroTemplate):
key = cuda.threadIdx.y
class Cuda_threadIdx_z(MacroTemplate):
key = cuda.threadIdx.z
class Cuda_blockIdx_x(MacroTemplate):
key = cuda.blockIdx.x
class Cuda_blockIdx_y(MacroTemplate):
key = cuda.blockIdx.y
class Cuda_blockIdx_z(MacroTemplate):
key = cuda.blockIdx.z
class Cuda_blockDim_x(MacroTemplate):
key = cuda.blockDim.x
class Cuda_blockDim_y(MacroTemplate):
key = cuda.blockDim.y
class Cuda_blockDim_z(MacroTemplate):
key = cuda.blockDim.z
class Cuda_gridDim_x(MacroTemplate):
key = cuda.gridDim.x
class Cuda_gridDim_y(MacroTemplate):
key = cuda.gridDim.y
class Cuda_gridDim_z(MacroTemplate):
key = cuda.gridDim.z
class Cuda_shared_array(MacroTemplate):
key = cuda.shared.array
class Cuda_local_array(MacroTemplate):
key = cuda.local.array
class Cuda_const_arraylike(MacroTemplate):
key = cuda.const.array_like
@intrinsic
class Cuda_syncthreads(ConcreteTemplate):
key = cuda.syncthreads
cases = [signature(types.none)]
@intrinsic
class Cuda_atomic_add(AbstractTemplate):
key = cuda.atomic.add
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if ary.ndim == 1:
return signature(ary.dtype, ary, types.intp, ary.dtype)
elif ary.ndim > 1:
return signature(ary.dtype, ary, idx, ary.dtype)
@intrinsic_attr
class Cuda_threadIdx(AttributeTemplate):
key = types.Module(cuda.threadIdx)
def resolve_x(self, mod):
return types.Macro(Cuda_threadIdx_x)
def resolve_y(self, mod):
return types.Macro(Cuda_threadIdx_y)
def resolve_z(self, mod):
return types.Macro(Cuda_threadIdx_z)
@intrinsic_attr
class Cuda_blockIdx(AttributeTemplate):
key = types.Module(cuda.blockIdx)
def resolve_x(self, mod):
return types.Macro(Cuda_blockIdx_x)
def resolve_y(self, mod):
return types.Macro(Cuda_blockIdx_y)
def resolve_z(self, mod):
return types.Macro(Cuda_blockIdx_z)
@intrinsic_attr
class Cuda_blockDim(AttributeTemplate):
key = types.Module(cuda.blockDim)
def resolve_x(self, mod):
return types.Macro(Cuda_blockDim_x)
def resolve_y(self, mod):
return types.Macro(Cuda_blockDim_y)
def resolve_z(self, mod):
return types.Macro(Cuda_blockDim_z)
@intrinsic_attr
class Cuda_gridDim(AttributeTemplate):
key = types.Module(cuda.gridDim)
def resolve_x(self, mod):
return types.Macro(Cuda_gridDim_x)
def resolve_y(self, mod):
return types.Macro(Cuda_gridDim_y)
def resolve_z(self, mod):
return types.Macro(Cuda_gridDim_z)
@intrinsic_attr
class CudaSharedModuleTemplate(AttributeTemplate):
key = types.Module(cuda.shared)
def resolve_array(self, mod):
return types.Macro(Cuda_shared_array)
@intrinsic_attr
class CudaConstModuleTemplate(AttributeTemplate):
key = types.Module(cuda.const)
def resolve_array_like(self, mod):
return types.Macro(Cuda_const_arraylike)
@intrinsic_attr
class CudaLocalModuleTemplate(AttributeTemplate):
key = types.Module(cuda.local)
def resolve_array(self, mod):
return types.Macro(Cuda_local_array)
@intrinsic_attr
class CudaAtomicTemplate(AttributeTemplate):
key = types.Module(cuda.atomic)
def resolve_add(self, mod):
return types.Function(Cuda_atomic_add)
@intrinsic_attr
class CudaModuleTemplate(AttributeTemplate):
key = types.Module(cuda)
def resolve_grid(self, mod):
return types.Macro(Cuda_grid)
def resolve_threadIdx(self, mod):
return types.Module(cuda.threadIdx)
def resolve_blockIdx(self, mod):
return types.Module(cuda.blockIdx)
def resolve_blockDim(self, mod):
return types.Module(cuda.blockDim)
def resolve_gridDim(self, mod):
return types.Module(cuda.gridDim)
def resolve_shared(self, mod):
return types.Module(cuda.shared)
def resolve_syncthreads(self, mod):
return types.Function(Cuda_syncthreads)
def resolve_atomic(self, mod):
return types.Module(cuda.atomic)
def resolve_const(self, mod):
return types.Module(cuda.const)
def resolve_local(self, mod):
return types.Module(cuda.local)
intrinsic_global(cuda, types.Module(cuda))
## Force the use of the cuda namespace by not recognizing the following
## individually as globals.
# intrinsic_global(cuda.grid, types.Function(Cuda_grid))
# intrinsic_global(cuda.threadIdx, types.Module(cuda.threadIdx))
# intrinsic_global(cuda.shared, types.Module(cuda.shared))
# intrinsic_global(cuda.shared.array, types.Function(Cuda_shared_array))
# intrinsic_global(cuda.syncthreads, types.Function(Cuda_syncthreads))
# intrinsic_global(cuda.atomic, types.Module(cuda.atomic))
########NEW FILE########
__FILENAME__ = devicearray
"""
A CUDA ND Array is recognized by checking the __cuda_memory__ attribute
on the object. If it exists and evaluates to True, the object must define shape,
strides, dtype and size attributes similar to a NumPy ndarray.
"""
from __future__ import print_function, absolute_import, division
import warnings
import math
import numpy as np
from .ndarray import (ndarray_populate_head, ArrayHeaderManager)
from . import driver as _driver
from . import devices
from numba import dummyarray
try:
long
except NameError:
long = int
def is_cuda_ndarray(obj):
"Check if an object is a CUDA ndarray"
return getattr(obj, '__cuda_ndarray__', False)
def verify_cuda_ndarray_interface(obj):
"Verify the CUDA ndarray interface for an obj"
require_cuda_ndarray(obj)
def requires_attr(attr, typ):
if not hasattr(obj, attr):
raise AttributeError(attr)
if not isinstance(getattr(obj, attr), typ):
raise AttributeError('%s must be of type %s' % (attr, typ))
requires_attr('shape', tuple)
requires_attr('strides', tuple)
requires_attr('dtype', np.dtype)
requires_attr('size', (int, long))
def require_cuda_ndarray(obj):
"Raise ValueError if is_cuda_ndarray(obj) does not evaluate to True"
if not is_cuda_ndarray(obj):
raise ValueError('require a CUDA ndarray object')
class DeviceNDArrayBase(object):
"""An on-GPU NDArray representation
"""
__cuda_memory__ = True
__cuda_ndarray__ = True # There must be a gpu_head and gpu_data attribute
def __init__(self, shape, strides, dtype, stream=0, writeback=None,
gpu_head=None, gpu_data=None):
"""
Args
----
shape
array shape.
strides
array strides.
dtype
data type as numpy.dtype.
stream
cuda stream.
writeback
Deprecated.
gpu_head
user provided device memory for the ndarray head structure
gpu_data
user provided device memory for the ndarray data buffer
"""
if isinstance(shape, (int, long)):
shape = (shape,)
if isinstance(strides, (int, long)):
strides = (strides,)
self.ndim = len(shape)
if len(strides) != self.ndim:
raise ValueError('strides do not match ndim')
self._dummy = dummyarray.Array.from_desc(0, shape, strides,
dtype.itemsize)
self.shape = tuple(shape)
self.strides = tuple(strides)
self.dtype = np.dtype(dtype)
self.size = int(np.prod(self.shape))
# prepare gpu memory
if gpu_data is None:
self.alloc_size = _driver.memory_size_from_info(self.shape,
self.strides,
self.dtype.itemsize)
gpu_data = devices.get_context().memalloc(self.alloc_size)
else:
self.alloc_size = _driver.device_memory_size(gpu_data)
self.gpu_mem = ArrayHeaderManager(devices.get_context())
if gpu_head is None:
gpu_head = self.gpu_mem.allocate(self.ndim)
ndarray_populate_head(gpu_head, gpu_data, self.shape,
self.strides, stream=stream)
self.gpu_head = gpu_head
self.gpu_data = gpu_data
self.__writeback = writeback # should deprecate the use of this
def __del__(self):
try:
self.gpu_mem.free(self.gpu_head)
except:
pass
@property
def device_ctypes_pointer(self):
"""Returns the ctypes pointer to the GPU data buffer
"""
return self.gpu_data.device_ctypes_pointer
def copy_to_device(self, ary, stream=0):
"""Copy `ary` to `self`.
If `ary` is a CUDA memory, perform a device-to-device transfer.
Otherwise, perform a host-to-device transfer.
"""
if _driver.is_device_memory(ary):
sz = min(_driver.device_memory_size(self),
_driver.device_memory_size(ary))
_driver.device_to_device(self, ary, sz, stream=stream)
else:
sz = min(_driver.host_memory_size(ary), self.alloc_size)
_driver.host_to_device(self, ary, sz, stream=stream)
def copy_to_host(self, ary=None, stream=0):
"""Copy ``self`` to ``ary`` or create a new numpy ndarray
if ``ary`` is ``None``.
Always returns the host array.
"""
if ary is None:
hostary = np.empty(shape=self.alloc_size, dtype=np.byte)
else:
if ary.dtype != self.dtype:
raise TypeError('incompatible dtype')
if ary.shape != self.shape:
scalshapes = (), (1,)
if not (ary.shape in scalshapes and self.shape in scalshapes):
raise TypeError('incompatible shape; device %s; host %s' %
(self.shape, ary.shape))
if ary.strides != self.strides:
scalstrides = (), (self.dtype.itemsize,)
if not (ary.strides in scalstrides and
self.strides in scalstrides):
raise TypeError('incompatible strides; device %s; host %s' %
(self.strides, ary.strides))
hostary = ary
_driver.device_to_host(hostary, self, self.alloc_size, stream=stream)
if ary is None:
hostary = np.ndarray(shape=self.shape, strides=self.strides,
dtype=self.dtype, buffer=hostary)
return hostary
def to_host(self, stream=0):
warnings.warn("to_host() is deprecated and will be removed",
DeprecationWarning)
if self.__writeback is None:
raise ValueError("no associated writeback array")
self.copy_to_host(self.__writeback, stream=stream)
def split(self, section, stream=0):
"""Split the array into equal partitions of the `section` size.
If the array cannot be equally divided, the last section will be
smaller.
"""
if self.ndim != 1:
raise ValueError("only support 1d array")
if self.strides[0] != self.dtype.itemsize:
raise ValueError("only support unit stride")
nsect = int(math.ceil(float(self.size) / section))
strides = self.strides
itemsize = self.dtype.itemsize
for i in range(nsect):
begin = i * section
end = min(begin + section, self.size)
shape = (end - begin,)
gpu_data = self.gpu_data.view(begin * itemsize, end * itemsize)
yield DeviceNDArray(shape, strides, dtype=self.dtype, stream=stream,
gpu_data=gpu_data)
def as_cuda_arg(self):
"""Returns a device memory object that is used as the argument.
"""
return self.gpu_head
class DeviceNDArray(DeviceNDArrayBase):
def is_f_contiguous(self):
return self._dummy.is_f_contig
def is_c_contiguous(self):
return self._dummy.is_c_contig
def reshape(self, *newshape, **kws):
"""reshape(self, *newshape, order='C'):
Reshape the array while keeping the original data.
"""
newarr, extents = self._dummy.reshape(*newshape, **kws)
cls = type(self)
if extents == [self._dummy.extent]:
return cls(shape=newarr.shape, strides=newarr.strides,
dtype=self.dtype, gpu_data=self.gpu_data)
else:
raise NotImplementedError("operation requires copying")
def ravel(self, order='C', stream=0):
cls = type(self)
newarr, extents = self._dummy.ravel(order=order)
if extents == [self._dummy.extent]:
return cls(shape=newarr.shape, strides=newarr.strides,
dtype=self.dtype, gpu_data=self.gpu_data,
gpu_head=self.gpu_head, stream=stream)
else:
raise NotImplementedError("operation requires copying")
def __getitem__(self, item):
arr = self._dummy.__getitem__(item)
extents = list(arr.iter_contiguous_extent())
cls = type(self)
if len(extents) == 1:
newdata = self.gpu_data.view(*extents[0])
if dummyarray.is_element_indexing(item, self.ndim):
hostary = np.empty(1, dtype=self.dtype)
_driver.device_to_host(dst=hostary, src=newdata,
size=self._dummy.itemsize)
return hostary[0]
else:
return cls(shape=arr.shape, strides=arr.strides,
dtype=self.dtype, gpu_data=newdata)
else:
newdata = self.gpu_data.view(*arr.extent)
return cls(shape=arr.shape, strides=arr.strides,
dtype=self.dtype, gpu_data=newdata)
class MappedNDArray(DeviceNDArrayBase, np.ndarray):
"""
A host array that uses CUDA mapped memory.
"""
def device_setup(self, gpu_data, stream=0):
self.gpu_mem = ArrayHeaderManager(devices.get_context())
gpu_head = self.gpu_mem.allocate(self.ndim)
ndarray_populate_head(gpu_head, gpu_data, self.shape,
self.strides, stream=stream)
self.gpu_data = gpu_data
self.gpu_head = gpu_head
def __del__(self):
try:
self.gpu_mem.free(self.gpu_head)
except:
pass
def from_array_like(ary, stream=0, gpu_head=None, gpu_data=None):
"Create a DeviceNDArray object that is like ary."
if ary.ndim == 0:
ary = ary.reshape(1)
return DeviceNDArray(ary.shape, ary.strides, ary.dtype,
writeback=ary, stream=stream, gpu_head=gpu_head,
gpu_data=gpu_data)
errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot "
"be transferred as a single memory region. Please "
"ensure contiguous buffer with numpy "
".ascontiguousarray()")
def sentry_contiguous(ary):
if not ary.flags['C_CONTIGUOUS'] and not ary.flags['F_CONTIGUOUS']:
if ary.ndim != 1 or ary.shape[0] != 1 or ary.strides[0] != 0:
raise ValueError(errmsg_contiguous_buffer)
def auto_device(ary, stream=0, copy=True):
if _driver.is_device_memory(ary):
return ary, False
else:
sentry_contiguous(ary)
devarray = from_array_like(ary, stream=stream)
if copy:
devarray.copy_to_device(ary, stream=stream)
return devarray, True
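# Illustrative sketch (assumes an initialized CUDA context): a typical round trip
# through the helpers above.
#
#   import numpy as np
#   ary = np.arange(10, dtype=np.float32)
#   d_ary, copied = auto_device(ary)      # host -> device transfer, copied is True
#   ...                                   # launch kernels on d_ary
#   result = d_ary.copy_to_host()         # device -> host transfer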
########NEW FILE########
__FILENAME__ = devices
"""
Expose each GPU device directly
"""
from __future__ import print_function, absolute_import, division
import functools
from numba import servicelib
from .driver import driver
gpus = []
def init_gpus():
"""
Populates global "gpus" as a list of GPU objects
"""
if gpus:
assert len(gpus)
return
for num in range(driver.get_device_count()):
device = driver.get_device(num)
gpu = GPU(device)
gpus.append(gpu)
globals()['gpu%d' % num] = gpu
class GPU(object):
"""Proxy into driver.Device
"""
def __init__(self, gpu):
self._gpu = gpu
self._context = None
def __getattr__(self, key):
"""Redirect to self._gpu
"""
if key.startswith('_'):
raise AttributeError(key)
return getattr(self._gpu, key)
def __repr__(self):
return repr(self._gpu)
@property
def context(self):
if self._context is None:
self._context = self._gpu.create_context()
return self._context
def pop(self):
self._context.pop()
def push(self):
self._context.push()
def __enter__(self):
if get_context() is not self:
self._context.push()
_gpustack.push(self)
def __exit__(self, exc_type, exc_val, exc_tb):
assert get_context() is self
self._context.pop()
_gpustack.pop()
def reset(self):
if self._context:
self._context.reset()
self._context = None
def get_gpu(i):
init_gpus()
return gpus[i]
_gpustack = servicelib.TLStack()
def get_context(devnum=0):
if not _gpustack:
_gpustack.push(get_gpu(devnum).context)
return _gpustack.top
def require_context(fn):
"""
A decorator to ensure a context for the CUDA subsystem
"""
@functools.wraps(fn)
def _require_cuda_context(*args, **kws):
get_context()
return fn(*args, **kws)
return _require_cuda_context
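# Illustrative sketch (assumes at least one CUDA-capable GPU): typical use of the
# helpers above.
#
#   ctx = get_context()        # lazily creates and activates a context on gpu 0
#
#   @require_context
#   def do_gpu_work():
#       ...                    # body runs with an active CUDA context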
def reset():
for gpu in gpus:
gpu.reset()
_gpustack.clear()
########NEW FILE########
__FILENAME__ = driver
"""
CUDA driver bridge implementation
NOTE:
The new driver implementation uses a "trashing service" that helps prevent
crashing the system (particularly OSX) when the CUDA context is corrupted at
resource deallocation. The old approach tied resource management directly
into the object destructor; thus, once the CUDA context was corrupted,
subsequent deallocations could corrupt it further and cause the system to
freeze in some cases.
"""
from __future__ import absolute_import, print_function, division
import sys
import os
import traceback
import ctypes
import weakref
import functools
import copy
import warnings
from ctypes import (c_int, byref, c_size_t, c_char, c_char_p, addressof,
c_void_p, c_float)
import contextlib
from numba import utils, servicelib, mviewbuf
from .error import CudaSupportError, CudaDriverError
from .drvapi import API_PROTOTYPES
from . import enums, drvapi
try:
long
except NameError:
long = int
VERBOSE_JIT_LOG = int(os.environ.get('NUMBAPRO_VERBOSE_CU_JIT_LOG', 1))
MIN_REQUIRED_CC = (2, 0)
class DeadMemoryError(RuntimeError):
pass
class LinkerError(RuntimeError):
pass
class CudaAPIError(CudaDriverError):
def __init__(self, code, msg):
self.code = code
super(CudaAPIError, self).__init__(msg)
def find_driver():
envpath = os.environ.get('NUMBAPRO_CUDA_DRIVER', None)
if envpath == '0':
# Force fail
_raise_driver_not_found()
# Determine DLL type
if sys.platform == 'win32':
dlloader = ctypes.WinDLL
dldir = ['\\windows\\system32']
dlname = 'nvcuda.dll'
elif sys.platform == 'darwin':
dlloader = ctypes.CDLL
dldir = ['/usr/local/cuda/lib']
dlname = 'libcuda.dylib'
else:
# Assume to be *nix like
dlloader = ctypes.CDLL
dldir = ['/usr/lib', '/usr/lib64']
dlname = 'libcuda.so'
if envpath is not None:
try:
envpath = os.path.abspath(envpath)
except ValueError:
raise ValueError("NUMBAPRO_CUDA_DRIVER %s is not a valid path" %
envpath)
if not os.path.isfile(envpath):
raise ValueError("NUMBAPRO_CUDA_DRIVER %s is not a valid file "
"path. Note it must be the file path of the "
".so/.dll/.dylib driver library" % envpath)
candidates = [envpath]
else:
# First search for the name in the default library path.
# If that is not found, try the specific path.
candidates = [dlname] + [os.path.join(x, dlname) for x in dldir]
# Load the driver; Collect driver error information
path_not_exist = []
driver_load_error = []
for path in candidates:
try:
dll = dlloader(path)
except OSError as e:
# Problem opening the DLL
path_not_exist.append(not os.path.isfile(path))
driver_load_error.append(e)
else:
return dll
# Problem loading driver
if all(path_not_exist):
_raise_driver_not_found()
else:
errmsg = '\n'.join(str(e) for e in driver_load_error)
_raise_driver_error(errmsg)
DRIVER_NOT_FOUND_MSG = """
CUDA driver library cannot be found.
If you are sure that a CUDA driver is installed,
try setting environment variable NUMBAPRO_CUDA_DRIVER
with the file path of the CUDA driver shared library.
"""
DRIVER_LOAD_ERROR_MSG = """
Possible CUDA driver libraries were found, but an error occurred during load:
%s
"""
def _raise_driver_not_found():
raise CudaSupportError(DRIVER_NOT_FOUND_MSG)
def _raise_driver_error(e):
raise CudaSupportError(DRIVER_LOAD_ERROR_MSG % e)
def _build_reverse_error_map():
prefix = 'CUDA_ERROR'
map = utils.UniqueDict()
for name in dir(enums):
if name.startswith(prefix):
code = getattr(enums, name)
map[code] = name
return map
ERROR_MAP = _build_reverse_error_map()
MISSING_FUNCTION_ERRMSG = """driver missing function: %s.
Requires CUDA 5.5 or above.
"""
class Driver(object):
"""
Driver API functions are lazily bound.
"""
_singleton = None
def __new__(cls):
obj = cls._singleton
if obj is not None:
return obj
else:
obj = object.__new__(cls)
obj.lib = find_driver()
# Initialize driver
obj.cuInit(0)
cls._singleton = obj
return obj
def __init__(self):
self.devices = utils.UniqueDict()
def __getattr__(self, fname):
# First request of a driver API function
try:
proto = API_PROTOTYPES[fname]
except KeyError:
raise AttributeError(fname)
restype = proto[0]
argtypes = proto[1:]
libfn = self._find_api(fname)
libfn.restype = restype
libfn.argtypes = argtypes
@functools.wraps(libfn)
def safe_cuda_api_call(*args):
retcode = libfn(*args)
self._check_error(fname, retcode)
setattr(self, fname, safe_cuda_api_call)
return safe_cuda_api_call
def _find_api(self, fname):
# Try version 2
try:
return getattr(self.lib, fname + "_v2")
except AttributeError:
pass
# Try regular
try:
return getattr(self.lib, fname)
except AttributeError:
pass
# Not found.
# Delay the missing-function error until the function is actually used
def absent_function(*args, **kws):
raise CudaDriverError(MISSING_FUNCTION_ERRMSG % fname)
setattr(self, fname, absent_function)
return absent_function
def _check_error(self, fname, retcode):
if retcode != enums.CUDA_SUCCESS:
errname = ERROR_MAP.get(retcode, "UNKNOWN_CUDA_ERROR")
msg = "Call to %s results in %s" % (fname, errname)
raise CudaAPIError(retcode, msg)
def get_device(self, devnum=0):
dev = self.devices.get(devnum)
if dev is None:
dev = Device(devnum)
self.devices[devnum] = dev
return weakref.proxy(dev)
def get_device_count(self):
count = c_int()
self.cuDeviceGetCount(byref(count))
return count.value
def list_devices(self):
"""Returns a list of active devices
"""
return list(self.devices.values())
def reset(self):
"""Reset all devices
"""
for dev in self.devices.values():
dev.reset()
driver = Driver()
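# Illustrative sketch (assumes a working CUDA driver installation): driver API
# functions are plain attribute lookups that are bound lazily on first use and
# raise CudaAPIError on failure, e.g.
#
#   count = c_int()
#   driver.cuDeviceGetCount(byref(count))
#   print(count.value)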
class TrashService(servicelib.Service):
"""
We need this to enqueue things to be removed. There are times when you
want to disable deallocation because that would break asynchronous work
queues.
"""
CLEAN_LIMIT = 20
def add_trash(self, item):
self.trash.append(item)
def process(self, _arg):
self.trash = []
yield
while True:
count = 0
# Clean the trash
assert self.CLEAN_LIMIT > count
while self.trash and count < self.CLEAN_LIMIT:
cb = self.trash.pop()
# Invoke callback
cb()
count += 1
yield
def clear(self):
while self.trash:
cb = self.trash.pop()
cb()
@contextlib.contextmanager
def defer_cleanup(self):
orig = self.enabled
self.enabled = False
yield
self.enabled = orig
self.service()
def _build_reverse_device_attrs():
prefix = "CU_DEVICE_ATTRIBUTE_"
map = utils.UniqueDict()
for name in dir(enums):
if name.startswith(prefix):
map[name[len(prefix):]] = getattr(enums, name)
return map
DEVICE_ATTRIBUTES = _build_reverse_device_attrs()
class Device(object):
"""
The device object owns the CUDA contexts. This is owned by the driver
object. Users should not construct devices directly.
"""
def __init__(self, devnum):
got_devnum = c_int()
driver.cuDeviceGet(byref(got_devnum), devnum)
assert devnum == got_devnum.value, "Driver returned another device"
self.id = got_devnum.value
self.trashing = TrashService("cuda.device%d.trash" % self.id)
self.attributes = {}
# Read compute capability
cc_major = c_int()
cc_minor = c_int()
driver.cuDeviceComputeCapability(byref(cc_major), byref(cc_minor),
self.id)
self.compute_capability = (cc_major.value, cc_minor.value)
# Read name
bufsz = 128
buf = (c_char * bufsz)()
driver.cuDeviceGetName(buf, bufsz, self.id)
self.name = buf.value
# A dictionary of all contexts keyed by handle value
self.contexts = {}
@property
def COMPUTE_CAPABILITY(self):
"""
For backward compatibility
"""
warnings.warn("Deprecated attribute 'COMPUTE_CAPABILITY'; use lower "
"case version", DeprecationWarning)
return self.compute_capability
def __del__(self):
try:
self.reset()
except:
traceback.print_exc()
def __repr__(self):
return "<CUDA device %d '%s'>" % (self.id, self.name)
def __getattr__(self, attr):
"""Read attributes lazily
"""
try:
code = DEVICE_ATTRIBUTES[attr]
except KeyError:
raise AttributeError(attr)
value = c_int()
driver.cuDeviceGetAttribute(byref(value), code, self.id)
setattr(self, attr, value.value)
return value.value
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
if isinstance(other, Device):
return self.id == other.id
return False
def __ne__(self, other):
return not (self == other)
def create_context(self):
met_requirement_for_device(self)
flags = 0
if self.CAN_MAP_HOST_MEMORY:
flags |= enums.CU_CTX_MAP_HOST
# Clean up any trash
self.trashing.service()
# Create new context
handle = drvapi.cu_context()
driver.cuCtxCreate(byref(handle), flags, self.id)
ctx = Context(weakref.proxy(self), handle,
_context_finalizer(self.trashing, handle))
self.contexts[handle.value] = ctx
return weakref.proxy(ctx)
def close_all_context(self):
self.contexts.clear()
def get_context(self):
handle = drvapi.cu_context()
driver.cuCtxGetCurrent(byref(handle))
if not handle.value:
return None
try:
return self.contexts[handle.value]
except KeyError:
raise RuntimeError("Current context is not managed: %s" %
handle.value)
def get_or_create_context(self):
ctx = self.get_context()
if ctx is None:
ctx = self.create_context()
return ctx
def reset(self):
self.close_all_context()
self.trashing.clear()
def _context_finalizer(trashing, ctxhandle):
def core():
trashing.add_trash(lambda: driver.cuCtxDestroy(ctxhandle))
return core
def met_requirement_for_device(device):
if device.compute_capability < MIN_REQUIRED_CC:
raise CudaSupportError("%s has compute capability < %s" %
(device, MIN_REQUIRED_CC))
class Context(object):
"""This object is tied to the lifetime of the actual context resource.
This object is usually wrapped in a weakref proxy for the user. Users seldom
own this object.
"""
def __init__(self, device, handle, finalizer=None):
self.device = device
self.handle = handle
self.finalizer = finalizer
self.trashing = TrashService("cuda.device%d.context%x.trash" %
(self.device.id, self.handle.value))
self.is_managed = finalizer is not None
self.allocations = utils.UniqueDict()
self.modules = utils.UniqueDict()
def __del__(self):
try:
self.reset()
# Free itself
if self.is_managed:
self.finalizer()
except:
traceback.print_exc()
def reset(self):
"""Clean up all owned resources in this context
"""
# Free owned resources
self.allocations.clear()
self.modules.clear()
# Clear trash
self.trashing.clear()
def get_memory_info(self):
"""Returns (free, total) memory in bytes in the context.
"""
free = c_size_t()
total = c_size_t()
driver.cuMemGetInfo(byref(free), byref(total))
return free.value, total.value
def push(self):
"""Push context
"""
driver.cuCtxPushCurrent(self.handle)
def pop(self):
"""Pop context
"""
driver.cuCtxPopCurrent(self.handle)
def memalloc(self, bytesize):
self.trashing.service()
ptr = drvapi.cu_device_ptr()
driver.cuMemAlloc(byref(ptr), bytesize)
mem = MemoryPointer(weakref.proxy(self), ptr, bytesize,
_memory_finalizer(self, ptr))
self.allocations[ptr.value] = mem
return mem.own()
def memhostalloc(self, bytesize, mapped=False, portable=False, wc=False):
self.trashing.service()
pointer = c_void_p()
flags = 0
if mapped:
flags |= enums.CU_MEMHOSTALLOC_DEVICEMAP
if portable:
flags |= enums.CU_MEMHOSTALLOC_PORTABLE
if wc:
flags |= enums.CU_MEMHOSTALLOC_WRITECOMBINED
driver.cuMemHostAlloc(byref(pointer), bytesize, flags)
owner = None
if mapped:
finalizer = _hostalloc_finalizer(self, pointer)
mem = MappedMemory(weakref.proxy(self), owner, pointer,
bytesize, finalizer=finalizer)
self.allocations[mem.handle.value] = mem
return mem.own()
else:
finalizer = _pinnedalloc_finalizer(self.trashing, pointer)
mem = PinnedMemory(weakref.proxy(self), owner, pointer, bytesize,
finalizer=finalizer)
return mem
def memfree(self, pointer):
try:
del self.allocations[pointer.value]
except KeyError:
raise DeadMemoryError
self.trashing.service()
def mempin(self, owner, pointer, size, mapped=False):
self.trashing.service()
if isinstance(pointer, (int, long)):
pointer = c_void_p(pointer)
if mapped and not self.device.CAN_MAP_HOST_MEMORY:
raise CudaDriverError("%s cannot map host memory" % self.device)
# possible flags are "portable" (between context)
# and "device-map" (map host memory to device thus no need
# for memory transfer).
flags = 0
if mapped:
flags |= enums.CU_MEMHOSTREGISTER_DEVICEMAP
driver.cuMemHostRegister(pointer, size, flags)
if mapped:
finalizer = _mapped_finalizer(self, pointer)
mem = MappedMemory(weakref.proxy(self), owner, pointer, size,
finalizer=finalizer)
self.allocations[mem.handle.value] = mem
return mem.own()
else:
mem = PinnedMemory(weakref.proxy(self), owner, pointer, size,
finalizer=_pinned_finalizer(self.trashing,
pointer))
return mem
def memunpin(self, pointer):
raise NotImplementedError
def create_module_ptx(self, ptx):
if isinstance(ptx, str):
ptx = ptx.encode('utf8')
image = c_char_p(ptx)
return self.create_module_image(image)
def create_module_image(self, image):
self.trashing.service()
module = load_module_image(self, image)
self.modules[module.handle.value] = module
return weakref.proxy(module)
def unload_module(self, module):
del self.modules[module.handle.value]
self.trashing.service()
def create_stream(self):
self.trashing.service()
handle = drvapi.cu_stream()
driver.cuStreamCreate(byref(handle), 0)
return Stream(weakref.proxy(self), handle,
_stream_finalizer(self.trashing, handle))
def create_event(self, timing=True):
self.trashing.service()
handle = drvapi.cu_event()
flags = 0
if not timing:
flags |= enums.CU_EVENT_DISABLE_TIMING
driver.cuEventCreate(byref(handle), flags)
return Event(weakref.proxy(self), handle,
finalizer=_event_finalizer(self.trashing, handle))
def synchronize(self):
driver.cuCtxSynchronize()
def __repr__(self):
return "<CUDA context %s of device %d>" % (self.handle, self.device.id)
def load_module_image(context, image):
"""
image must be a pointer
"""
logsz = os.environ.get('NUMBAPRO_CUDA_LOG_SIZE', 1024)
jitinfo = (c_char * logsz)()
jiterrors = (c_char * logsz)()
options = {
enums.CU_JIT_INFO_LOG_BUFFER: addressof(jitinfo),
enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_ERROR_LOG_BUFFER: addressof(jiterrors),
enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_LOG_VERBOSE: c_void_p(VERBOSE_JIT_LOG),
}
option_keys = (drvapi.cu_jit_option * len(options))(*options.keys())
option_vals = (c_void_p * len(options))(*options.values())
handle = drvapi.cu_module()
try:
driver.cuModuleLoadDataEx(byref(handle), image, len(options),
option_keys, option_vals)
except CudaAPIError as e:
msg = "cuModuleLoadDataEx error:\n%s" % jiterrors.value.decode("utf8")
raise CudaAPIError(e.code, msg)
info_log = jitinfo.value
return Module(weakref.proxy(context), handle, info_log,
_module_finalizer(context, handle))
def _make_mem_finalizer(dtor):
def mem_finalize(context, handle):
trashing = context.trashing
allocations = context.allocations
def core():
def cleanup():
if allocations:
del allocations[handle.value]
dtor(handle)
trashing.add_trash(cleanup)
return core
return mem_finalize
_hostalloc_finalizer = _make_mem_finalizer(driver.cuMemFreeHost)
_mapped_finalizer = _make_mem_finalizer(driver.cuMemHostUnregister)
_memory_finalizer = _make_mem_finalizer(driver.cuMemFree)
def _pinnedalloc_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuMemFreeHost(handle))
return core
def _pinned_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuMemHostUnregister(handle))
return core
def _event_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuEventDestroy(handle))
return core
def _stream_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuStreamDestroy(handle))
return core
def _module_finalizer(context, handle):
trashing = context.trashing
modules = context.modules
def core():
def cleanup():
if modules:
del modules[handle.value]
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
return core
class MemoryPointer(object):
__cuda_memory__ = True
def __init__(self, context, pointer, size, finalizer=None):
self.context = context
self.device_pointer = pointer
self.size = size
self._cuda_memsize_ = size
self.finalizer = finalizer
self.is_managed = finalizer is not None
self.is_alive = True
self.refct = 0
self.handle = self.device_pointer
def __del__(self):
try:
if self.is_managed and self.is_alive:
self.finalizer()
except:
traceback.print_exc()
def own(self):
return OwnedPointer(weakref.proxy(self))
def free(self):
"""
Forces the device memory to the trash.
"""
if self.is_managed:
if not self.is_alive:
raise RuntimeError("Freeing dead memory")
self.finalizer()
self.is_alive = False
def memset(self, byte, count=None, stream=0):
count = self.size if count is None else count
if stream:
driver.cuMemsetD8Async(self.device_pointer, byte, count,
stream.handle)
else:
driver.cuMemsetD8(self.device_pointer, byte, count)
def view(self, start, stop=None):
base = self.device_pointer.value + start
if stop is None:
size = self.size - start
else:
size = stop - start
assert size > 0, "zero or negative memory size"
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size)
return OwnedPointer(weakref.proxy(self), view)
@property
def device_ctypes_pointer(self):
return self.device_pointer
class MappedMemory(MemoryPointer):
__cuda_memory__ = True
def __init__(self, context, owner, hostpointer, size,
finalizer=None):
self.owned = owner
self.host_pointer = hostpointer
devptr = drvapi.cu_device_ptr()
driver.cuMemHostGetDevicePointer(byref(devptr), hostpointer, 0)
self.device_pointer = devptr
super(MappedMemory, self).__init__(context, devptr, size,
finalizer=finalizer)
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
self._bufptr_ = self.host_pointer.value
def own(self):
return MappedOwnedPointer(weakref.proxy(self))
class PinnedMemory(mviewbuf.MemAlloc):
def __init__(self, context, owner, pointer, size, finalizer=None):
self.context = context
self.owned = owner
self.size = size
self.host_pointer = pointer
self.is_managed = finalizer is not None
self.finalizer = finalizer
self.is_alive = True
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
self._bufptr_ = self.host_pointer.value
def __del__(self):
try:
if self.is_managed and self.is_alive:
self.finalizer()
except:
traceback.print_exc()
def unpin(self):
if not self.is_alive:
raise DeadMemoryError
self.finalizer()
self.is_alive = False
def own(self):
return self
class OwnedPointer(object):
def __init__(self, memptr, view=None):
self._mem = memptr
self._mem.refct += 1
if view is None:
self._view = self._mem
else:
assert not view.is_managed
self._view = view
def __del__(self):
try:
self._mem.refct -= 1
assert self._mem.refct >= 0
if self._mem.refct == 0:
self._mem.free()
except ReferenceError:
pass
except:
traceback.print_exc()
def __getattr__(self, fname):
"""Proxy MemoryPointer methods
"""
return getattr(self._view, fname)
class MappedOwnedPointer(OwnedPointer, mviewbuf.MemAlloc):
pass
class Stream(object):
def __init__(self, context, handle, finalizer):
self.context = context
self.handle = handle
self.finalizer = finalizer
self.is_managed = finalizer is not None
def __del__(self):
try:
if self.is_managed:
self.finalizer()
except:
traceback.print_exc()
def __int__(self):
return self.handle.value
def __repr__(self):
return "<CUDA stream %d on %s>" % (self.handle.value, self.context)
def synchronize(self):
driver.cuStreamSynchronize(self.handle)
@contextlib.contextmanager
def auto_synchronize(self):
yield self
self.synchronize()
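# Illustrative sketch (assumes `ctx` is an active Context): asynchronous work can
# be grouped on a stream and synchronized when the block exits.
#
#   stream = ctx.create_stream()
#   with stream.auto_synchronize():
#       ...   # enqueue async copies / kernel launches on `stream`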
class Event(object):
def __init__(self, context, handle, finalizer=None):
self.context = context
self.handle = handle
self.finalizer = finalizer
self.is_managed = self.finalizer is not None
def __del__(self):
try:
if self.is_managed:
self.finalizer()
except:
traceback.print_exc()
def query(self):
"""Returns True if all work before the most recent record has completed;
otherwise, returns False.
"""
try:
driver.cuEventQuery(self.handle)
except CudaAPIError as e:
if e.code == enums.CUDA_ERROR_NOT_READY:
return False
else:
raise
else:
return True
def record(self, stream=0):
"""Record the event at the current point in the given stream.
"""
hstream = stream.handle if stream else 0
driver.cuEventRecord(self.handle, hstream)
def synchronize(self):
"""Synchronize the host thread until the event completes.
"""
driver.cuEventSynchronize(self.handle)
def wait(self, stream=0):
"""All future work submitted to the stream will wait until the event
completes.
"""
hstream = stream.handle if stream else 0
flags = 0
driver.cuStreamWaitEvent(hstream, self.handle, flags)
def elapsed_time(self, evtend):
return event_elapsed_time(self, evtend)
def event_elapsed_time(evtstart, evtend):
msec = c_float()
driver.cuEventElapsedTime(byref(msec), evtstart.handle, evtend.handle)
return msec.value
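# Illustrative sketch (assumes `ctx` is an active Context): events can be used to
# time GPU work on the default stream.
#
#   start = ctx.create_event(timing=True)
#   end = ctx.create_event(timing=True)
#   start.record()
#   ...                          # work to be timed
#   end.record()
#   end.synchronize()
#   msec = start.elapsed_time(end)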
class Module(object):
def __init__(self, context, handle, info_log, finalizer=None):
self.context = context
self.handle = handle
self.info_log = info_log
self.finalizer = finalizer
self.is_managed = self.finalizer is not None
def __del__(self):
try:
if self.is_managed:
self.finalizer()
except:
traceback.print_exc()
def unload(self):
self.context.unload_module(self)
def get_function(self, name):
handle = drvapi.cu_function()
driver.cuModuleGetFunction(byref(handle), self.handle,
name.encode('utf8'))
return Function(weakref.proxy(self), handle, name)
class Function(object):
griddim = 1, 1, 1
blockdim = 1, 1, 1
stream = 0
sharedmem = 0
def __init__(self, module, handle, name):
self.module = module
self.handle = handle
self.name = name
def __repr__(self):
return "<CUDA function %s>" % self.name
def cache_config(self, prefer_equal=False, prefer_cache=False,
prefer_shared=False):
prefer_equal = prefer_equal or (prefer_cache and prefer_shared)
if prefer_equal:
flag = enums.CU_FUNC_CACHE_PREFER_EQUAL
elif prefer_cache:
flag = enums.CU_FUNC_CACHE_PREFER_L1
elif prefer_shared:
flag = enums.CU_FUNC_CACHE_PREFER_SHARED
else:
flag = enums.CU_FUNC_CACHE_PREFER_NONE
driver.cuFuncSetCacheConfig(self.handle, flag)
def configure(self, griddim, blockdim, sharedmem=0, stream=0):
while len(griddim) < 3:
griddim += (1,)
while len(blockdim) < 3:
blockdim += (1,)
inst = copy.copy(self) # shallow clone the object
inst.griddim = griddim
inst.blockdim = blockdim
inst.sharedmem = sharedmem
if stream:
inst.stream = stream
else:
inst.stream = 0
return inst
def __call__(self, *args):
'''
*args -- Must be either ctypes objects or DevicePointer instances.
'''
if self.stream:
streamhandle = self.stream.handle
else:
streamhandle = None
launch_kernel(self.handle, self.griddim, self.blockdim,
self.sharedmem, streamhandle, args)
@property
def device(self):
return self.module.context.device
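# Illustrative sketch (assumes `module` is a loaded Module exposing "kernel" and
# that the arguments are ctypes objects or device pointers):
#
#   func = module.get_function("kernel")
#   configured = func.configure((nblocks,), (nthreads,), sharedmem=0, stream=0)
#   configured(arg0, arg1)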
def launch_kernel(cufunc_handle, griddim, blockdim, sharedmem, hstream, args):
gx, gy, gz = griddim
bx, by, bz = blockdim
param_vals = []
for arg in args:
if is_device_memory(arg):
param_vals.append(addressof(device_ctypes_pointer(arg)))
else:
param_vals.append(addressof(arg))
params = (c_void_p * len(param_vals))(*param_vals)
driver.cuLaunchKernel(cufunc_handle,
gx, gy, gz,
bx, by, bz,
sharedmem,
hstream,
params,
None)
FILE_EXTENSION_MAP = {
'o': enums.CU_JIT_INPUT_OBJECT,
'ptx': enums.CU_JIT_INPUT_PTX,
'a': enums.CU_JIT_INPUT_LIBRARY,
'cubin': enums.CU_JIT_INPUT_CUBIN,
'fatbin': enums.CU_JIT_INPUT_FATBINAR,
}
class Linker(object):
def __init__(self):
logsz = int(os.environ.get('NUMBAPRO_CUDA_LOG_SIZE', 1024))
linkerinfo = (c_char * logsz)()
linkererrors = (c_char * logsz)()
options = {
enums.CU_JIT_INFO_LOG_BUFFER: addressof(linkerinfo),
enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_ERROR_LOG_BUFFER: addressof(linkererrors),
enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_LOG_VERBOSE: c_void_p(1),
}
raw_keys = list(options.keys()) + [enums.CU_JIT_TARGET_FROM_CUCONTEXT]
raw_values = list(options.values())
del options
option_keys = (drvapi.cu_jit_option * len(raw_keys))(*raw_keys)
option_vals = (c_void_p * len(raw_values))(*raw_values)
self.handle = handle = drvapi.cu_link_state()
driver.cuLinkCreate(len(raw_keys), option_keys, option_vals,
byref(self.handle))
self.finalizer = lambda: driver.cuLinkDestroy(handle)
self.linker_info_buf = linkerinfo
self.linker_errors_buf = linkererrors
self._keep_alive = [linkerinfo, linkererrors, option_keys, option_vals]
@property
def info_log(self):
return self.linker_info_buf.value.decode('utf8')
@property
def error_log(self):
return self.linker_errors_buf.value.decode('utf8')
def __del__(self):
try:
self.finalizer()
except:
traceback.print_exc()
def add_ptx(self, ptx, name='<cudapy-ptx>'):
ptxbuf = c_char_p(ptx)
namebuf = c_char_p(name.encode('utf8'))
self._keep_alive += [ptxbuf, namebuf]
try:
driver.cuLinkAddData(self.handle, enums.CU_JIT_INPUT_PTX,
ptxbuf, len(ptx), namebuf, 0, None, None)
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
def add_file(self, path, kind):
pathbuf = c_char_p(path.encode("utf8"))
self._keep_alive.append(pathbuf)
try:
driver.cuLinkAddFile(self.handle, kind, pathbuf, 0, None, None)
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
def add_file_guess_ext(self, path):
ext = path.rsplit('.', 1)[1]
kind = FILE_EXTENSION_MAP[ext]
self.add_file(path, kind)
def complete(self):
'''
Returns (cubin, size)
cubin is a pointer to an internal buffer owned
by the linker; thus, it should be loaded before the linker
is destroyed.
'''
cubin = c_void_p(0)
size = c_size_t(0)
try:
driver.cuLinkComplete(self.handle, byref(cubin), byref(size))
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
size = size.value
assert size > 0, 'linker returned a zero sized cubin'
del self._keep_alive[:]
return cubin, size
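# Illustrative sketch (assumes `ptx_source` holds PTX as bytes and `ctx` is an
# active Context, e.g. obtained from the .devices module):
#
#   linker = Linker()
#   linker.add_ptx(ptx_source)               # or linker.add_file_guess_ext(path)
#   cubin, size = linker.complete()
#   module = ctx.create_module_image(cubin)  # load before the linker is destroyed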
# -----------------------------------------------------------------------------
def _device_pointer_attr(devmem, attr, odata):
"""Query attribute on the device pointer
"""
# The driver wrapper raises CudaAPIError on failure, so no explicit error
# check is needed here.
driver.cuPointerGetAttribute(byref(odata), attr, device_ctypes_pointer(devmem))
def device_pointer_type(devmem):
"""Query the device pointer type: host, device, array, unified?
"""
ptrtype = c_int(0)
_device_pointer_attr(devmem, enums.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,
ptrtype)
map = {
enums.CU_MEMORYTYPE_HOST: 'host',
enums.CU_MEMORYTYPE_DEVICE: 'device',
enums.CU_MEMORYTYPE_ARRAY: 'array',
enums.CU_MEMORYTYPE_UNIFIED: 'unified',
}
return map[ptrtype.value]
def device_extents(devmem):
"""Find the extents (half open begin and end pointer) of the underlying
device memory allocation.
NOTE: it always returns the extents of the allocation, not the extents
of the device memory view, which can be a subsection of the entire allocation.
"""
s = drvapi.cu_device_ptr()
n = c_size_t()
devptr = device_ctypes_pointer(devmem)
driver.cuMemGetAddressRange(byref(s), byref(n), devptr)
s, n = s.value, n.value
return s, s + n
def device_memory_size(devmem):
"""Check the memory size of the device memory.
The result is cached in the device memory object.
It may query the driver for the memory size of the device memory allocation.
"""
sz = getattr(devmem, '_cuda_memsize_', None)
if sz is None:
s, e = device_extents(devmem)
sz = e - s
devmem._cuda_memsize_ = sz
assert sz > 0, "zero length array"
return sz
def host_pointer(obj):
"""
NOTE: The underlying data pointer from the host data buffer is used and
it should not be changed until the operation, which can be asynchronous,
completes.
"""
if isinstance(obj, (int, long)):
return obj
return mviewbuf.memoryview_get_buffer(obj)
def host_memory_extents(obj):
"Returns (start, end), the start and end pointers of the array (half open)."
return mviewbuf.memoryview_get_extents(obj)
def memory_size_from_info(shape, strides, itemsize):
"""Get the byte size of a contiguous memory buffer given the shape, strides
and itemsize.
"""
assert len(shape) == len(strides), "# dim mismatch"
ndim = len(shape)
s, e = mviewbuf.memoryview_get_extents_info(shape, strides, ndim, itemsize)
return e - s
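# Illustrative worked example: a C-contiguous float64 array of shape (3, 4) has
# strides (32, 8) and itemsize 8, so
#   memory_size_from_info((3, 4), (32, 8), 8) == 96    # 3 * 4 * 8 bytes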
def host_memory_size(obj):
"Get the size of the memory"
s, e = host_memory_extents(obj)
assert e >= s, "memory extent of negative size"
return e - s
def device_pointer(obj):
"Get the device pointer as an integer"
return device_ctypes_pointer(obj).value
def device_ctypes_pointer(obj):
"Get the ctypes object for the device pointer"
require_device_memory(obj)
return obj.device_ctypes_pointer
def is_device_memory(obj):
"""A CUDA memory object is recognized as an instance with the attribute
"__cuda_memory__" defined and its value evaluating to True.
Every CUDA memory object should also define an attribute named
"device_pointer" whose value is an int (or long) object carrying the pointer
value of the device memory address. This is not tested in this method.
"""
return getattr(obj, '__cuda_memory__', False)
def require_device_memory(obj):
"""A sentry for methods that accept CUDA memory objects.
"""
if not is_device_memory(obj):
raise Exception("Not a CUDA memory object.")
def device_memory_depends(devmem, *objs):
"""Add dependencies to the device memory.
Mainly used for creating structures that point to other device memory,
so that the referents are not garbage-collected and released.
"""
depset = getattr(devmem, "_depends_", [])
depset.extend(objs)
def host_to_device(dst, src, size, stream=0):
"""
NOTE: The underlying data pointer from the host data buffer is used and
it should not be changed until the operation, which can be asynchronous,
completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyHtoDAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyHtoD
fn(device_pointer(dst), host_pointer(src), size, *varargs)
def device_to_host(dst, src, size, stream=0):
"""
NOTE: The underlying data pointer from the host data buffer is used and
it should not be changed until the operation, which can be asynchronous,
completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyDtoHAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyDtoH
fn(host_pointer(dst), device_pointer(src), size, *varargs)
def device_to_device(dst, src, size, stream=0):
"""
NOTE: The underlying data pointer from the host data buffer is used and
it should not be changed until the operation, which can be asynchronous,
completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyDtoDAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyDtoD
fn(device_pointer(dst), device_pointer(src), size, *varargs)
def device_memset(dst, val, size, stream=0):
"""Memset on the device.
If stream is not zero, asynchronous mode is used.
dst: device memory
val: byte value to be written
size: number of byte to be written
stream: a CUDA stream
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemsetD8Async
varargs.append(stream.handle)
else:
fn = driver.cuMemsetD8
fn(device_pointer(dst), val, size, *varargs)
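# Illustrative sketch (assumes `d_mem` is a device allocation of at least
# `host_ary.nbytes` bytes and `host_ary` is a contiguous numpy array):
#
#   host_to_device(d_mem, host_ary, host_ary.nbytes)   # upload
#   device_memset(d_mem, 0, host_ary.nbytes)           # clear on the device
#   device_to_host(host_ary, d_mem, host_ary.nbytes)   # download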
def profile_start():
# cuProfilerStart raises CudaAPIError on failure via the driver wrapper,
# so no explicit error check is needed.
driver = Driver()
driver.cuProfilerStart()
def profile_stop():
# cuProfilerStop raises CudaAPIError on failure via the driver wrapper.
driver = Driver()
driver.cuProfilerStop()
@contextlib.contextmanager
def profiling():
"""
Experimental profiling context.
"""
profile_start()
yield
profile_stop()
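# Illustrative sketch: wrap a region of GPU work so it is captured by the CUDA
# profiler; `run_gpu_work` is a hypothetical user function.
#
#   with profiling():
#       run_gpu_work()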
########NEW FILE########
__FILENAME__ = drvapi
from __future__ import print_function, absolute_import, division
from ctypes import *
cu_device = c_int
cu_device_attribute = c_int # enum
cu_context = c_void_p # an opaque handle
cu_module = c_void_p # an opaque handle
cu_jit_option = c_int # enum
cu_jit_input_type = c_int # enum
cu_function = c_void_p # an opaque handle
cu_device_ptr = c_size_t # defined as unsigned int on 32-bit
# and unsigned long long on 64-bit machine
cu_stream = c_void_p # an opaque handle
cu_event = c_void_p
cu_link_state = c_void_p
API_PROTOTYPES = {
# CUresult cuInit(unsigned int Flags);
'cuInit' : (c_int, c_uint),
# CUresult cuDriverGetVersion ( int* driverVersion )
'cuDriverGetVersion': (c_int, POINTER(c_int)),
# CUresult cuDeviceGetCount(int *count);
'cuDeviceGetCount': (c_int, POINTER(c_int)),
# CUresult cuDeviceGet(CUdevice *device, int ordinal);
'cuDeviceGet': (c_int, POINTER(cu_device), c_int),
# CUresult cuDeviceGetName ( char* name, int len, CUdevice dev )
'cuDeviceGetName': (c_int, c_char_p, c_int, cu_device),
# CUresult cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib,
# CUdevice dev);
'cuDeviceGetAttribute': (c_int, POINTER(c_int), cu_device_attribute,
cu_device),
# CUresult cuDeviceComputeCapability(int *major, int *minor,
# CUdevice dev);
'cuDeviceComputeCapability': (c_int, POINTER(c_int), POINTER(c_int),
cu_device),
# CUresult cuCtxCreate(CUcontext *pctx, unsigned int flags,
# CUdevice dev);
'cuCtxCreate': (c_int, POINTER(cu_context), c_uint, cu_device),
# CUresult cuCtxGetDevice ( CUdevice * device )
'cuCtxGetDevice': (c_int, POINTER(cu_device)),
# CUresult cuCtxGetCurrent (CUcontext *pctx);
'cuCtxGetCurrent': (c_int, POINTER(cu_context)),
# CUresult cuCtxPushCurrent (CUcontext pctx);
'cuCtxPushCurrent': (c_int, cu_context),
# CUresult cuCtxPopCurrent (CUcontext *pctx);
'cuCtxPopCurrent': (c_int, POINTER(cu_context)),
# CUresult cuCtxDestroy(CUcontext pctx);
'cuCtxDestroy': (c_int, cu_context),
# CUresult cuModuleLoadDataEx(CUmodule *module, const void *image,
# unsigned int numOptions,
# CUjit_option *options,
# void **optionValues);
'cuModuleLoadDataEx': (c_int, cu_module, c_void_p, c_uint,
POINTER(cu_jit_option), POINTER(c_void_p)),
# CUresult cuModuleUnload(CUmodule hmod);
'cuModuleUnload': (c_int, cu_module),
# CUresult cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod,
# const char *name);
'cuModuleGetFunction': (c_int, cu_function, cu_module, c_char_p),
# CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc,
# CUfunc_cache config);
'cuFuncSetCacheConfig': (c_int, cu_function, c_uint),
# CUresult cuMemAlloc(CUdeviceptr *dptr, size_t bytesize);
'cuMemAlloc': (c_int, POINTER(cu_device_ptr), c_size_t),
# CUresult cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N)
'cuMemsetD8': (c_int, cu_device_ptr, c_uint8, c_size_t),
# CUresult cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc,
# size_t N, CUstream hStream);
'cuMemsetD8Async': (c_int,
cu_device_ptr, c_uint8, c_size_t, cu_stream),
# CUresult cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount);
'cuMemcpyHtoD': (c_int, cu_device_ptr, c_void_p, c_size_t),
# CUresult cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount, CUstream hStream);
'cuMemcpyHtoDAsync': (c_int, cu_device_ptr, c_void_p, c_size_t,
cu_stream),
# CUresult cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount);
'cuMemcpyDtoD': (c_int, cu_device_ptr, cu_device_ptr, c_size_t),
# CUresult cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount, CUstream hStream);
'cuMemcpyDtoDAsync': (c_int, cu_device_ptr, cu_device_ptr, c_size_t,
cu_stream),
# CUresult cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
# size_t ByteCount);
'cuMemcpyDtoH': (c_int, c_void_p, cu_device_ptr, c_size_t),
# CUresult cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice,
# size_t ByteCount, CUstream hStream);
'cuMemcpyDtoHAsync': (c_int, c_void_p, cu_device_ptr, c_size_t,
cu_stream),
# CUresult cuMemFree(CUdeviceptr dptr);
'cuMemFree': (c_int, cu_device_ptr),
# CUresult cuStreamCreate(CUstream *phStream, unsigned int Flags);
'cuStreamCreate': (c_int, POINTER(cu_stream), c_uint),
# CUresult cuStreamDestroy(CUstream hStream);
'cuStreamDestroy': (c_int, cu_stream),
# CUresult cuStreamSynchronize(CUstream hStream);
'cuStreamSynchronize': (c_int, cu_stream),
# CUresult cuLaunchKernel(CUfunction f, unsigned int gridDimX,
# unsigned int gridDimY,
# unsigned int gridDimZ,
# unsigned int blockDimX,
# unsigned int blockDimY,
# unsigned int blockDimZ,
# unsigned int sharedMemBytes,
# CUstream hStream, void **kernelParams,
# void ** extra)
'cuLaunchKernel': (c_int, cu_function, c_uint, c_uint, c_uint,
c_uint, c_uint, c_uint, c_uint, cu_stream,
POINTER(c_void_p), POINTER(c_void_p)),
# CUresult cuMemHostAlloc ( void ** pp,
# size_t bytesize,
# unsigned int Flags
# )
'cuMemHostAlloc': (c_int, c_void_p, c_size_t, c_uint),
# CUresult cuMemFreeHost ( void * p )
'cuMemFreeHost': (c_int, c_void_p),
# CUresult cuMemHostRegister(void * p,
# size_t bytesize,
# unsigned int Flags)
'cuMemHostRegister': (c_int, c_void_p, c_size_t, c_uint),
# CUresult cuMemHostUnregister(void * p)
'cuMemHostUnregister': (c_int, c_void_p),
# CUresult cuMemHostGetDevicePointer(CUdeviceptr * pdptr,
# void * p,
# unsigned int Flags)
'cuMemHostGetDevicePointer': (c_int, POINTER(cu_device_ptr),
c_void_p, c_uint),
# CUresult cuMemGetInfo(size_t * free, size_t * total)
'cuMemGetInfo' : (c_int, POINTER(c_size_t), POINTER(c_size_t)),
# CUresult cuEventCreate ( CUevent * phEvent,
# unsigned int Flags )
'cuEventCreate': (c_int, POINTER(cu_event), c_uint),
# CUresult cuEventDestroy ( CUevent hEvent )
'cuEventDestroy': (c_int, cu_event),
# CUresult cuEventElapsedTime ( float * pMilliseconds,
# CUevent hStart,
# CUevent hEnd )
'cuEventElapsedTime': (c_int, POINTER(c_float), cu_event, cu_event),
# CUresult cuEventQuery ( CUevent hEvent )
'cuEventQuery': (c_int, cu_event),
# CUresult cuEventRecord ( CUevent hEvent,
# CUstream hStream )
'cuEventRecord': (c_int, cu_event, cu_stream),
# CUresult cuEventSynchronize ( CUevent hEvent )
'cuEventSynchronize': (c_int, cu_event),
# CUresult cuStreamWaitEvent ( CUstream hStream,
# CUevent hEvent,
# unsigned int Flags )
'cuStreamWaitEvent': (c_int, cu_stream, cu_event, c_uint),
# CUresult cuPointerGetAttribute (void *data, CUpointer_attribute attribute, CUdeviceptr ptr)
'cuPointerGetAttribute': (c_int, c_void_p, c_uint, cu_device_ptr),
# CUresult cuMemGetAddressRange ( CUdeviceptr * pbase,
# size_t * psize,
# CUdeviceptr dptr
# )
'cuMemGetAddressRange': (c_int,
POINTER(cu_device_ptr),
POINTER(c_size_t),
cu_device_ptr),
# CUresult cuMemHostGetFlags ( unsigned int * pFlags,
# void * p )
'cuMemHostGetFlags': (c_int,
POINTER(c_uint),
c_void_p),
# CUresult cuCtxSynchronize ( void )
'cuCtxSynchronize' : (c_int,),
# CUresult
# cuLinkCreate(unsigned int numOptions, CUjit_option *options,
# void **optionValues, CUlinkState *stateOut);
'cuLinkCreate': (c_int,
c_uint, POINTER(cu_jit_option),
POINTER(c_void_p), POINTER(cu_link_state)),
# CUresult
# cuLinkAddData(CUlinkState state, CUjitInputType type, void *data,
# size_t size, const char *name, unsigned
# int numOptions, CUjit_option *options,
# void **optionValues);
'cuLinkAddData': (c_int,
cu_link_state, cu_jit_input_type, c_void_p,
c_size_t, c_char_p, c_uint, POINTER(cu_jit_option),
POINTER(c_void_p)),
# CUresult
# cuLinkAddFile(CUlinkState state, CUjitInputType type,
# const char *path, unsigned int numOptions,
# CUjit_option *options, void **optionValues);
'cuLinkAddFile': (c_int,
cu_link_state, cu_jit_input_type, c_char_p, c_uint,
POINTER(cu_jit_option), POINTER(c_void_p)),
# CUresult CUDAAPI
# cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut)
'cuLinkComplete': (c_int,
cu_link_state, POINTER(c_void_p), POINTER(c_size_t)),
# CUresult CUDAAPI
# cuLinkDestroy(CUlinkState state)
'cuLinkDestroy': (c_int, cu_link_state),
# cuProfilerInitialize ( const char* configFile, const char*
# outputFile, CUoutput_mode outputMode )
# 'cuProfilerInitialize': (c_int, c_char_p, c_char_p, cu_output_mode),
# cuProfilerStart ( void )
'cuProfilerStart': (c_int,),
# cuProfilerStop ( void )
'cuProfilerStop': (c_int,),
}
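# Illustrative note: each entry above maps a driver symbol to a tuple of
# (restype, *argtypes) and is bound lazily by Driver.__getattr__ in driver.py.
# For example, the 'cuDeviceGetCount' prototype (c_int, POINTER(c_int))
# corresponds to a call of the form:
#
#   count = c_int()
#   driver.cuDeviceGetCount(byref(count))   # raises on a non-zero CUresult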
########NEW FILE########
__FILENAME__ = enums
"""
Enum values for CUDA driver
"""
from __future__ import print_function, absolute_import, division
CUDA_SUCCESS = 0
CUDA_ERROR_INVALID_VALUE = 1
CUDA_ERROR_OUT_OF_MEMORY = 2
CUDA_ERROR_NOT_INITIALIZED = 3
CUDA_ERROR_DEINITIALIZED = 4
CUDA_ERROR_PROFILER_DISABLED = 5
CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6
CUDA_ERROR_PROFILER_ALREADY_STARTED = 7
CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8
CUDA_ERROR_NO_DEVICE = 100
CUDA_ERROR_INVALID_DEVICE = 101
CUDA_ERROR_INVALID_IMAGE = 200
CUDA_ERROR_INVALID_CONTEXT = 201
CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202
CUDA_ERROR_MAP_FAILED = 205
CUDA_ERROR_UNMAP_FAILED = 206
CUDA_ERROR_ARRAY_IS_MAPPED = 207
CUDA_ERROR_ALREADY_MAPPED = 208
CUDA_ERROR_NO_BINARY_FOR_GPU = 209
CUDA_ERROR_ALREADY_ACQUIRED = 210
CUDA_ERROR_NOT_MAPPED = 211
CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212
CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213
CUDA_ERROR_ECC_UNCORRECTABLE = 214
CUDA_ERROR_UNSUPPORTED_LIMIT = 215
CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216
CUDA_ERROR_INVALID_SOURCE = 300
CUDA_ERROR_FILE_NOT_FOUND = 301
CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302
CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303
CUDA_ERROR_OPERATING_SYSTEM = 304
CUDA_ERROR_INVALID_HANDLE = 400
CUDA_ERROR_NOT_FOUND = 500
CUDA_ERROR_NOT_READY = 600
CUDA_ERROR_LAUNCH_FAILED = 700
CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701
CUDA_ERROR_LAUNCH_TIMEOUT = 702
CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703
CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704
CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705
CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708
CUDA_ERROR_CONTEXT_IS_DESTROYED = 709
CUDA_ERROR_ASSERT = 710
CUDA_ERROR_TOO_MANY_PEERS = 711
CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712
CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713
CUDA_ERROR_UNKNOWN = 999
# no preference for shared memory or L1 (default)
CU_FUNC_CACHE_PREFER_NONE = 0x00
# prefer larger shared memory and smaller L1 cache
CU_FUNC_CACHE_PREFER_SHARED = 0x01
# prefer larger L1 cache and smaller shared memory
CU_FUNC_CACHE_PREFER_L1 = 0x02
# prefer equal sized L1 cache and shared memory
CU_FUNC_CACHE_PREFER_EQUAL = 0x03
# Automatic scheduling
CU_CTX_SCHED_AUTO = 0x00
# Set spin as default scheduling
CU_CTX_SCHED_SPIN = 0x01
# Set yield as default scheduling
CU_CTX_SCHED_YIELD = 0x02
# Set blocking synchronization as default scheduling
CU_CTX_SCHED_BLOCKING_SYNC = 0x04
CU_CTX_SCHED_MASK = 0x07
# Support mapped pinned allocations
CU_CTX_MAP_HOST = 0x08
# Keep local memory allocation after launch
CU_CTX_LMEM_RESIZE_TO_MAX = 0x10
CU_CTX_FLAGS_MASK = 0x1f
# If set, host memory is portable between CUDA contexts.
# Flag for cuMemHostAlloc()
CU_MEMHOSTALLOC_PORTABLE = 0x01
# If set, host memory is mapped into CUDA address space and
# cuMemHostGetDevicePointer() may be called on the host pointer.
# Flag for cuMemHostAlloc()
CU_MEMHOSTALLOC_DEVICEMAP = 0x02
# If set, host memory is allocated as write-combined - fast to write,
# faster to DMA, slow to read except via SSE4 streaming load instruction
# (MOVNTDQA).
# Flag for cuMemHostAlloc()
CU_MEMHOSTALLOC_WRITECOMBINED = 0x04
# If set, host memory is portable between CUDA contexts.
# Flag for cuMemHostRegister()
CU_MEMHOSTREGISTER_PORTABLE = 0x01
# If set, host memory is mapped into CUDA address space and
# cuMemHostGetDevicePointer() may be called on the host pointer.
# Flag for cuMemHostRegister()
CU_MEMHOSTREGISTER_DEVICEMAP = 0x02
# Default event flag
CU_EVENT_DEFAULT = 0x0
# Event uses blocking synchronization
CU_EVENT_BLOCKING_SYNC = 0x1
# Event will not record timing data
CU_EVENT_DISABLE_TIMING = 0x2
# Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set
CU_EVENT_INTERPROCESS = 0x4
# The CUcontext on which a pointer was allocated or registered
CU_POINTER_ATTRIBUTE_CONTEXT = 1
# The CUmemorytype describing the physical location of a pointer
CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2
# The address at which a pointer's memory may be accessed on the device
CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3
# The address at which a pointer's memory may be accessed on the host
CU_POINTER_ATTRIBUTE_HOST_POINTER = 4
# A pair of tokens for use with the nv-p2p.h Linux kernel interface
CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5
# Host memory
CU_MEMORYTYPE_HOST = 0x01
# Device memory
CU_MEMORYTYPE_DEVICE = 0x02
# Array memory
CU_MEMORYTYPE_ARRAY = 0x03
# Unified device or host memory
CU_MEMORYTYPE_UNIFIED = 0x04
# Compiled device-class-specific device code
# Applicable options: none
CU_JIT_INPUT_CUBIN = 0
# PTX source code
# Applicable options: PTX compiler options
CU_JIT_INPUT_PTX = 1
# Bundle of multiple cubins and/or PTX of some device code
# Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
CU_JIT_INPUT_FATBINARY = 2
# Host object with embedded device code
# Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
CU_JIT_INPUT_OBJECT = 3
# Archive of host objects with embedded device code
# Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
CU_JIT_INPUT_LIBRARY = 4
# Max number of registers that a thread may use.
# Option type: unsigned int
# Applies to: compiler only
CU_JIT_MAX_REGISTERS = 0
# IN: Specifies minimum number of threads per block to target compilation
# for
# OUT: Returns the number of threads the compiler actually targeted.
# This restricts the resource utilization of the compiler (e.g. max
# registers) such that a block with the given number of threads should be
# able to launch based on register limitations. Note, this option does not
# currently take into account any other resource limitations, such as
# shared memory utilization.
# Cannot be combined with ::CU_JIT_TARGET.
# Option type: unsigned int
# Applies to: compiler only
CU_JIT_THREADS_PER_BLOCK = 1
# Overwrites the option value with the total wall clock time, in
# milliseconds, spent in the compiler and linker
# Option type: float
# Applies to: compiler and linker
CU_JIT_WALL_TIME = 2
# Pointer to a buffer in which to print any log messages
# that are informational in nature (the buffer size is specified via
# option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)
# Option type: char *
# Applies to: compiler and linker
CU_JIT_INFO_LOG_BUFFER = 3
# IN: Log buffer size in bytes. Log messages will be capped at this size
# (including null terminator)
# OUT: Amount of log buffer filled with messages
# Option type: unsigned int
# Applies to: compiler and linker
CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = 4
# Pointer to a buffer in which to print any log messages that
# reflect errors (the buffer size is specified via option
# ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)
# Option type: char *
# Applies to: compiler and linker
CU_JIT_ERROR_LOG_BUFFER = 5
# IN: Log buffer size in bytes. Log messages will be capped at this size
# (including null terminator)
# OUT: Amount of log buffer filled with messages
# Option type: unsigned int
# Applies to: compiler and linker
CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = 6
# Level of optimizations to apply to generated code (0 - 4), with 4
# being the default and highest level of optimizations.
# Option type: unsigned int
# Applies to: compiler only
CU_JIT_OPTIMIZATION_LEVEL = 7
# No option value required. Determines the target based on the current
# attached context (default)
# Option type: No option value needed
# Applies to: compiler and linker
CU_JIT_TARGET_FROM_CUCONTEXT = 8
# Target is chosen based on supplied ::CUjit_target. Cannot be
# combined with ::CU_JIT_THREADS_PER_BLOCK.
# Option type: unsigned int for enumerated type ::CUjit_target
# Applies to: compiler and linker
CU_JIT_TARGET = 9
# Specifies choice of fallback strategy if matching cubin is not found.
# Choice is based on supplied ::CUjit_fallback.
# Option type: unsigned int for enumerated type ::CUjit_fallback
# Applies to: compiler only
CU_JIT_FALLBACK_STRATEGY = 10
# Specifies whether to create debug information in output (-g)
# (0: false, default)
# Option type: int
# Applies to: compiler and linker
CU_JIT_GENERATE_DEBUG_INFO = 11
# Generate verbose log messages (0: false, default)
# Option type: int
# Applies to: compiler and linker
CU_JIT_LOG_VERBOSE = 12
# Generate line number information (-lineinfo) (0: false, default)
# Option type: int
# Applies to: compiler only
CU_JIT_GENERATE_LINE_INFO = 13
# Specifies whether to enable caching explicitly (-dlcm)
# Choice is based on supplied ::CUjit_cacheMode_enum.
# Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum
# Applies to: compiler only
CU_JIT_CACHE_MODE = 14
# Device attributes
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 1
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8
CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40
CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10
CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
########NEW FILE########
__FILENAME__ = error
from __future__ import print_function, absolute_import, division
class CudaDriverError(Exception):
pass
class CudaSupportError(ImportError):
pass
class NvvmError(Exception):
pass
class NvvmSupportError(ImportError):
pass
########NEW FILE########
__FILENAME__ = libs
from __future__ import print_function
import re
import os
import sys
import ctypes
import platform
from numba.findlib import find_lib, find_file
if sys.platform == 'win32':
_dllopener = ctypes.WinDLL
elif sys.platform == 'darwin':
_dllopener = ctypes.CDLL
else:
_dllopener = ctypes.CDLL
def get_libdevice(arch):
libdir = (os.environ.get('NUMBAPRO_LIBDEVICE') or
os.environ.get('NUMBAPRO_CUDALIB'))
pat = r'libdevice\.%s(\.\d+)*\.bc$' % arch
candidates = find_file(re.compile(pat), libdir)
return max(candidates) if candidates else None
def open_libdevice(arch):
with open(get_libdevice(arch), 'rb') as bcfile:
return bcfile.read()
def get_cudalib(lib, platform=None):
libdir = os.environ.get('NUMBAPRO_CUDALIB')
candidates = find_lib(lib, libdir, platform)
return max(candidates) if candidates else None
def open_cudalib(lib, ccc=False):
path = get_cudalib(lib)
if path is None:
raise OSError('library %s not found' % lib)
if ccc:
return ctypes.CDLL(path)
return _dllopener(path)
def test(_platform=None):
failed = False
libs = 'cublas cusparse cufft curand nvvm'.split()
for lib in libs:
path = get_cudalib(lib, _platform)
print('Finding', lib)
if path:
print('\tlocated at', path)
else:
print('\tERROR: can\'t locate lib')
failed = True
if not failed and _platform in (None, sys.platform):
try:
print('\ttrying to open library', end='...')
open_cudalib(lib, ccc=True)
print('\tok')
except OSError as e:
print('\tERROR: failed to open %s:\n%s' % (lib, e))
# NOTE: ignore failure of dlopen on cuBlas on OSX 10.5
failed = True if not _if_osx_10_5() else False
archs = 'compute_20', 'compute_30', 'compute_35'
for arch in archs:
print('\tfinding libdevice for', arch, end='...')
path = get_libdevice(arch)
if path:
print('\tok')
else:
print('\tERROR: can\'t open libdevice for %s' % arch)
failed = True
return not failed
def _if_osx_10_5():
if sys.platform == 'darwin':
vers = tuple(map(int, platform.mac_ver()[0].split('.')))
if vers < (10, 6):
return True
return False
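# Illustrative usage (added for clarity): running this module directly
# invokes the self-check above and reports where each CUDA library and
# libdevice bitcode file was located. Requires a CUDA toolkit installation.
if __name__ == '__main__':
    success = test()
    sys.exit(0 if success else 1)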
########NEW FILE########
__FILENAME__ = ndarray
from __future__ import print_function, absolute_import, division
import numba.ctypes_support as ctypes
from . import devices, driver
class ArrayHeaderManager(object):
"""
Manages array header memory for reusing the allocation.
It allocates one big chunk of memory and partitions it into fixed-size
array headers. It currently stores up to a 4D array header in 64-bit mode
or an 8D array header in 32-bit mode.
This allows the small array header allocations to be reused, which avoids
breaking asynchronous streams and avoids fragmentation of memory.
When it runs out of preallocated space, it automatically falls back to
regular allocation.
"""
# Caches associated contexts
# There is one array header manager per context.
context_map = {}
# The number of preallocated array headers
maxsize = 2 ** 10
# Maximum size for each array header
# = 4 (ndim) * 8 (sizeof intp) * 2 (shape strides) + 8 (ptr)
elemsize = 72
def __new__(cls, context):
key = context.handle.value
mm = cls.context_map.get(key)
if mm is None:
mm = object.__new__(cls)
mm.init(context)
cls.context_map[key] = mm
return mm
def init(self, context):
self.context = context
self.data = self.context.memalloc(self.elemsize * self.maxsize)
self.queue = []
for i in range(self.maxsize):
offset = i * self.elemsize
mem = self.data.view(offset, offset + self.elemsize)
self.queue.append(mem)
self.allocated = set()
def allocate(self, nd):
arraytype = make_array_ctype(nd)
sizeof = ctypes.sizeof(arraytype)
# Oversized or insufficient space
if sizeof >= self.elemsize or not self.queue:
return _allocate_head(nd)
mem = self.queue.pop()
self.allocated.add(mem)
return mem
def free(self, mem):
if mem in self.allocated:
self.allocated.discard(mem)
self.queue.append(mem)
def __repr__(self):
return "<cuda managed memory %s >" % (self.context.device,)
def make_array_ctype(ndim):
"""Create a array header type for a given dimension.
"""
c_intp = ctypes.c_ssize_t
class c_array(ctypes.Structure):
_fields_ = [('data', ctypes.c_void_p),
('shape', c_intp * ndim),
('strides', c_intp * ndim)]
return c_array
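def _header_size_example(ndim=4):
    # Illustrative sketch (added for clarity, not part of the original
    # module): on a 64-bit platform a 4-D header occupies
    # 8 (data pointer) + 4 * 8 * 2 (shape + strides) = 72 bytes, which is
    # exactly the per-slot budget used by ArrayHeaderManager.elemsize.
    return ctypes.sizeof(make_array_ctype(ndim))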
def _allocate_head(nd):
"""Allocate the metadata structure
"""
arraytype = make_array_ctype(nd)
gpu_head = devices.get_context().memalloc(ctypes.sizeof(arraytype))
return gpu_head
def ndarray_device_allocate_data(ary):
"""
Allocate gpu data buffer
"""
datasize = driver.host_memory_size(ary)
# allocate
gpu_data = devices.get_context().memalloc(datasize)
return gpu_data
def ndarray_populate_head(gpu_head, gpu_data, shape, strides, stream=0):
"""
Populate the array header
"""
nd = len(shape)
assert nd > 0, "0 or negative dimension"
arraytype = make_array_ctype(nd)
struct = arraytype(data=driver.device_pointer(gpu_data),
shape=shape,
strides=strides)
driver.host_to_device(gpu_head, struct, ctypes.sizeof(struct),
stream=stream)
driver.device_memory_depends(gpu_head, gpu_data)
########NEW FILE########
__FILENAME__ = nvvm
"""
This is a direct translation of nvvm.h
"""
from __future__ import print_function, absolute_import, division
import sys, logging, re
from ctypes import (c_void_p, c_int, POINTER, c_char_p, c_size_t, byref,
c_char)
from .error import NvvmError, NvvmSupportError
from .libs import open_libdevice, open_cudalib
logger = logging.getLogger(__name__)
ADDRSPACE_GENERIC = 0
ADDRSPACE_GLOBAL = 1
ADDRSPACE_SHARED = 3
ADDRSPACE_CONSTANT = 4
ADDRSPACE_LOCAL = 5
# Opaque handle for compilation unit
nvvm_program = c_void_p
# Result code
nvvm_result = c_int
RESULT_CODE_NAMES = '''
NVVM_SUCCESS
NVVM_ERROR_OUT_OF_MEMORY
NVVM_ERROR_PROGRAM_CREATION_FAILURE
NVVM_ERROR_IR_VERSION_MISMATCH
NVVM_ERROR_INVALID_INPUT
NVVM_ERROR_INVALID_PROGRAM
NVVM_ERROR_INVALID_IR
NVVM_ERROR_INVALID_OPTION
NVVM_ERROR_NO_MODULE_IN_PROGRAM
NVVM_ERROR_COMPILATION
'''.split()
for i, k in enumerate(RESULT_CODE_NAMES):
setattr(sys.modules[__name__], k, i)
class NVVM(object):
'''Process-wide singleton.
'''
_PROTOTYPES = {
# nvvmResult nvvmVersion(int *major, int *minor)
'nvvmVersion': (nvvm_result, POINTER(c_int), POINTER(c_int)),
# nvvmResult nvvmCreateProgram(nvvmProgram *cu)
'nvvmCreateProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmDestroyProgram(nvvmProgram *cu)
'nvvmDestroyProgram': (nvvm_result, POINTER(nvvm_program)),
# nvvmResult nvvmAddModuleToProgram(nvvmProgram cu, const char *buffer, size_t size)
'nvvmAddModuleToProgram': (
nvvm_result, nvvm_program, c_char_p, c_size_t),
# nvvmResult nvvmCompileProgram(nvvmProgram cu, int numOptions,
# const char **options)
'nvvmCompileProgram': (
nvvm_result, nvvm_program, c_int, POINTER(c_char_p)),
# nvvmResult nvvmGetCompiledResultSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetCompiledResultSize': (
nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetCompiledResult(nvvmProgram cu, char *buffer)
'nvvmGetCompiledResult': (nvvm_result, nvvm_program, c_char_p),
# nvvmResult nvvmGetProgramLogSize(nvvmProgram cu,
# size_t *bufferSizeRet)
'nvvmGetProgramLogSize': (nvvm_result, nvvm_program, POINTER(c_size_t)),
# nvvmResult nvvmGetProgramLog(nvvmProgram cu, char *buffer)
'nvvmGetProgramLog': (nvvm_result, nvvm_program, c_char_p),
}
# Singleton reference
__INSTANCE = None
def __new__(cls):
if not cls.__INSTANCE:
cls.__INSTANCE = inst = object.__new__(cls)
try:
inst.driver = open_cudalib('nvvm', ccc=True)
except OSError as e:
cls.__INSTANCE = None
errmsg = ("libNVVM cannot be found. Do `conda install "
"cudatoolkit`:\n%s")
raise NvvmSupportError(errmsg % e)
# Find & populate functions
for name, proto in inst._PROTOTYPES.items():
func = getattr(inst.driver, name)
func.restype = proto[0]
func.argtypes = proto[1:]
setattr(inst, name, func)
return cls.__INSTANCE
def get_version(self):
major = c_int()
minor = c_int()
err = self.nvvmVersion(byref(major), byref(minor))
self.check_error(err, 'Failed to get version.')
return major.value, minor.value
def check_error(self, error, msg, exit=False):
if error:
exc = NvvmError(msg, RESULT_CODE_NAMES[error])
if exit:
print(exc)
sys.exit(1)
else:
raise exc
class CompilationUnit(object):
def __init__(self):
self.driver = NVVM()
self._handle = nvvm_program()
err = self.driver.nvvmCreateProgram(byref(self._handle))
self.driver.check_error(err, 'Failed to create CU')
def __del__(self):
driver = NVVM()
err = driver.nvvmDestroyProgram(byref(self._handle))
driver.check_error(err, 'Failed to destroy CU', exit=True)
def add_module(self, buffer):
"""
Add a module level NVVM IR to a compilation unit.
- The buffer should contain an NVVM module IR either in the bitcode
representation (LLVM3.0) or in the text representation.
"""
err = self.driver.nvvmAddModuleToProgram(self._handle, buffer,
len(buffer))
self.driver.check_error(err, 'Failed to add module')
def compile(self, **options):
"""Perform Compliation
The valid compiler options are
* - -g (enable generation of debugging information)
* - -opt=
* - 0 (disable optimizations)
* - 3 (default, enable optimizations)
* - -arch=
* - compute_20 (default)
* - compute_30
* - compute_35
* - -ftz=
* - 0 (default, preserve denormal values, when performing
* single-precision floating-point operations)
* - 1 (flush denormal values to zero, when performing
* single-precision floating-point operations)
* - -prec-sqrt=
* - 0 (use a faster approximation for single-precision
* floating-point square root)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point square root)
* - -prec-div=
* - 0 (use a faster approximation for single-precision
* floating-point division and reciprocals)
* - 1 (default, use IEEE round-to-nearest mode for
* single-precision floating-point division and reciprocals)
* - -fma=
* - 0 (disable FMA contraction)
* - 1 (default, enable FMA contraction)
*
"""
# stringify options
opts = []
if options.get('debug'):
opts.append('-g')
options.pop('debug')
if options.get('opt'):
opts.append('-opt=%d' % options.pop('opt'))
if options.get('arch'):
opts.append('-arch=%s' % options.pop('arch'))
for k in ('ftz', 'prec_sqrt', 'prec_div', 'fma'):
if k in options:
v = bool(options.pop(k))
opts.append('-%s=%d' % (k.replace('_', '-'), v))
# compile
c_opts = (c_char_p * len(opts))(*[c_char_p(x.encode('utf8'))
for x in opts])
err = self.driver.nvvmCompileProgram(self._handle, len(opts), c_opts)
self._try_error(err, 'Failed to compile\n')
# get result
reslen = c_size_t()
err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen))
self._try_error(err, 'Failed to get size of compiled result.')
ptxbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetCompiledResult(self._handle, ptxbuf)
self._try_error(err, 'Failed to get compiled result.')
# get log
self.log = self.get_log()
return ptxbuf[:]
def _try_error(self, err, msg):
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
def get_log(self):
reslen = c_size_t()
err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen))
self.driver.check_error(err, 'Failed to get compilation log size.')
if reslen.value > 1:
logbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetProgramLog(self._handle, logbuf)
self.driver.check_error(err, 'Failed to get compilation log.')
return logbuf.value.decode('utf8') # populate log attribute
return ''
data_layout = {
32: ('e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-'
'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64'),
64: ('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-'
'f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64')}
default_data_layout = data_layout[tuple.__itemsize__ * 8]
SUPPORTED_CC = frozenset([(2, 0), (3, 0), (3, 5)])
def get_arch_option(major, minor):
if major == 2:
minor = 0
if major == 3:
if minor < 5:
minor = 0
else:
minor = 5
if (major, minor) not in SUPPORTED_CC:
raise Exception("compute compability %d.%d is not supported" %
(major, minor))
arch = 'compute_%d%d' % (major, minor)
return arch
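def _arch_option_example():
    # Illustrative sketch (added for clarity, not part of the original
    # module): compute capabilities are rounded down to the nearest
    # supported target, e.g. (2, 1) -> 'compute_20' and (3, 5) -> 'compute_35'.
    return get_arch_option(2, 1), get_arch_option(3, 5)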
MISSING_LIBDEVICE_MSG = '''
Please define the environment variable NUMBAPRO_LIBDEVICE=/path/to/libdevice,
where /path/to/libdevice is the directory containing the libdevice.*.bc
files shipped with the CUDA installation (requires CUDA >= 5.5).
'''
class LibDevice(object):
_cache_ = {}
def __init__(self, arch):
'''
arch --- must be result from get_arch_option()
'''
if arch not in self._cache_:
self._cache_[arch] = open_libdevice(arch)
self.bc = self._cache_[arch]
def get(self):
return self.bc
def llvm_to_ptx(llvmir, **opts):
cu = CompilationUnit()
libdevice = LibDevice(arch=opts.get('arch', 'compute_20'))
llvmir = llvm33_to_32_ir(llvmir)
cu.add_module(llvmir.encode('utf8'))
cu.add_module(libdevice.get())
ptx = cu.compile(**opts)
return ptx
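def _llvm_to_ptx_example(ir_text):
    # Illustrative sketch (added for clarity; `ir_text` is a hypothetical
    # NVVM IR module supplied by the caller). Compiles the module to PTX for
    # compute_20 with optimizations enabled and denormals flushed to zero.
    # Requires a working libNVVM / libdevice installation.
    return llvm_to_ptx(ir_text, opt=3, arch='compute_20', ftz=1)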
re_fnattr_ref = re.compile('#\d+')
re_fnattr_def = re.compile('attributes\s+(#\d+)\s*=\s*{((?:\s*\w+)+)\s*}')
def llvm33_to_32_ir(ir):
"""rewrite function attributes in the IR
"""
attrs = {}
for m in re_fnattr_def.finditer(ir):
ct, text = m.groups()
attrs[ct] = text
def scanline(line):
if line.startswith('define') or line.startswith('declare'):
for k, v in attrs.items():
if k in line:
return line.replace(k, v)
elif re_fnattr_def.match(line):
return '; %s' % line
return line
return '\n'.join(scanline(ln) for ln in ir.splitlines())
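def _fnattr_rewrite_example():
    # Illustrative sketch (added for clarity, not part of the original
    # module): an LLVM 3.3 attribute group reference on a definition is
    # expanded in place and the group definition itself is commented out.
    ir = ('define void @f() #0 {\n'
          'ret void\n'
          '}\n'
          'attributes #0 = { nounwind }')
    return llvm33_to_32_ir(ir)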
def set_cuda_kernel(lfunc):
from llvm.core import MetaData, MetaDataString, Constant, Type
m = lfunc.module
ops = lfunc, MetaDataString.get(m, "kernel"), Constant.int(Type.int(), 1)
md = MetaData.get(m, ops)
nmd = m.get_or_insert_named_metadata('nvvm.annotations')
nmd.add(md)
def fix_data_layout(module):
module.data_layout = default_data_layout
########NEW FILE########
__FILENAME__ = cudaimpl
from __future__ import print_function, absolute_import, division
from functools import reduce
import operator
from llvm.core import Type
import llvm.core as lc
import llvm.ee as le
from numba.targets.imputils import implement, Registry
from numba import cgutils
from numba import types
from .cudadrv import nvvm
from . import nvvmutils, stubs
registry = Registry()
register = registry.register
# -----------------------------------------------------------------------------
SREG_MAPPING = {
'tid.x': 'llvm.nvvm.read.ptx.sreg.tid.x',
'tid.y': 'llvm.nvvm.read.ptx.sreg.tid.y',
'tid.z': 'llvm.nvvm.read.ptx.sreg.tid.z',
'ntid.x': 'llvm.nvvm.read.ptx.sreg.ntid.x',
'ntid.y': 'llvm.nvvm.read.ptx.sreg.ntid.y',
'ntid.z': 'llvm.nvvm.read.ptx.sreg.ntid.z',
'ctaid.x': 'llvm.nvvm.read.ptx.sreg.ctaid.x',
'ctaid.y': 'llvm.nvvm.read.ptx.sreg.ctaid.y',
'ctaid.z': 'llvm.nvvm.read.ptx.sreg.ctaid.z',
'nctaid.x': 'llvm.nvvm.read.ptx.sreg.nctaid.x',
'nctaid.y': 'llvm.nvvm.read.ptx.sreg.nctaid.y',
'nctaid.z': 'llvm.nvvm.read.ptx.sreg.nctaid.z',
}
def _call_sreg(builder, name):
module = cgutils.get_module(builder)
fnty = Type.function(Type.int(), ())
fn = module.get_or_insert_function(fnty, name=SREG_MAPPING[name])
return builder.call(fn, ())
# -----------------------------------------------------------------------------
@register
@implement('ptx.grid.1d', types.intp)
def ptx_grid1d(context, builder, sig, args):
assert len(args) == 1
tidx = _call_sreg(builder, "tid.x")
ntidx = _call_sreg(builder, "ntid.x")
nctaidx = _call_sreg(builder, "ctaid.x")
res = builder.add(builder.mul(ntidx, nctaidx), tidx)
return res
@register
@implement('ptx.grid.2d', types.intp)
def ptx_grid2d(context, builder, sig, args):
assert len(args) == 1
tidx = _call_sreg(builder, "tid.x")
ntidx = _call_sreg(builder, "ntid.x")
nctaidx = _call_sreg(builder, "ctaid.x")
tidy = _call_sreg(builder, "tid.y")
ntidy = _call_sreg(builder, "ntid.y")
nctaidy = _call_sreg(builder, "ctaid.y")
r1 = builder.add(builder.mul(ntidx, nctaidx), tidx)
r2 = builder.add(builder.mul(ntidy, nctaidy), tidy)
return cgutils.pack_array(builder, [r1, r2])
# -----------------------------------------------------------------------------
def ptx_sreg_template(sreg):
def ptx_sreg_impl(context, builder, sig, args):
assert not args
return _call_sreg(builder, sreg)
return ptx_sreg_impl
# Dynamic create all special register
for sreg in SREG_MAPPING.keys():
register(implement(sreg)(ptx_sreg_template(sreg)))
# -----------------------------------------------------------------------------
@register
@implement('ptx.cmem.arylike', types.Kind(types.Array))
def ptx_cmem_arylike(context, builder, sig, args):
lmod = cgutils.get_module(builder)
[arr] = args
flat = arr.flatten(order='A')
aryty = sig.return_type
dtype = aryty.dtype
if isinstance(dtype, types.Complex):
elemtype = (types.float32
if dtype == types.complex64
else types.float64)
constvals = []
for i in range(flat.size):
elem = flat[i]
real = context.get_constant(elemtype, elem.real)
imag = context.get_constant(elemtype, elem.imag)
constvals.extend([real, imag])
elif dtype in types.number_domain:
constvals = [context.get_constant(dtype, flat[i])
for i in range(flat.size)]
else:
raise TypeError("unsupport type: %s" % dtype)
constary = lc.Constant.array(constvals[0].type, constvals)
addrspace = nvvm.ADDRSPACE_CONSTANT
gv = lmod.add_global_variable(constary.type, name="_cudapy_cmem",
addrspace=addrspace)
gv.linkage = lc.LINKAGE_INTERNAL
gv.global_constant = True
gv.initializer = constary
# Convert to generic address-space
conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
addrspaceptr = gv.bitcast(Type.pointer(Type.int(8), addrspace))
genptr = builder.call(conv, [addrspaceptr])
# Create array object
ary = context.make_array(aryty)(context, builder)
ary.data = builder.bitcast(genptr, ary.data.type)
kshape = [context.get_constant(types.intp, s) for s in arr.shape]
kstrides = [context.get_constant(types.intp, s) for s in arr.strides]
ary.shape = cgutils.pack_array(builder, kshape)
ary.strides = cgutils.pack_array(builder, kstrides)
return ary._getvalue()
@register
@implement('ptx.smem.alloc', types.intp, types.Any)
def ptx_smem_alloc_intp(context, builder, sig, args):
length, dtype = args
return _generic_array(context, builder, shape=(length,), dtype=dtype,
symbol_name='_cudapy_smem',
addrspace=nvvm.ADDRSPACE_SHARED,
can_dynsized=True)
@register
@implement('ptx.smem.alloc', types.Kind(types.UniTuple), types.Any)
def ptx_smem_alloc_array(context, builder, sig, args):
shape, dtype = args
return _generic_array(context, builder, shape=shape, dtype=dtype,
symbol_name='_cudapy_smem',
addrspace=nvvm.ADDRSPACE_SHARED,
can_dynsized=True)
@register
@implement('ptx.lmem.alloc', types.intp, types.Any)
def ptx_lmem_alloc_intp(context, builder, sig, args):
length, dtype = args
return _generic_array(context, builder, shape=(length,), dtype=dtype,
symbol_name='_cudapy_lmem',
addrspace=nvvm.ADDRSPACE_LOCAL,
can_dynsized=False)
@register
@implement('ptx.lmem.alloc', types.Kind(types.UniTuple), types.Any)
def ptx_lmem_alloc_array(context, builder, sig, args):
shape, dtype = args
return _generic_array(context, builder, shape=shape, dtype=dtype,
symbol_name='_cudapy_lmem',
addrspace=nvvm.ADDRSPACE_LOCAL,
can_dynsized=False)
@register
@implement(stubs.syncthreads)
def ptx_syncthreads(context, builder, sig, args):
assert not args
fname = 'llvm.nvvm.barrier0'
lmod = cgutils.get_module(builder)
fnty = Type.function(Type.void(), ())
sync = lmod.get_or_insert_function(fnty, name=fname)
builder.call(sync, ())
return context.get_dummy_value()
@register
@implement(stubs.atomic.add, types.Kind(types.Array), types.intp, types.Any)
def ptx_atomic_add_intp(context, builder, sig, args):
aryty, indty, valty = sig.args
ary, ind, val = args
dtype = aryty.dtype
if dtype != valty:
raise TypeError("expect %s but got %s" % (dtype, valty))
if aryty.ndim != 1:
raise TypeError("indexing %d-D array with 1-D index" % (aryty.ndim,))
lary = context.make_array(aryty)(context, builder, ary)
ptr = cgutils.get_item_pointer(builder, aryty, lary, [ind])
return builder.atomic_rmw('add', ptr, val, 'monotonic')
@register
@implement(stubs.atomic.add, types.Kind(types.Array),
types.Kind(types.UniTuple), types.Any)
def ptx_atomic_add_unituple(context, builder, sig, args):
aryty, indty, valty = sig.args
ary, inds, val = args
dtype = aryty.dtype
indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(indty, indices)]
if dtype != valty:
raise TypeError("expect %s but got %s" % (dtype, valty))
if aryty.ndim != len(indty):
raise TypeError("indexing %d-D array with %d-D index" %
(aryty.ndim, len(indty)))
lary = context.make_array(aryty)(context, builder, ary)
ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)
return builder.atomic_rmw('add', ptr, val, 'monotonic')
@register
@implement(stubs.atomic.add, types.Kind(types.Array),
types.Kind(types.Tuple), types.Any)
def ptx_atomic_add_tuple(context, builder, sig, args):
aryty, indty, valty = sig.args
ary, inds, val = args
dtype = aryty.dtype
indices = cgutils.unpack_tuple(builder, inds, count=len(indty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(indty, indices)]
if dtype != valty:
raise TypeError("expect %s but got %s" % (dtype, valty))
if aryty.ndim != len(indty):
raise TypeError("indexing %d-D array with %d-D index" %
(aryty.ndim, len(indty)))
lary = context.make_array(aryty)(context, builder, ary)
ptr = cgutils.get_item_pointer(builder, aryty, lary, indices)
return builder.atomic_rmw('add', ptr, val, 'monotonic')
# -----------------------------------------------------------------------------
def _get_target_data(context):
return le.TargetData.new(nvvm.data_layout[context.address_size])
def _generic_array(context, builder, shape, dtype, symbol_name, addrspace,
can_dynsized=False):
elemcount = reduce(operator.mul, shape)
lldtype = context.get_data_type(dtype)
laryty = Type.array(lldtype, elemcount)
if addrspace == nvvm.ADDRSPACE_LOCAL:
# Special-case local address space allocation to use alloca
# NVVM is smart enough to only use local memory if no register is
# available
dataptr = builder.alloca(laryty, name=symbol_name)
else:
lmod = cgutils.get_module(builder)
# Create global variable in the requested address-space
gvmem = lmod.add_global_variable(laryty, symbol_name, addrspace)
if elemcount <= 0:
if can_dynsized: # dynamic shared memory
gvmem.linkage = lc.LINKAGE_EXTERNAL
else:
raise ValueError("array length <= 0")
else:
gvmem.linkage = lc.LINKAGE_INTERNAL
gvmem.initializer = lc.Constant.undef(laryty)
if dtype not in types.number_domain:
raise TypeError("unsupported type: %s" % dtype)
# Convert to generic address-space
conv = nvvmutils.insert_addrspace_conv(lmod, Type.int(8), addrspace)
addrspaceptr = gvmem.bitcast(Type.pointer(Type.int(8), addrspace))
dataptr = builder.call(conv, [addrspaceptr])
return _make_array(context, builder, dataptr, dtype, shape)
def _make_array(context, builder, dataptr, dtype, shape, layout='C'):
ndim = len(shape)
# Create array object
aryty = types.Array(dtype=dtype, ndim=ndim, layout=layout)
ary = context.make_array(aryty)(context, builder)
ary.data = builder.bitcast(dataptr, ary.data.type)
targetdata = _get_target_data(context)
lldtype = context.get_data_type(dtype)
itemsize = targetdata.abi_size(lldtype)
# Compute strides
rstrides = [itemsize]
for i, lastsize in enumerate(reversed(shape[1:])):
rstrides.append(lastsize * rstrides[-1])
strides = [s for s in reversed(rstrides)]
kshape = [context.get_constant(types.intp, s) for s in shape]
kstrides = [context.get_constant(types.intp, s) for s in strides]
ary.shape = cgutils.pack_array(builder, kshape)
ary.strides = cgutils.pack_array(builder, kstrides)
return ary._getvalue()
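def _c_strides_example(shape=(2, 3, 4), itemsize=8):
    # Illustrative sketch (added for clarity, not part of the original
    # module): the same row-major stride computation used in _make_array,
    # expressed in pure Python. For shape (2, 3, 4) and an 8-byte element it
    # returns (96, 32, 8).
    rstrides = [itemsize]
    for lastsize in reversed(shape[1:]):
        rstrides.append(lastsize * rstrides[-1])
    return tuple(reversed(rstrides))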
########NEW FILE########
__FILENAME__ = cudamath
from __future__ import print_function, absolute_import, division
import math
from numba import types, utils
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
signature, Registry)
registry = Registry()
builtin_attr = registry.register_attr
builtin_global = registry.register_global
@builtin_attr
class MathModuleAttribute(AttributeTemplate):
key = types.Module(math)
def resolve_fabs(self, mod):
return types.Function(Math_fabs)
def resolve_exp(self, mod):
return types.Function(Math_exp)
def resolve_expm1(self, mod):
return types.Function(Math_expm1)
def resolve_sqrt(self, mod):
return types.Function(Math_sqrt)
def resolve_log(self, mod):
return types.Function(Math_log)
def resolve_log1p(self, mod):
return types.Function(Math_log1p)
def resolve_log10(self, mod):
return types.Function(Math_log10)
def resolve_sin(self, mod):
return types.Function(Math_sin)
def resolve_cos(self, mod):
return types.Function(Math_cos)
def resolve_tan(self, mod):
return types.Function(Math_tan)
def resolve_sinh(self, mod):
return types.Function(Math_sinh)
def resolve_cosh(self, mod):
return types.Function(Math_cosh)
def resolve_tanh(self, mod):
return types.Function(Math_tanh)
def resolve_asin(self, mod):
return types.Function(Math_asin)
def resolve_acos(self, mod):
return types.Function(Math_acos)
def resolve_atan(self, mod):
return types.Function(Math_atan)
def resolve_atan2(self, mod):
return types.Function(Math_atan2)
def resolve_asinh(self, mod):
return types.Function(Math_asinh)
def resolve_acosh(self, mod):
return types.Function(Math_acosh)
def resolve_atanh(self, mod):
return types.Function(Math_atanh)
def resolve_pi(self, mod):
return types.float64
def resolve_e(self, mod):
return types.float64
def resolve_floor(self, mod):
return types.Function(Math_floor)
def resolve_ceil(self, mod):
return types.Function(Math_ceil)
def resolve_trunc(self, mod):
return types.Function(Math_trunc)
def resolve_isnan(self, mod):
return types.Function(Math_isnan)
def resolve_isinf(self, mod):
return types.Function(Math_isinf)
def resolve_degrees(self, mod):
return types.Function(Math_degrees)
def resolve_radians(self, mod):
return types.Function(Math_radians)
# def resolve_hypot(self, mod):
# return types.Function(Math_hypot)
def resolve_copysign(self, mod):
return types.Function(Math_copysign)
def resolve_fmod(self, mod):
return types.Function(Math_fmod)
def resolve_pow(self, mod):
return types.Function(Math_pow)
class Math_unary(ConcreteTemplate):
cases = [
signature(types.float64, types.int64),
signature(types.float64, types.uint64),
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
class Math_fabs(Math_unary):
key = math.fabs
class Math_exp(Math_unary):
key = math.exp
if utils.PYVERSION > (2, 6):
class Math_expm1(Math_unary):
key = math.expm1
class Math_sqrt(Math_unary):
key = math.sqrt
class Math_log(Math_unary):
key = math.log
class Math_log1p(Math_unary):
key = math.log1p
class Math_log10(Math_unary):
key = math.log10
class Math_sin(Math_unary):
key = math.sin
class Math_cos(Math_unary):
key = math.cos
class Math_tan(Math_unary):
key = math.tan
class Math_sinh(Math_unary):
key = math.sinh
class Math_cosh(Math_unary):
key = math.cosh
class Math_tanh(Math_unary):
key = math.tanh
class Math_asin(Math_unary):
key = math.asin
class Math_acos(Math_unary):
key = math.acos
class Math_atan(Math_unary):
key = math.atan
class Math_atan2(ConcreteTemplate):
key = math.atan2
cases = [
signature(types.float64, types.int64, types.int64),
signature(types.float64, types.uint64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
class Math_asinh(Math_unary):
key = math.asinh
class Math_acosh(Math_unary):
key = math.acosh
class Math_atanh(Math_unary):
key = math.atanh
class Math_floor(Math_unary):
key = math.floor
class Math_ceil(Math_unary):
key = math.ceil
class Math_trunc(Math_unary):
key = math.trunc
class Math_radians(Math_unary):
key = math.radians
class Math_degrees(Math_unary):
key = math.degrees
# class Math_hypot(ConcreteTemplate):
# key = math.hypot
# cases = [
# signature(types.float64, types.int64, types.int64),
# signature(types.float64, types.uint64, types.uint64),
# signature(types.float32, types.float32, types.float32),
# signature(types.float64, types.float64, types.float64),
# ]
class Math_binary(ConcreteTemplate):
cases = [
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
class Math_copysign(Math_binary):
key = math.copysign
class Math_fmod(Math_binary):
key = math.fmod
class Math_pow(ConcreteTemplate):
key = math.pow
cases = [
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
signature(types.float32, types.float32, types.int32),
signature(types.float64, types.float64, types.int32),
]
class Math_isnan(ConcreteTemplate):
key = math.isnan
cases = [
signature(types.boolean, types.int64),
signature(types.boolean, types.uint64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
]
class Math_isinf(ConcreteTemplate):
key = math.isinf
cases = [
signature(types.boolean, types.int64),
signature(types.boolean, types.uint64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
]
builtin_global(math, types.Module(math))
builtin_global(math.fabs, types.Function(Math_fabs))
builtin_global(math.exp, types.Function(Math_exp))
if utils.PYVERSION > (2, 6):
builtin_global(math.expm1, types.Function(Math_expm1))
builtin_global(math.sqrt, types.Function(Math_sqrt))
builtin_global(math.log, types.Function(Math_log))
builtin_global(math.log1p, types.Function(Math_log1p))
builtin_global(math.log10, types.Function(Math_log10))
builtin_global(math.sin, types.Function(Math_sin))
builtin_global(math.cos, types.Function(Math_cos))
builtin_global(math.tan, types.Function(Math_tan))
builtin_global(math.sinh, types.Function(Math_sinh))
builtin_global(math.cosh, types.Function(Math_cosh))
builtin_global(math.tanh, types.Function(Math_tanh))
builtin_global(math.asin, types.Function(Math_asin))
builtin_global(math.acos, types.Function(Math_acos))
builtin_global(math.atan, types.Function(Math_atan))
builtin_global(math.atan2, types.Function(Math_atan2))
builtin_global(math.asinh, types.Function(Math_asinh))
builtin_global(math.acosh, types.Function(Math_acosh))
builtin_global(math.atanh, types.Function(Math_atanh))
# builtin_global(math.hypot, types.Function(Math_hypot))
builtin_global(math.floor, types.Function(Math_floor))
builtin_global(math.ceil, types.Function(Math_ceil))
builtin_global(math.trunc, types.Function(Math_trunc))
builtin_global(math.isnan, types.Function(Math_isnan))
builtin_global(math.isinf, types.Function(Math_isinf))
builtin_global(math.degrees, types.Function(Math_degrees))
builtin_global(math.radians, types.Function(Math_radians))
builtin_global(math.copysign, types.Function(Math_copysign))
builtin_global(math.fmod, types.Function(Math_fmod))
builtin_global(math.pow, types.Function(Math_pow))
########NEW FILE########
__FILENAME__ = decorators
from __future__ import print_function, absolute_import, division
from numba import sigutils, types
from .compiler import (compile_kernel, compile_device, declare_device_function,
AutoJitCUDAKernel)
def jit(restype=None, argtypes=None, device=False, inline=False, bind=True,
link=[], debug=False, **kws):
"""JIT compile a python function conforming to
the CUDA-Python specification.
To define a CUDA kernel that takes two int 1D-arrays::
@cuda.jit('void(int32[:], int32[:])')
def foo(aryA, aryB):
...
.. note:: A kernel cannot have any return value.
To launch the cuda kernel::
griddim = 1, 2
blockdim = 3, 4
foo[griddim, blockdim](aryA, aryB)
``griddim`` is the number of thread-blocks per grid.
It can be:
* an int;
* tuple-1 of ints;
* tuple-2 of ints.
``blockdim`` is the number of threads per block.
It can be:
* an int;
* tuple-1 of ints;
* tuple-2 of ints;
* tuple-3 of ints.
The above code is equivalent to the following CUDA-C.
.. code-block:: c
dim3 griddim(1, 2);
dim3 blockdim(3, 4);
foo<<<griddim, blockdim>>>(aryA, aryB);
To access the compiled PTX code::
print(foo.ptx)
To define a CUDA device function that takes two ints and returns a int::
@cuda.jit('int32(int32, int32)', device=True)
def bar(a, b):
...
To force inline the device function::
@cuda.jit('int32(int32, int32)', device=True, inline=True)
def bar_forced_inline(a, b):
...
A device function can only be used inside another kernel.
It cannot be called from the host.
Using ``bar`` in a CUDA kernel::
@cuda.jit('void(int32[:], int32[:], int32[:])')
def use_bar(aryA, aryB, aryOut):
i = cuda.grid(1) # global position of the thread for a 1D grid.
aryOut[i] = bar(aryA[i], aryB[i])
"""
restype, argtypes = convert_types(restype, argtypes)
if restype and not device and restype != types.void:
raise TypeError("CUDA kernel must have void return type.")
def kernel_jit(func):
kernel = compile_kernel(func, argtypes, link=link, debug=debug)
# Force compilation for the current context
if bind:
kernel.bind()
return kernel
def device_jit(func):
return compile_device(func, restype, argtypes, inline=True,
debug=debug)
if device:
return device_jit
else:
return kernel_jit
def autojit(func, **kws):
"""JIT at callsite. Function signature is not needed as this
will capture the type at call time. Each signature of the kernel
is cached for future use.
.. note:: Can only compile CUDA kernel.
Example::
import numpy
@cuda.autojit
def foo(aryA, aryB):
...
aryA = numpy.arange(10, dtype=numpy.int32)
aryB = numpy.arange(10, dtype=numpy.float32)
foo[griddim, blockdim](aryA, aryB)
In the above code, a version of foo with the signature
"void(int32[:], float32[:])" is compiled.
"""
return AutoJitCUDAKernel(func, bind=True)
def declare_device(name, restype=None, argtypes=None):
restype, argtypes = convert_types(restype, argtypes)
return declare_device_function(name, restype, argtypes)
def convert_types(restype, argtypes):
# eval type string
if sigutils.is_signature(restype):
assert argtypes is None
argtypes, restype = sigutils.normalize_signature(restype)
return restype, argtypes
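# Illustrative usage (added for clarity; the function name and signature are
# hypothetical, and the resulting stub can only be called from device code):
#
#     bar = declare_device('bar', 'int32(int32, int32)')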
########NEW FILE########
__FILENAME__ = descriptor
from __future__ import print_function, division, absolute_import
from numba.targets.descriptors import TargetDescriptor
from numba.targets.options import TargetOptions
from .target import CUDATargetContext, CUDATypingContext
class CPUTargetOptions(TargetOptions):
OPTIONS = {}
class CUDATargetDesc(TargetDescriptor):
options = CPUTargetOptions
typingctx = CUDATypingContext()
targetctx = CUDATargetContext(typingctx)
########NEW FILE########
__FILENAME__ = libdevice
from __future__ import print_function, absolute_import, division
import sys
import math
from llvm.core import Type
from numba import cgutils, types
from numba.targets.imputils import implement, Registry
registry = Registry()
register = registry.register
float_set = types.float32, types.float64
def bool_implement(nvname, ty):
def core(context, builder, sig, args):
assert sig.return_type == types.boolean, nvname
fty = context.get_value_type(ty)
lmod = cgutils.get_module(builder)
fnty = Type.function(Type.int(), [fty])
fn = lmod.get_or_insert_function(fnty, name=nvname)
result = builder.call(fn, args)
return context.cast(builder, result, types.int32, types.boolean)
return core
def unary_implement(nvname, ty):
def core(context, builder, sig, args):
fty = context.get_value_type(ty)
lmod = cgutils.get_module(builder)
fnty = Type.function(fty, [fty])
fn = lmod.get_or_insert_function(fnty, name=nvname)
return builder.call(fn, args)
return core
def binary_implement(nvname, ty):
def core(context, builder, sig, args):
fty = context.get_value_type(ty)
lmod = cgutils.get_module(builder)
fnty = Type.function(fty, [fty, fty])
fn = lmod.get_or_insert_function(fnty, name=nvname)
return builder.call(fn, args)
return core
def powi_implement(nvname):
def core(context, builder, sig, args):
[base, pow] = args
[basety, powty] = sig.args
lmod = cgutils.get_module(builder)
fty = context.get_value_type(basety)
ity = context.get_value_type(types.int32)
fnty = Type.function(fty, [fty, ity])
fn = lmod.get_or_insert_function(fnty, name=nvname)
return builder.call(fn, [base, pow])
return core
register(implement(math.pow, types.float32, types.int32)(powi_implement(
'__nv_powif')))
register(implement(math.pow, types.float64, types.int32)(
powi_implement('__nv_powi')))
booleans = []
booleans += [('__nv_isnand', '__nv_isnanf', math.isnan)]
booleans += [('__nv_isinfd', '__nv_isinff', math.isinf)]
unarys = []
unarys += [('__nv_ceil', '__nv_ceilf', math.ceil)]
unarys += [('__nv_floor', '__nv_floorf', math.floor)]
unarys += [('__nv_fabs', '__nv_fabsf', math.fabs)]
unarys += [('__nv_exp', '__nv_expf', math.exp)]
if sys.version_info[:2] >= (2, 7):
unarys += [('__nv_expm1', '__nv_expm1f', math.expm1)]
unarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)]
unarys += [('__nv_log', '__nv_logf', math.log)]
unarys += [('__nv_log10', '__nv_log10f', math.log10)]
unarys += [('__nv_log1p', '__nv_log1pf', math.log1p)]
unarys += [('__nv_acosh', '__nv_acoshf', math.acosh)]
unarys += [('__nv_acos', '__nv_acosf', math.acos)]
unarys += [('__nv_cos', '__nv_cosf', math.cos)]
unarys += [('__nv_cosh', '__nv_coshf', math.cosh)]
unarys += [('__nv_asinh', '__nv_asinhf', math.asinh)]
unarys += [('__nv_asin', '__nv_asinf', math.asin)]
unarys += [('__nv_sin', '__nv_sinf', math.sin)]
unarys += [('__nv_sinh', '__nv_sinhf', math.sinh)]
unarys += [('__nv_atan', '__nv_atanf', math.atan)]
unarys += [('__nv_atanh', '__nv_atanhf', math.atanh)]
unarys += [('__nv_tan', '__nv_tanf', math.tan)]
unarys += [('__nv_tanh', '__nv_tanhf', math.tanh)]
binarys = []
binarys += [('__nv_copysign', '__nv_copysignf', math.copysign)]
binarys += [('__nv_atan2', '__nv_atan2f', math.atan2)]
binarys += [('__nv_pow', '__nv_powf', math.pow)]
binarys += [('__nv_fmod', '__nv_fmodf', math.fmod)]
for name64, name32, key in booleans:
impl64 = bool_implement(name64, types.float64)
register(implement(key, types.float64)(impl64))
impl32 = bool_implement(name32, types.float32)
register(implement(key, types.float32)(impl32))
for name64, name32, key in unarys:
impl64 = unary_implement(name64, types.float64)
register(implement(key, types.float64)(impl64))
impl32 = unary_implement(name32, types.float32)
register(implement(key, types.float32)(impl32))
for name64, name32, key in binarys:
impl64 = binary_implement(name64, types.float64)
register(implement(key, types.float64, types.float64)(impl64))
impl32 = binary_implement(name32, types.float32)
register(implement(key, types.float32, types.float32)(impl32))
########NEW FILE########
__FILENAME__ = nvvmutils
from __future__ import print_function, absolute_import, division
import llvm.core as lc
from .cudadrv import nvvm
def insert_addrspace_conv(lmod, elemtype, addrspace):
addrspacename = {
nvvm.ADDRSPACE_SHARED: 'shared',
nvvm.ADDRSPACE_LOCAL: 'local',
nvvm.ADDRSPACE_CONSTANT: 'constant',
}[addrspace]
tyname = str(elemtype)
tyname = {'float': 'f32', 'double': 'f64'}.get(tyname, tyname)
s2g_name_fmt = 'llvm.nvvm.ptr.' + addrspacename + '.to.gen.p0%s.p%d%s'
s2g_name = s2g_name_fmt % (tyname, addrspace, tyname)
elem_ptr_ty = lc.Type.pointer(elemtype)
elem_ptr_ty_addrspace = lc.Type.pointer(elemtype, addrspace)
s2g_fnty = lc.Type.function(elem_ptr_ty,
[elem_ptr_ty_addrspace])
return lmod.get_or_insert_function(s2g_fnty, s2g_name)
def declare_vprint(lmod):
voidptrty = lc.Type.pointer(lc.Type.int(8))
vprintfty = lc.Type.function(lc.Type.int(), [voidptrty, voidptrty])
vprintf = lmod.get_or_insert_function(vprintfty, "vprintf")
return vprintf
def declare_string(builder, value):
lmod = builder.basic_block.function.module
cval = lc.Constant.stringz(value)
gl = lmod.add_global_variable(cval.type, name="_str",
addrspace=nvvm.ADDRSPACE_CONSTANT)
gl.linkage = lc.LINKAGE_INTERNAL
gl.global_constant = True
gl.initializer = cval
charty = lc.Type.int(8)
constcharptrty = lc.Type.pointer(charty, nvvm.ADDRSPACE_CONSTANT)
charptr = builder.bitcast(gl, constcharptrty)
conv = insert_addrspace_conv(lmod, charty, nvvm.ADDRSPACE_CONSTANT)
return builder.call(conv, [charptr])
########NEW FILE########
__FILENAME__ = stubs
"""
This script specifies all PTX special objects.
"""
from __future__ import print_function, absolute_import, division
import operator
import numpy
import llvm.core as lc
from numba import types, ir, typing, macro, dispatcher
from .cudadrv import nvvm
class Stub(object):
'''A stub object to represent special objects which are meaningless
outside the context of CUDA-Python.
'''
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
#-------------------------------------------------------------------------------
# SREG
SREG_SIGNATURE = typing.signature(types.int32)
class threadIdx(Stub):
'''threadIdx.{x, y, z}
'''
_description_ = '<threadIdx.{x,y,z}>'
x = macro.Macro('tid.x', SREG_SIGNATURE)
y = macro.Macro('tid.y', SREG_SIGNATURE)
z = macro.Macro('tid.z', SREG_SIGNATURE)
class blockIdx(Stub):
'''blockIdx.{x, y, z}
'''
_description_ = '<blockIdx.{x,y,z}>'
x = macro.Macro('ctaid.x', SREG_SIGNATURE)
y = macro.Macro('ctaid.y', SREG_SIGNATURE)
z = macro.Macro('ctaid.z', SREG_SIGNATURE)
class blockDim(Stub):
'''blockDim.{x, y, z}
'''
x = macro.Macro('ntid.x', SREG_SIGNATURE)
y = macro.Macro('ntid.y', SREG_SIGNATURE)
z = macro.Macro('ntid.z', SREG_SIGNATURE)
class gridDim(Stub):
'''gridDim.{x, y, z}
'''
_description_ = '<gridDim.{x,y,z}>'
x = macro.Macro('nctaid.x', SREG_SIGNATURE)
y = macro.Macro('nctaid.y', SREG_SIGNATURE)
z = macro.Macro('nctaid.z', SREG_SIGNATURE)
#-------------------------------------------------------------------------------
# Grid Macro
def _ptx_grid1d(): pass
def _ptx_grid2d(): pass
def grid_expand(ndim):
"""grid(ndim)
ndim: [int] 1 or 2
if ndim == 1:
return cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
elif ndim == 2:
x = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
y = cuda.threadIdx.y + cuda.blockIdx.y * cuda.blockDim.y
return x, y
"""
if ndim == 1:
fname = "ptx.grid.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.grid.2d"
restype = types.UniTuple(types.int32, 2)
else:
raise ValueError('argument can only be 1 or 2')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
grid = macro.Macro('ptx.grid', grid_expand, callable=True)
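# Illustrative usage inside a kernel (added for clarity):
#
#     i = cuda.grid(1)      # threadIdx.x + blockIdx.x * blockDim.x
#     x, y = cuda.grid(2)   # 2-D global thread indices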
#-------------------------------------------------------------------------------
# syncthreads
class syncthreads(Stub):
'''syncthreads()
Synchronizes all threads in the thread block.
'''
_description_ = '<syncthreads()>'
#-------------------------------------------------------------------------------
# shared
def shared_array(shape, dtype):
ndim = 1
if isinstance(shape, tuple):
ndim = len(shape)
fname = "ptx.smem.alloc"
restype = types.Array(dtype, ndim, 'C')
if ndim == 1:
sig = typing.signature(restype, types.intp, types.Any)
else:
sig = typing.signature(restype, types.UniTuple(types.intp, ndim),
types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class shared(Stub):
"""shared namespace
"""
_description_ = '<shared>'
array = macro.Macro('shared.array', shared_array, callable=True,
argnames=['shape', 'dtype'])
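# Illustrative usage inside a kernel (added for clarity; assumes
# `from numba import cuda, float32` at the call site):
#
#     tile = cuda.shared.array(shape=(16, 16), dtype=float32)
#     tile[cuda.threadIdx.x, cuda.threadIdx.y] = 0.0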
#-------------------------------------------------------------------------------
# local array
def local_array(shape, dtype):
ndim = 1
if isinstance(shape, tuple):
ndim = len(shape)
fname = "ptx.lmem.alloc"
restype = types.Array(dtype, ndim, 'C')
if ndim == 1:
sig = typing.signature(restype, types.intp, types.Any)
else:
sig = typing.signature(restype, types.UniTuple(types.intp, ndim),
types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class local(Stub):
'''local namespace
'''
_description_ = '<local>'
array = macro.Macro('local.array', local_array, callable=True,
argnames=['shape', 'dtype'])
#-------------------------------------------------------------------------------
# const array
def const_array_like(ndarray):
fname = "ptx.cmem.arylike"
aryty = dispatcher.typeof_pyval(ndarray)
sig = typing.signature(aryty, aryty)
return ir.Intrinsic(fname, sig, args=[ndarray])
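# NOTE: everything below this return statement is unreachable dead code.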
raise NotImplementedError
[aryarg] = args
ary = aryarg.value
count = reduce(operator.mul, ary.shape)
dtype = types.from_dtype(numpy.dtype(ary.dtype))
def impl(context, args, argtys, retty):
builder = context.builder
lmod = builder.basic_block.function.module
addrspace = nvvm.ADDRSPACE_CONSTANT
data_t = dtype.llvm_as_value()
flat = ary.flatten(order='A') # preserve order
constvals = [dtype.llvm_const(flat[i]) for i in range(flat.size)]
constary = lc.Constant.array(data_t, constvals)
gv = lmod.add_global_variable(constary.type, "cmem", addrspace)
gv.linkage = lc.LINKAGE_INTERNAL
gv.global_constant = True
gv.initializer = constary
byte = lc.Type.int(8)
byte_ptr_as = lc.Type.pointer(byte, addrspace)
to_generic = nvvmutils.insert_addrspace_conv(lmod, byte, addrspace)
rawdata = builder.call(to_generic, [builder.bitcast(gv, byte_ptr_as)])
data = builder.bitcast(rawdata, lc.Type.pointer(data_t))
llintp = types.intp.llvm_as_value()
cshape = lc.Constant.array(llintp,
map(types.const_intp, ary.shape))
cstrides = lc.Constant.array(llintp,
map(types.const_intp, ary.strides))
res = lc.Constant.struct([lc.Constant.null(data.type), cshape,
cstrides])
res = builder.insert_value(res, data, 0)
return res
if ary.flags['C_CONTIGUOUS']:
contig = 'C'
elif ary.flags['F_CONTIGUOUS']:
contig = 'F'
else:
raise TypeError("array must be either C/F contiguous to be used as a "
"constant")
impl.codegen = True
impl.return_type = types.arraytype(dtype, ary.ndim, 'A')
return impl
class const(Stub):
'''const namespace
'''
_description_ = '<const>'
array_like = macro.Macro('const.array_like', const_array_like,
callable=True, argnames=['ary'])
#-------------------------------------------------------------------------------
# atomic
class atomic(Stub):
"""atomic namespace
"""
_description_ = '<atomic>'
class add(Stub):
"""add(ary, idx, val)
Perform atomic ary[idx] += val
"""
########NEW FILE########
__FILENAME__ = target
from __future__ import print_function, absolute_import
import re
from llvm.core import Type, Builder, LINKAGE_INTERNAL, inline_function
from numba import typing, types
from numba.targets.base import BaseContext
from .cudadrv import nvvm
# -----------------------------------------------------------------------------
# Typing
class CUDATypingContext(typing.BaseContext):
def init(self):
from . import cudadecl, cudamath
self.install(cudadecl.registry)
self.install(cudamath.registry)
# -----------------------------------------------------------------------------
# Implementation
VALID_CHARS = re.compile(r'[^a-z0-9]', re.I)
class CUDATargetContext(BaseContext):
implement_powi_as_math_call = True
def init(self):
from . import cudaimpl, libdevice
self.insert_func_defn(cudaimpl.registry.functions)
self.insert_func_defn(libdevice.registry.functions)
def mangler(self, name, argtypes):
def repl(m):
ch = m.group(0)
return "_%X_" % ord(ch)
qualified = name + '.' + '.'.join(str(a) for a in argtypes)
mangled = VALID_CHARS.sub(repl, qualified)
return mangled
def prepare_cuda_kernel(self, func, argtypes):
# Adapt to CUDA LLVM
module = func.module
func.linkage = LINKAGE_INTERNAL
wrapper = self.generate_kernel_wrapper(func, argtypes)
func.delete()
del func
nvvm.set_cuda_kernel(wrapper)
nvvm.fix_data_layout(module)
return wrapper
def generate_kernel_wrapper(self, func, argtypes):
module = func.module
argtys = self.get_arguments(func.type.pointee)
fnty = Type.function(Type.void(), argtys)
wrapfn = module.add_function(fnty, name="cudaPy_" + func.name)
builder = Builder.new(wrapfn.append_basic_block(''))
callargs = []
for at, av in zip(argtypes, wrapfn.args):
av = self.get_argument_value(builder, at, av)
callargs.append(av)
status, _ = self.call_function(builder, func, types.void, argtypes,
callargs)
# TODO handle status
builder.ret_void()
del builder
# force inline
inline_function(status.code)
module.verify()
return wrapfn
def link_dependencies(self, module, depends):
for lib in depends:
module.link_in(lib, preserve=True)
def make_constant_array(self, builder, typ, ary):
"""
Return dummy value.
XXX: We should be able to move cuda.const.array_like into here.
"""
a = self.make_array(typ)(self, builder)
return a._getvalue()
########NEW FILE########
__FILENAME__ = testing
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
class CUDATestCase(unittest.TestCase):
def tearDown(self):
from numba.cuda.cudadrv.devices import reset
reset()
########NEW FILE########
__FILENAME__ = runtests
from __future__ import print_function, division, absolute_import
from numba.testing import discover_tests, run_tests
def test():
suite = discover_tests("numba.cuda.tests.cudadrv")
return run_tests(suite).wasSuccessful()
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = test_array_attr
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestArrayAttr(unittest.TestCase):
def test_contigous_2d(self):
ary = np.arange(10)
cary = ary.reshape(2, 5)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_contigous_3d(self):
ary = np.arange(20)
cary = ary.reshape(2, 5, 2)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_contigous_4d(self):
ary = np.arange(60)
cary = ary.reshape(2, 5, 2, 3)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_ravel_c(self):
ary = np.arange(60)
reshaped = ary.reshape(2, 5, 2, 3)
expect = reshaped.ravel(order='C')
dary = cuda.to_device(reshaped)
dflat = dary.ravel()
flat = dflat.copy_to_host()
self.assertTrue(flat.ndim == 1)
self.assertTrue(np.all(expect == flat))
def test_ravel_f(self):
ary = np.arange(60)
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
expect = reshaped.ravel(order='F')
dary = cuda.to_device(reshaped)
dflat = dary.ravel(order='F')
flat = dflat.copy_to_host()
self.assertTrue(flat.ndim == 1)
self.assertTrue(np.all(expect == flat))
def test_reshape_c(self):
ary = np.arange(10)
expect = ary.reshape(2, 5)
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5)
got = dary_reshaped.copy_to_host()
self.assertTrue(np.all(expect == got))
def test_reshape_f(self):
ary = np.arange(10)
expect = ary.reshape(2, 5, order='F')
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5, order='F')
got = dary_reshaped.copy_to_host()
self.assertTrue(np.all(expect == got))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cuda_array_slicing
from __future__ import print_function
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class CudaArrayIndexing(unittest.TestCase):
def test_index_1d(self):
arr = np.arange(10)
darr = cuda.to_device(arr)
for i in range(arr.size):
self.assertEqual(arr[i], darr[i])
def test_index_2d(self):
arr = np.arange(9).reshape(3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
self.assertEqual(arr[i, j], darr[i, j])
def test_index_3d(self):
arr = np.arange(3 ** 3).reshape(3, 3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
for k in range(arr.shape[2]):
self.assertEqual(arr[i, j, k], darr[i, j, k])
class CudaArraySlicing(unittest.TestCase):
def test_prefix_1d(self):
arr = np.arange(5)
darr = cuda.to_device(arr)
for i in range(arr.size):
expect = arr[i:]
got = darr[i:].copy_to_host()
self.assertTrue(np.all(expect == got))
def test_prefix_2d(self):
arr = np.arange(3 ** 2).reshape(3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
expect = arr[i:, j:]
sliced = darr[i:, j:]
self.assertEqual(expect.shape, sliced.shape)
self.assertEqual(expect.strides, sliced.strides)
got = sliced.copy_to_host()
self.assertTrue(np.all(expect == got))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cuda_auto_context
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaAutoContext(unittest.TestCase):
def test_auto_context(self):
"""A problem was revealed by a customer that the use cuda.to_device
does not create a CUDA context.
This tests the problem
"""
A = np.arange(10, dtype=np.float32)
newA = np.empty_like(A)
dA = cuda.to_device(A)
dA.copy_to_host(newA)
self.assertTrue(np.allclose(A, newA))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cuda_driver
from __future__ import print_function, absolute_import
from ctypes import c_int, sizeof
from numba.cuda.cudadrv.driver import driver, host_to_device, device_to_host
from numba.cuda.testing import unittest
ptx1 = '''
.version 1.4
.target sm_10, map_f64_to_f32
.entry _Z10helloworldPi (
.param .u64 __cudaparm__Z10helloworldPi_A)
{
.reg .u32 %r<3>;
.reg .u64 %rd<6>;
.loc 14 4 0
$LDWbegin__Z10helloworldPi:
.loc 14 6 0
cvt.s32.u16 %r1, %tid.x;
ld.param.u64 %rd1, [__cudaparm__Z10helloworldPi_A];
cvt.u64.u16 %rd2, %tid.x;
mul.lo.u64 %rd3, %rd2, 4;
add.u64 %rd4, %rd1, %rd3;
st.global.s32 [%rd4+0], %r1;
.loc 14 7 0
exit;
$LDWend__Z10helloworldPi:
} // _Z10helloworldPi
'''
ptx2 = '''
.version 3.0
.target sm_20
.address_size 64
.file 1 "/tmp/tmpxft_000012c7_00000000-9_testcuda.cpp3.i"
.file 2 "testcuda.cu"
.entry _Z10helloworldPi(
.param .u64 _Z10helloworldPi_param_0
)
{
.reg .s32 %r<3>;
.reg .s64 %rl<5>;
ld.param.u64 %rl1, [_Z10helloworldPi_param_0];
cvta.to.global.u64 %rl2, %rl1;
.loc 2 6 1
mov.u32 %r1, %tid.x;
mul.wide.u32 %rl3, %r1, 4;
add.s64 %rl4, %rl2, %rl3;
st.global.u32 [%rl4], %r1;
.loc 2 7 2
ret;
}
'''
class TestCudaDriver(unittest.TestCase):
def setUp(self):
self.assertTrue(driver.get_device_count())
device = driver.get_device()
ccmajor, _ = device.compute_capability
if ccmajor >= 2:
self.ptx = ptx2
else:
self.ptx = ptx1
self.context = device.get_or_create_context()
def test_cuda_driver_basic(self):
module = self.context.create_module_ptx(self.ptx)
print(module.info_log)
function = module.get_function('_Z10helloworldPi')
array = (c_int * 100)()
memory = self.context.memalloc(sizeof(array))
host_to_device(memory, array, sizeof(array))
function = function.configure((1,), (100,))
function(memory)
device_to_host(array, memory, sizeof(array))
for i, v in enumerate(array):
self.assertEqual(i, v)
module.unload()
def test_cuda_driver_stream(self):
module = self.context.create_module_ptx(self.ptx)
print(module.info_log)
function = module.get_function('_Z10helloworldPi')
array = (c_int * 100)()
stream = self.context.create_stream()
with stream.auto_synchronize():
memory = self.context.memalloc(sizeof(array))
host_to_device(memory, array, sizeof(array), stream=stream)
function = function.configure((1,), (100,), stream=stream)
function(memory)
device_to_host(array, memory, sizeof(array), stream=stream)
for i, v in enumerate(array):
self.assertEqual(i, v)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cuda_memory
import ctypes
import numpy
from numba.cuda.cudadrv import driver, drvapi
from numba.cuda.testing import unittest, CUDATestCase
from numba.utils import IS_PY3
class TestCudaMemory(CUDATestCase):
def setUp(self):
self.device = driver.driver.get_device()
self.context = self.device.get_or_create_context()
def _template(self, obj):
self.assertTrue(driver.is_device_memory(obj))
driver.require_device_memory(obj)
self.assertTrue(isinstance(obj.device_ctypes_pointer,
drvapi.cu_device_ptr))
def test_device_memory(self):
devmem = self.context.memalloc(1024)
self._template(devmem)
def test_device_view(self):
devmem = self.context.memalloc(1024)
self._template(devmem.view(10))
def test_host_alloc(self):
devmem = self.context.memhostalloc(1024, mapped=True)
self._template(devmem)
def test_pinned_memory(self):
ary = numpy.arange(10)
arybuf = ary if IS_PY3 else buffer(ary)
devmem = self.context.mempin(arybuf, ary.ctypes.data,
ary.size * ary.dtype.itemsize,
mapped=True)
self._template(devmem)
class TestCudaMemoryFunctions(CUDATestCase):
def setUp(self):
device = driver.driver.get_device()
self.context = device.get_or_create_context()
def test_memcpy(self):
hstary = numpy.arange(100, dtype=numpy.uint32)
hstary2 = numpy.arange(100, dtype=numpy.uint32)
sz = hstary.size * hstary.dtype.itemsize
devary = self.context.memalloc(sz)
driver.host_to_device(devary, hstary, sz)
driver.device_to_host(hstary2, devary, sz)
self.assertTrue(numpy.all(hstary == hstary2))
def test_memset(self):
dtype = numpy.dtype('uint32')
n = 10
sz = dtype.itemsize * 10
devary = self.context.memalloc(sz)
driver.device_memset(devary, 0xab, sz)
hstary = numpy.empty(n, dtype=dtype)
driver.device_to_host(hstary, devary, sz)
hstary2 = numpy.array([0xabababab] * n, dtype=numpy.dtype('uint32'))
self.assertTrue(numpy.all(hstary == hstary2))
def test_d2d(self):
hst = numpy.arange(100, dtype=numpy.uint32)
hst2 = numpy.empty_like(hst)
sz = hst.size * hst.dtype.itemsize
dev1 = self.context.memalloc(sz)
dev2 = self.context.memalloc(sz)
driver.host_to_device(dev1, hst, sz)
driver.device_to_device(dev2, dev1, sz)
driver.device_to_host(hst2, dev2, sz)
self.assertTrue(numpy.all(hst == hst2))
class TestMVExtent(CUDATestCase):
def test_c_contiguous_array(self):
ary = numpy.arange(100)
arysz = ary.dtype.itemsize * ary.size
s, e = driver.host_memory_extents(ary)
self.assertTrue(ary.ctypes.data == s)
self.assertTrue(arysz == driver.host_memory_size(ary))
def test_f_contiguous_array(self):
ary = numpy.asfortranarray(numpy.arange(100).reshape(2, 50))
arysz = ary.dtype.itemsize * numpy.prod(ary.shape)
s, e = driver.host_memory_extents(ary)
self.assertTrue(ary.ctypes.data == s)
self.assertTrue(arysz == driver.host_memory_size(ary))
def test_single_element_array(self):
ary = numpy.asarray(numpy.uint32(1234))
arysz = ary.dtype.itemsize
s, e = driver.host_memory_extents(ary)
self.assertTrue(ary.ctypes.data == s)
self.assertTrue(arysz == driver.host_memory_size(ary))
def test_ctypes_struct(self):
class mystruct(ctypes.Structure):
_fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]
data = mystruct(x=123, y=432)
sz = driver.host_memory_size(data)
self.assertTrue(ctypes.sizeof(data) == sz)
def test_ctypes_double(self):
data = ctypes.c_double(1.234)
sz = driver.host_memory_size(data)
self.assertTrue(ctypes.sizeof(data) == sz)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cuda_ndarray
import numpy as np
from numba.cuda.cudadrv import devicearray
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaNDArray(unittest.TestCase):
def test_device_array_interface(self):
dary = cuda.device_array(shape=100)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.empty(100)
dary = cuda.to_device(ary)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.asarray(1.234)
dary = cuda.to_device(ary)
self.assertTrue(dary.ndim == 1)
devicearray.verify_cuda_ndarray_interface(dary)
def test_devicearray_no_copy(self):
array = np.arange(100, dtype=np.float32)
cuda.to_device(array, copy=False)
def test_devicearray(self):
array = np.arange(100, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
array[:] = 0
gpumem.copy_to_host(array)
self.assertTrue((array == original).all())
def test_devicearray_partition(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
left, right = gpumem.split(N // 2)
array[:] = 0
self.assertTrue(np.all(array == 0))
right.copy_to_host(array[N//2:])
left.copy_to_host(array[:N//2])
self.assertTrue(np.all(array == original))
def test_devicearray_replace(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
cuda.to_device(array * 2, to=gpumem)
gpumem.copy_to_host(array)
self.assertTrue((array == original * 2).all())
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_detect
from __future__ import absolute_import, print_function
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaDetect(unittest.TestCase):
def test_cuda_detect(self):
# exercise the code path
cuda.detect()
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_events
from __future__ import absolute_import, print_function
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaEvent(unittest.TestCase):
def test_event_elapsed(self):
N = 32
dary = cuda.device_array(N, dtype=np.double)
evtstart = cuda.event()
evtend = cuda.event()
evtstart.record()
cuda.to_device(np.arange(N), to=dary)
evtend.record()
evtend.wait()
evtend.synchronize()
print(evtstart.elapsed_time(evtend))
def test_event_elapsed_stream(self):
N = 32
stream = cuda.stream()
dary = cuda.device_array(N, dtype=np.double)
evtstart = cuda.event()
evtend = cuda.event()
evtstart.record(stream=stream)
cuda.to_device(np.arange(N), to=dary, stream=stream)
evtend.record(stream=stream)
evtend.wait(stream=stream)
evtend.synchronize()
print(evtstart.elapsed_time(evtend))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_host_alloc
from __future__ import print_function, division, absolute_import
import numpy as np
from numba.cuda.cudadrv import driver
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
class TestHostAlloc(CUDATestCase):
def test_host_alloc_driver(self):
n = 32
mem = cuda.current_context().memhostalloc(n, mapped=True)
dtype = np.dtype(np.uint8)
ary = np.ndarray(shape=n // dtype.itemsize, dtype=dtype,
buffer=mem)
magic = 0xab
driver.device_memset(mem, magic, n)
self.assertTrue(np.all(ary == magic))
ary.fill(n)
recv = np.empty_like(ary)
driver.device_to_host(recv, mem, ary.size)
self.assertTrue(np.all(ary == recv))
self.assertTrue(np.all(recv == n))
def test_host_alloc_pinned(self):
ary = cuda.pinned_array(10, dtype=np.uint32)
ary.fill(123)
self.assertTrue(all(ary == 123))
devary = cuda.to_device(ary)
driver.device_memset(devary, 0, driver.device_memory_size(devary))
self.assertTrue(all(ary == 123))
devary.copy_to_host(ary)
self.assertTrue(all(ary == 0))
def test_host_alloc_mapped(self):
ary = cuda.mapped_array(10, dtype=np.uint32)
ary.fill(123)
self.assertTrue(all(ary == 123))
driver.device_memset(ary, 0, driver.device_memory_size(ary))
self.assertTrue(all(ary == 0))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_inline_ptx
from __future__ import print_function, division, absolute_import
from llvm.core import Module, Type, Builder, InlineAsm
from numba.cuda.cudadrv import nvvm
from numba.cuda.testing import unittest, CUDATestCase
class TestCudaInlineAsm(CUDATestCase):
def test_inline_rsqrt(self):
mod = Module.new(__name__)
fnty = Type.function(Type.void(), [Type.pointer(Type.float())])
fn = mod.add_function(fnty, 'cu_rsqrt')
bldr = Builder.new(fn.append_basic_block('entry'))
rsqrt_approx_fnty = Type.function(Type.float(), [Type.float()])
inlineasm = InlineAsm.get(rsqrt_approx_fnty,
'rsqrt.approx.f32 $0, $1;',
'=f,f', side_effect=True)
val = bldr.load(fn.args[0])
res = bldr.call(inlineasm, [val])
bldr.store(res, fn.args[0])
bldr.ret_void()
# generate ptx
nvvm.fix_data_layout(mod)
nvvm.set_cuda_kernel(fn)
nvvmir = str(mod)
ptx = nvvm.llvm_to_ptx(nvvmir)
self.assertTrue('rsqrt.approx.f32' in str(ptx))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_linker
from __future__ import print_function, absolute_import, division
import os.path
import numpy as np
from numba.cuda.testing import unittest
from numba.cuda.cudadrv.driver import Linker
from numba.cuda import require_context
from numba import cuda
class TestLinker(unittest.TestCase):
@require_context
def test_linker_basic(self):
'''Simply go through the constructor and destructor
'''
linker = Linker()
del linker
@require_context
def test_linking(self):
global bar # must be a global; otherwise it is recognized as a freevar
bar = cuda.declare_device('bar', 'int32(int32)')
link = os.path.join(os.path.dirname(__file__), 'data', 'jitlink.o')
print('link to:', link)
if not os.path.isfile(link):
print('test skipped due to missing file')
return
@cuda.jit('void(int32[:], int32[:])', link=[link])
def foo(x, y):
i = cuda.grid(1)
x[i] += bar(y[i])
A = np.array([123])
B = np.array([321])
foo(A, B)
self.assertTrue(A[0] == 123 + 2 * 321)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_nvvm_driver
from __future__ import absolute_import, print_function, division
from llvm.core import Module, Type, Builder
from numba.cuda.cudadrv.nvvm import (NVVM, CompilationUnit, llvm_to_ptx,
set_cuda_kernel, fix_data_layout,
get_arch_option)
from ctypes import c_size_t, c_uint64, sizeof
from numba.cuda.testing import unittest, CUDATestCase
is64bit = sizeof(c_size_t) == sizeof(c_uint64)
class TestNvvmDriver(unittest.TestCase):
def get_ptx(self):
nvvm = NVVM()
print(nvvm.get_version())
if is64bit:
return gpu64
else:
return gpu32
def test_nvvm_compile(self):
nvvmir = self.get_ptx()
cu = CompilationUnit()
cu.add_module(nvvmir.encode('utf8'))
ptx = cu.compile().decode('utf8')
print(ptx)
self.assertTrue('simple' in ptx)
self.assertTrue('ave' in ptx)
print(cu.log)
def test_nvvm_compile_simple(self):
nvvmir = self.get_ptx()
ptx = llvm_to_ptx(nvvmir).decode('utf8')
print(ptx)
self.assertTrue('simple' in ptx)
self.assertTrue('ave' in ptx)
def test_nvvm_from_llvm(self):
m = Module.new("test_nvvm_from_llvm")
fty = Type.function(Type.void(), [Type.int()])
kernel = m.add_function(fty, name='mycudakernel')
bldr = Builder.new(kernel.append_basic_block('entry'))
bldr.ret_void()
print(m)
set_cuda_kernel(kernel)
fix_data_layout(m)
ptx = llvm_to_ptx(str(m)).decode('utf8')
print(ptx)
self.assertTrue('mycudakernel' in ptx)
if is64bit:
self.assertTrue('.address_size 64' in ptx)
else:
self.assertTrue('.address_size 32' in ptx)
class TestArchOption(unittest.TestCase):
def test_get_arch_option(self):
self.assertTrue(get_arch_option(2, 0) == 'compute_20')
self.assertTrue(get_arch_option(2, 1) == 'compute_20')
self.assertTrue(get_arch_option(3, 0) == 'compute_30')
self.assertTrue(get_arch_option(3, 3) == 'compute_30')
self.assertTrue(get_arch_option(3, 4) == 'compute_30')
self.assertTrue(get_arch_option(3, 5) == 'compute_35')
self.assertTrue(get_arch_option(3, 6) == 'compute_35')
gpu64 = '''
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
define i32 @ave(i32 %a, i32 %b) {
entry:
%add = add nsw i32 %a, %b
%div = sdiv i32 %add, 2
ret i32 %div
}
define void @simple(i32* %data) {
entry:
%0 = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
%1 = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
%mul = mul i32 %0, %1
%2 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
%add = add i32 %mul, %2
%call = call i32 @ave(i32 %add, i32 %add)
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32* %data, i64 %idxprom
store i32 %call, i32* %arrayidx, align 4
ret void
}
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() nounwind readnone
!nvvm.annotations = !{!1}
!1 = metadata !{void (i32*)* @simple, metadata !"kernel", i32 1}
'''
gpu32 = '''
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
define i32 @ave(i32 %a, i32 %b) {
entry:
%add = add nsw i32 %a, %b
%div = sdiv i32 %add, 2
ret i32 %div
}
define void @simple(i32* %data) {
entry:
%0 = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
%1 = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
%mul = mul i32 %0, %1
%2 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
%add = add i32 %mul, %2
%call = call i32 @ave(i32 %add, i32 %add)
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32* %data, i64 %idxprom
store i32 %call, i32* %arrayidx, align 4
ret void
}
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() nounwind readnone
!nvvm.annotations = !{!1}
!1 = metadata !{void (i32*)* @simple, metadata !"kernel", i32 1}
'''
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_pinned
from __future__ import print_function, division, absolute_import
from timeit import default_timer as timer
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
REPEAT = 25
class TestPinned(CUDATestCase):
def _template(self, name, A):
A0 = np.copy(A)
s = timer()
stream = cuda.stream()
ptr = cuda.to_device(A, copy=False, stream=stream)
ptr.copy_to_device(A, stream=stream)
ptr.copy_to_host(A, stream=stream)
stream.synchronize()
e = timer()
self.assertTrue(np.allclose(A, A0))
elapsed = e - s
return elapsed
def test_pinned(self):
A = np.arange(2*1024*1024) # 16 MB
total = 0
with cuda.pinned(A):
for i in range(REPEAT):
total += self._template('pinned', A)
print('pinned', total / REPEAT)
def test_unpinned(self):
A = np.arange(2*1024*1024) # 16 MB
total = 0
for i in range(REPEAT):
total += self._template('unpinned', A)
print('unpinned', total / REPEAT)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_reset_device
from __future__ import print_function, absolute_import, division
import threading
from numba import cuda
from numba.cuda.cudadrv.driver import driver
from numba.cuda.testing import unittest, CUDATestCase
class TestResetDevice(CUDATestCase):
def test_reset_device(self):
def newthread():
devices = range(driver.get_device_count())
print('Devices', devices)
for _ in range(2):
for d in devices:
cuda.select_device(d)
print('Selected device', d)
cuda.close()
print('Closed device', d)
# Do test on a separate thread so that we don't affect
# the current context in the main thread.
t = threading.Thread(target=newthread)
t.start()
t.join()
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_select_device
#
# Test does not work on some cards.
#
from __future__ import print_function, absolute_import, division
import threading
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
def newthread():
cuda.select_device(0)
stream = cuda.stream()
A = np.arange(100)
dA = cuda.to_device(A, stream=stream)
stream.synchronize()
del dA
del stream
cuda.close()
class TestSelectDevice(CUDATestCase):
def test_select_device(self):
for i in range(10):
t = threading.Thread(target=newthread)
t.start()
t.join()
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = runtests
from __future__ import print_function, division, absolute_import
from numba.testing import discover_tests, run_tests
def test():
suite = discover_tests("numba.cuda.tests.cudapy.runtests")
return run_tests(suite).wasSuccessful()
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = test_array
from __future__ import print_function, division, absolute_import
import numpy
from numba.cuda.testing import unittest
from numba import cuda
@cuda.jit('void(double[:])')
def kernel(x):
i = cuda.grid(1)
if i < x.shape[0]:
x[i] = i
@cuda.jit('void(double[:], double[:])')
def copykernel(x, y):
i = cuda.grid(1)
if i < x.shape[0]:
x[i] = i
y[i] = i
class TestCudaArray(unittest.TestCase):
def test_gpu_array_strided(self):
x = numpy.arange(10, dtype=numpy.double)
y = numpy.ndarray(shape=10 * 8, buffer=x, dtype=numpy.byte)
z = numpy.ndarray(9, buffer=y[4:-4], dtype=numpy.double)
kernel[10, 10](z)
self.assertTrue(numpy.allclose(z, list(range(9))))
def test_gpu_array_interleaved(self):
x = numpy.arange(10, dtype=numpy.double)
y = x[:-1:2]
# z = x[1::2]
# n = y.size
try:
cuda.devicearray.auto_device(y)
except ValueError:
pass
else:
raise AssertionError("Should raise exception complaining the "
"contiguous-ness of the array.")
# Should we handle this use case?
# assert z.size == y.size
# copykernel[1, n](y, x)
# print(y, z)
# assert numpy.all(y == z)
# assert numpy.all(y == list(range(n)))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_array_args
from __future__ import print_function, division, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
@cuda.jit('double(double[:],int64)', device=True, inline=True)
def device_function(a, c):
return a[c]
@cuda.jit('void(double[:],double[:])')
def kernel(x, y):
i = cuda.grid(1)
y[i] = device_function(x, i)
class TestCudaArrayArg(unittest.TestCase):
def test_array_ary(self):
x = numpy.arange(10, dtype=numpy.double)
y = numpy.zeros_like(x)
kernel[10, 1](x, y)
self.assertTrue(numpy.all(x == y))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_atomics
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import cuda, uint32, uint64, float32
from numba.cuda.testing import unittest
def atomic_add(ary):
tid = cuda.threadIdx.x
sm = cuda.shared.array(32, uint32)
sm[tid] = 0
cuda.syncthreads()
bin = ary[tid] % 32
cuda.atomic.add(sm, bin, 1)
cuda.syncthreads()
ary[tid] = sm[tid]
def atomic_add2(ary):
tx = cuda.threadIdx.x
ty = cuda.threadIdx.y
sm = cuda.shared.array((4, 8), uint32)
sm[tx, ty] = ary[tx, ty]
cuda.syncthreads()
cuda.atomic.add(sm, (tx, ty), 1)
cuda.syncthreads()
ary[tx, ty] = sm[tx, ty]
def atomic_add3(ary):
tx = cuda.threadIdx.x
ty = cuda.threadIdx.y
sm = cuda.shared.array((4, 8), uint32)
sm[tx, ty] = ary[tx, ty]
cuda.syncthreads()
cuda.atomic.add(sm, (tx, uint64(ty)), 1)
cuda.syncthreads()
ary[tx, ty] = sm[tx, ty]
def atomic_add_float(ary):
tid = cuda.threadIdx.x
sm = cuda.shared.array(32, float32)
sm[tid] = 0
cuda.syncthreads()
bin = ary[tid] % 32
cuda.atomic.add(sm, bin, 1)
cuda.syncthreads()
ary[tid] = sm[tid]
class TestCudaAtomics(unittest.TestCase):
def test_atomic_add(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
orig = ary.copy()
cuda_atomic_add = cuda.jit('void(uint32[:])')(atomic_add)
cuda_atomic_add[1, 32](ary)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] += 1
self.assertTrue(np.all(ary == gold))
def test_atomic_add2(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_add2 = cuda.jit('void(uint32[:,:])')(atomic_add2)
cuda_atomic_add2[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig + 1))
def test_atomic_add3(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_add3 = cuda.jit('void(uint32[:,:])')(atomic_add3)
cuda_atomic_add3[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig + 1))
# Should support float atomic add
#@testcase
#def test_atomic_add_float():
# ary = np.random.randint(0, 32, size=32).astype(np.float32).reshape(4, 8)
# orig = ary.copy()
# cuda_atomic_add = cuda.jit('void(float32[:])')(atomic_add_float)
# cuda_atomic_add[1, (4, 8)](ary)
#
# assertTrue(np.all(ary == orig + 1))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_autojit
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaAutoJit(unittest.TestCase):
def test_autojit(self):
@cuda.autojit
def what(a, b, c):
pass
what(np.empty(1), 1.0, 21)
what(np.empty(1), 1.0, 21)
what(np.empty(1), np.empty(1, dtype=np.int32), 21)
what(np.empty(1), np.empty(1, dtype=np.int32), 21)
what(np.empty(1), 1.0, 21)
print(what.definitions)
self.assertTrue(len(what.definitions) == 2)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_blackscholes
from __future__ import print_function, absolute_import, division
import numpy as np
import math
import time
from numba import cuda, double
from numba.cuda.testing import unittest
RISKFREE = 0.02
VOLATILITY = 0.30
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
def cnd(d):
K = 1.0 / (1.0 + 0.2316419 * np.abs(d))
ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
return np.where(d > 0, 1.0 - ret_val, ret_val)
def black_scholes(callResult, putResult, stockPrice, optionStrike, optionYears,
Riskfree, Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
sqrtT = np.sqrt(T)
d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd(d1)
cndd2 = cnd(d2)
expRT = np.exp(- R * T)
callResult[:] = (S * cndd1 - X * expRT * cndd2)
putResult[:] = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1))
@cuda.jit(argtypes=(double,), restype=double, device=True, inline=True)
def cnd_cuda(d):
K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
if d > 0:
ret_val = 1.0 - ret_val
return ret_val
@cuda.jit(argtypes=(double[:], double[:], double[:], double[:], double[:],
double, double))
def black_scholes_cuda(callResult, putResult, S, X, T, R, V):
i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
if i >= S.shape[0]:
return
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_cuda(d1)
cndd2 = cnd_cuda(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
def randfloat(rand_var, low, high):
return (1.0 - rand_var) * low + rand_var * high
class TestBlackScholes(unittest.TestCase):
def test_blackscholes(self):
OPT_N = 400
iterations = 2
stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0)
optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0)
optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0)
callResultNumpy = np.zeros(OPT_N)
putResultNumpy = -np.ones(OPT_N)
callResultNumbapro = np.zeros(OPT_N)
putResultNumbapro = -np.ones(OPT_N)
# numpy
for i in range(iterations):
black_scholes(callResultNumpy, putResultNumpy, stockPrice,
optionStrike, optionYears, RISKFREE, VOLATILITY)
# numbapro
time0 = time.time()
blockdim = 512, 1
griddim = int(math.ceil(float(OPT_N) / blockdim[0])), 1
stream = cuda.stream()
d_callResult = cuda.to_device(callResultNumbapro, stream)
d_putResult = cuda.to_device(putResultNumbapro, stream)
d_stockPrice = cuda.to_device(stockPrice, stream)
d_optionStrike = cuda.to_device(optionStrike, stream)
d_optionYears = cuda.to_device(optionYears, stream)
time1 = time.time()
for i in range(iterations):
black_scholes_cuda[griddim, blockdim, stream](
d_callResult, d_putResult, d_stockPrice, d_optionStrike,
d_optionYears, RISKFREE, VOLATILITY)
d_callResult.copy_to_host(callResultNumbapro, stream)
d_putResult.copy_to_host(putResultNumbapro, stream)
stream.synchronize()
dt = (time1 - time0)
print("numbapro.cuda time: %f msec" % ((1000 * dt) / iterations))
delta = np.abs(callResultNumpy - callResultNumbapro)
L1norm = delta.sum() / np.abs(callResultNumpy).sum()
max_abs_err = delta.max()
print('L1norm', L1norm)
print('Max absolute error', max_abs_err)
self.assertTrue(L1norm < 1e-13)
self.assertTrue(max_abs_err < 1e-13)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_boolean
from __future__ import print_function, absolute_import
import numpy as np
from numba.cuda.testing import unittest
from numba import cuda
def boolean_test(A, vertial):
if vertial:
A[0] = 123
else:
A[0] = 321
class TestCudaBoolean(unittest.TestCase):
def test_boolean(self):
func = cuda.jit('void(float64[:], bool_)')(boolean_test)
A = np.array([0], dtype='float64')
func(A, True)
self.assertTrue(A[0] == 123)
func(A, False)
self.assertTrue(A[0] == 321)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_complex_kernel
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaComplex(unittest.TestCase):
def test_cuda_complex_arg(self):
@cuda.jit('void(complex128[:], complex128)')
def foo(a, b):
i = cuda.grid(1)
a[i] += b
a = np.arange(5, dtype=np.complex128)
a0 = a.copy()
foo[1, a.shape](a, 2j)
self.assertTrue(np.allclose(a, a0 + 2j))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_constmem
from __future__ import print_function
import numpy
from numba import cuda, int32
from numba.cuda.testing import unittest
CONST1D = numpy.arange(10, dtype=numpy.float64) / 2.
CONST2D = numpy.asfortranarray(
numpy.arange(100, dtype=numpy.int32).reshape(10, 10))
CONST3D = ((numpy.arange(5*5*5, dtype=numpy.complex64).reshape(5, 5, 5) + 1j) /
2j)
def cuconst(A):
C = cuda.const.array_like(CONST1D)
i = cuda.grid(1)
A[i] = C[i]
def cuconst2d(A):
C = cuda.const.array_like(CONST2D)
i, j = cuda.grid(2)
A[i, j] = C[i, j]
def cuconst3d(A):
C = cuda.const.array_like(CONST3D)
i = cuda.threadIdx.x
j = cuda.threadIdx.y
k = cuda.threadIdx.z
A[i, j, k] = C[i, j, k]
class TestCudaConstantMemory(unittest.TestCase):
def test_const_array(self):
jcuconst = cuda.jit('void(float64[:])')(cuconst)
print(jcuconst.ptx)
self.assertTrue('.const' in jcuconst.ptx)
A = numpy.empty_like(CONST1D)
jcuconst[2, 5](A)
self.assertTrue(numpy.all(A == CONST1D))
def test_const_array_2d(self):
jcuconst2d = cuda.jit('void(int32[:,:])')(cuconst2d)
print(jcuconst2d.ptx)
self.assertTrue('.const' in jcuconst2d.ptx)
A = numpy.empty_like(CONST2D, order='C')
jcuconst2d[(2,2), (5,5)](A)
print(CONST2D)
print(A)
self.assertTrue(numpy.all(A == CONST2D))
def test_const_array_3d(self):
jcuconst3d = cuda.jit('void(complex64[:,:,:])')(cuconst3d)
print(jcuconst3d.ptx)
self.assertTrue('.const' in jcuconst3d.ptx)
A = numpy.empty_like(CONST3D, order='F')
jcuconst3d[1, (5, 5, 5)](A)
print(CONST3D)
print(A)
self.assertTrue(numpy.all(A == CONST3D))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_device_func
from __future__ import print_function, absolute_import, division
import numpy as np
from numba.cuda.testing import unittest
from numba import cuda
@cuda.jit("float32(float32, float32)", device=True)
def add2f(a, b):
return a + b
@cuda.jit("float32(float32, float32)", device=True)
def indirect(a, b):
return add2f(a, b)
def use_add2f(ary):
i = cuda.grid(1)
ary[i] = add2f(ary[i], ary[i])
def indirect_add2f(ary):
i = cuda.grid(1)
ary[i] = indirect(ary[i], ary[i])
class TestDeviceFunc(unittest.TestCase):
def test_use_add2f(self):
compiled = cuda.jit("void(float32[:])")(use_add2f)
nelem = 10
ary = np.arange(nelem, dtype=np.float32)
exp = ary + ary
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp), (ary, exp))
def test_indirect_add2f(self):
compiled = cuda.jit("void(float32[:])")(indirect_add2f)
nelem = 10
ary = np.arange(nelem, dtype=np.float32)
exp = ary + ary
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp), (ary, exp))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_globals
from __future__ import absolute_import, print_function, division
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
N = 100
def simple_smem(ary):
sm = cuda.shared.array(N, int32)
i = cuda.grid(1)
if i == 0:
for j in range(N):
sm[j] = j
cuda.syncthreads()
ary[i] = sm[i]
S0 = 10
S1 = 20
def coop_smem2d(ary):
i, j = cuda.grid(2)
sm = cuda.shared.array((S0, S1), float32)
sm[i, j] = (i + 1) / (j + 1)
cuda.syncthreads()
ary[i, j] = sm[i, j]
class TestCudaTestGlobal(unittest.TestCase):
def test_global_int_const(self):
"""Test simple_smem
"""
compiled = cuda.jit("void(int32[:])")(simple_smem)
nelem = 100
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
@unittest.SkipTest
def test_global_tuple_const(self):
"""Test coop_smem2d
"""
compiled = cuda.jit("void(float32[:,:])")(coop_smem2d)
shape = 10, 20
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape](ary)
exp = np.empty_like(ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = float(i + 1) / (j + 1)
self.assertTrue(np.allclose(ary, exp))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_idiv
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import cuda, float32, float64, int32
from numba.cuda.testing import unittest
@cuda.jit(argtypes=[float32[:, :], int32, int32])
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
@cuda.jit(argtypes=[float64[:, :], int32, int32])
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
class TestCudaIDiv(unittest.TestCase):
def test_inplace_div(self):
x = np.ones((2, 2), dtype=np.float32)
grid = cuda.to_device(x)
div(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
x = np.ones((2, 2), dtype=np.float64)
grid = cuda.to_device(x)
div_double(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_intrinsics
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
def simple_threadidx(ary):
i = cuda.threadIdx.x
ary[0] = i
def fill_threadidx(ary):
i = cuda.threadIdx.x
ary[i] = i
def fill3d_threadidx(ary):
i = cuda.threadIdx.x
j = cuda.threadIdx.y
k = cuda.threadIdx.z
ary[i, j, k] = (i + 1) * (j + 1) * (k + 1)
def simple_grid1d(ary):
i = cuda.grid(1)
ary[i] = i
def simple_grid2d(ary):
i, j = cuda.grid(2)
ary[i, j] = i + j
def intrinsic_forloop_step(c):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height, width = c.shape
for x in range(startX, width, gridX):
for y in range(startY, height, gridY):
c[y, x] = x + y
class TestCudaIntrinsic(unittest.TestCase):
def test_simple_threadidx(self):
compiled = cuda.jit("void(int32[:])")(simple_threadidx)
ary = np.ones(1, dtype=np.int32)
compiled(ary)
self.assertTrue(ary[0] == 0)
def test_fill_threadidx(self):
compiled = cuda.jit("void(int32[:])")(fill_threadidx)
N = 10
ary = np.ones(N, dtype=np.int32)
exp = np.arange(N, dtype=np.int32)
compiled[1, N](ary)
self.assertTrue(np.all(ary == exp))
def test_fill3d_threadidx(self):
X, Y, Z = 4, 5, 6
def c_contigous():
compiled = cuda.jit("void(int32[:,:,::1])")(fill3d_threadidx)
ary = np.zeros((X, Y, Z), dtype=np.int32)
compiled[1, (X, Y, Z)](ary)
return ary
def f_contigous():
compiled = cuda.jit("void(int32[::1,:,:])")(fill3d_threadidx)
ary = np.asfortranarray(np.zeros((X, Y, Z), dtype=np.int32))
compiled[1, (X, Y, Z)](ary)
return ary
c_res = c_contigous()
f_res = f_contigous()
self.assertTrue(np.all(c_res == f_res))
def test_simple_grid1d(self):
compiled = cuda.jit("void(int32[::1])")(simple_grid1d)
ntid, nctaid = 3, 7
nelem = ntid * nctaid
ary = np.empty(nelem, dtype=np.int32)
compiled[nctaid, ntid](ary)
self.assertTrue(np.all(ary == np.arange(nelem)))
def test_simple_grid2d(self):
compiled = cuda.jit("void(int32[:,::1])")(simple_grid2d)
ntid = (4, 3)
nctaid = (5, 6)
shape = (ntid[0] * nctaid[0], ntid[1] * nctaid[1])
ary = np.empty(shape, dtype=np.int32)
exp = ary.copy()
compiled[nctaid, ntid](ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = i + j
self.assertTrue(np.all(ary == exp))
def test_intrinsic_forloop_step(self):
compiled = cuda.jit("void(float32[:,::1])")(intrinsic_forloop_step)
ntid = (4, 3)
nctaid = (5, 6)
shape = (ntid[0] * nctaid[0], ntid[1] * nctaid[1])
ary = np.empty(shape, dtype=np.int32)
compiled[nctaid, ntid](ary)
gridX, gridY = shape
height, width = ary.shape
for i, j in zip(range(ntid[0]), range(ntid[1])):
startX, startY = gridX + i, gridY + j
for x in range(startX, width, gridX):
for y in range(startY, height, gridY):
self.assertTrue(ary[y, x] == x + y, (ary[y, x], x + y))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_laplace
from __future__ import print_function, absolute_import, division
import numpy as np
import time
from numba import cuda, float64, void
from numba.cuda.testing import unittest
# NOTE: CUDA kernel does not return any value
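# Results are therefore communicated through output arrays (here the
# ``error`` grid); only device functions (device=True), such as get_max
# below, may return a value to their caller.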
tpb = 16
SM_SIZE = tpb, tpb
@cuda.jit(float64(float64, float64), device=True, inline=True)
def get_max(a, b):
if a > b:
return a
else:
return b
@cuda.jit(void(float64[:, :], float64[:, :], float64[:, :]))
def jocabi_relax_core(A, Anew, error):
err_sm = cuda.shared.array(SM_SIZE, dtype=float64)
ty = cuda.threadIdx.x
tx = cuda.threadIdx.y
bx = cuda.blockIdx.x
by = cuda.blockIdx.y
n = A.shape[0]
m = A.shape[1]
i, j = cuda.grid(2)
err_sm[ty, tx] = 0
if j >= 1 and j < n - 1 and i >= 1 and i < m - 1:
Anew[j, i] = 0.25 * ( A[j, i + 1] + A[j, i - 1] \
+ A[j - 1, i] + A[j + 1, i])
err_sm[ty, tx] = Anew[j, i] - A[j, i]
cuda.syncthreads()
# max-reduce err_sm vertically
t = tpb // 2
while t > 0:
if ty < t:
err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty + t, tx])
t //= 2
cuda.syncthreads()
# max-reduce err_sm horizontally
t = tpb // 2
while t > 0:
if tx < t and ty == 0:
err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty, tx + t])
t //= 2
cuda.syncthreads()
if tx == 0 and ty == 0:
error[by, bx] = err_sm[0, 0]
class TestCudaLaplace(unittest.TestCase):
def test_laplace_small(self):
NN = 256
NM = 256
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
m = NM
iter_max = 1000
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
print("Jacobi relaxation Calculation: %d x %d mesh" % (n, m))
timer = time.time()
iter = 0
blockdim = (tpb, tpb)
griddim = (NN // blockdim[0], NM // blockdim[1])
error_grid = np.zeros(griddim)
stream = cuda.stream()
dA = cuda.to_device(A, stream) # to device and don't come back
dAnew = cuda.to_device(Anew, stream) # to device and don't come back
derror_grid = cuda.to_device(error_grid, stream)
while error > tol and iter < iter_max:
self.assertTrue(error_grid.dtype == np.float64)
jocabi_relax_core[griddim, blockdim, stream](dA, dAnew, derror_grid)
derror_grid.copy_to_host(error_grid, stream=stream)
# error_grid is available on host
stream.synchronize()
error = np.abs(error_grid).max()
# swap dA and dAnew
tmp = dA
dA = dAnew
dAnew = tmp
if iter % 100 == 0:
print("%5d, %0.6f (elapsed: %f s)" %
(iter, error, time.time() - timer))
iter += 1
runtime = time.time() - timer
print(" total: %f s" % runtime)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_localmem
from __future__ import print_function, absolute_import, division
import numpy
from numba import cuda, int32, complex128
from numba.cuda.testing import unittest
def culocal(A, B):
C = cuda.local.array(100, dtype=int32)
for i in range(C.shape[0]):
C[i] = A[i]
for i in range(C.shape[0]):
B[i] = C[i]
def culocalcomplex(A, B):
C = cuda.local.array(100, dtype=complex128)
for i in range(C.shape[0]):
C[i] = A[i]
for i in range(C.shape[0]):
B[i] = C[i]
class TestCudaLocalMem(unittest.TestCase):
def test_local_array(self):
jculocal = cuda.jit('void(int32[:], int32[:])')(culocal)
self.assertTrue('.local' in jculocal.ptx)
A = numpy.arange(100, dtype='int32')
B = numpy.zeros_like(A)
jculocal(A, B)
self.assertTrue(numpy.all(A == B))
def test_local_array_complex(self):
sig = 'void(complex128[:], complex128[:])'
jculocalcomplex = cuda.jit(sig)(culocalcomplex)
self.assertTrue('.local' in jculocalcomplex.ptx)
A = (numpy.arange(100, dtype='complex128') - 1) / 2j
B = numpy.zeros_like(A)
jculocalcomplex(A, B)
self.assertTrue(numpy.all(A == B))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_mandel
from __future__ import print_function, absolute_import, division
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaMandel(unittest.TestCase):
def test_mandel(self):
"""Just make sure we can compile this
"""
@cuda.jit('(uint32, float64, float64, float64, '
'float64, uint32, uint32, uint32)', device=True)
def mandel(tid, min_x, max_x, min_y, max_y, width, height, iters):
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
x = tid % width
y = tid / width
real = min_x + x * pixel_size_x
imag = min_y + y * pixel_size_y
c = complex(real, imag)
z = 0.0j
for i in range(iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return iters
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_math
from __future__ import print_function, absolute_import, division
import sys
import numpy as np
from numba.cuda.testing import unittest
from numba import cuda, float32, float64, int32
import math
def math_acos(A, B):
i = cuda.grid(1)
B[i] = math.acos(A[i])
def math_asin(A, B):
i = cuda.grid(1)
B[i] = math.asin(A[i])
def math_atan(A, B):
i = cuda.grid(1)
B[i] = math.atan(A[i])
def math_acosh(A, B):
i = cuda.grid(1)
B[i] = math.acosh(A[i])
def math_asinh(A, B):
i = cuda.grid(1)
B[i] = math.asinh(A[i])
def math_atanh(A, B):
i = cuda.grid(1)
B[i] = math.atanh(A[i])
def math_cos(A, B):
i = cuda.grid(1)
B[i] = math.cos(A[i])
def math_sin(A, B):
i = cuda.grid(1)
B[i] = math.sin(A[i])
def math_tan(A, B):
i = cuda.grid(1)
B[i] = math.tan(A[i])
def math_cosh(A, B):
i = cuda.grid(1)
B[i] = math.cosh(A[i])
def math_sinh(A, B):
i = cuda.grid(1)
B[i] = math.sinh(A[i])
def math_tanh(A, B):
i = cuda.grid(1)
B[i] = math.tanh(A[i])
def math_atan2(A, B, C):
i = cuda.grid(1)
C[i] = math.atan2(A[i], B[i])
def math_exp(A, B):
i = cuda.grid(1)
B[i] = math.exp(A[i])
def math_expm1(A, B):
i = cuda.grid(1)
B[i] = math.expm1(A[i])
def math_fabs(A, B):
i = cuda.grid(1)
B[i] = math.fabs(A[i])
def math_log(A, B):
i = cuda.grid(1)
B[i] = math.log(A[i])
def math_log10(A, B):
i = cuda.grid(1)
B[i] = math.log10(A[i])
def math_log1p(A, B):
i = cuda.grid(1)
B[i] = math.log1p(A[i])
def math_sqrt(A, B):
i = cuda.grid(1)
B[i] = math.sqrt(A[i])
def math_pow(A, B, C):
i = cuda.grid(1)
C[i] = math.pow(A[i], B[i])
def math_ceil(A, B):
i = cuda.grid(1)
B[i] = math.ceil(A[i])
def math_floor(A, B):
i = cuda.grid(1)
B[i] = math.floor(A[i])
def math_copysign(A, B, C):
i = cuda.grid(1)
C[i] = math.copysign(A[i], B[i])
def math_fmod(A, B, C):
i = cuda.grid(1)
C[i] = math.fmod(A[i], B[i])
def math_modf(A, B, C):
i = cuda.grid(1)
C[i] = math.modf(A[i], B[i])
def math_isnan(A, B):
i = cuda.grid(1)
B[i] = math.isnan(A[i])
def math_isinf(A, B):
i = cuda.grid(1)
B[i] = math.isinf(A[i])
def math_pow_binop(A, B, C):
i = cuda.grid(1)
C[i] = A[i] ** B[i]
def math_mod_binop(A, B, C):
i = cuda.grid(1)
C[i] = A[i] % B[i]
class TestCudaMath(unittest.TestCase):
def unary_template_float32(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float32, float32, start, stop)
def unary_template_float64(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float64, float64, start, stop)
def unary_template(self, func, npfunc, npdtype, npmtype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty_like(A)
arytype = npmtype[::1]
cfunc = cuda.jit((arytype, arytype))(func)
cfunc[1, nelem](A, B)
self.assertTrue(np.allclose(npfunc(A), B))
def unary_bool_template_float32(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float32, float32, start, stop)
def unary_bool_template_float64(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float64, float64, start, stop)
def unary_bool_template(self, func, npfunc, npdtype, npmtype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty(A.shape, dtype=np.int32)
iarytype = npmtype[::1]
oarytype = int32[::1]
cfunc = cuda.jit((iarytype, oarytype))(func)
cfunc[1, nelem](A, B)
self.assertTrue(np.all(npfunc(A) == B))
def binary_template_float32(self, func, npfunc, start=0, stop=1):
self.binary_template(func, npfunc, np.float32, float32, start, stop)
def binary_template_float64(self, func, npfunc, start=0, stop=1):
self.binary_template(func, npfunc, np.float64, float64, start, stop)
def binary_template(self, func, npfunc, npdtype, npmtype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty_like(A)
arytype = npmtype[::1]
cfunc = cuda.jit((arytype, arytype, arytype))(func)
cfunc.bind()
cfunc[1, nelem](A, A, B)
self.assertTrue(np.allclose(npfunc(A, A), B))
#------------------------------------------------------------------------------
# test_math_acos
def test_math_acos(self):
self.unary_template_float32(math_acos, np.arccos)
self.unary_template_float64(math_acos, np.arccos)
#------------------------------------------------------------------------------
# test_math_asin
def test_math_asin(self):
self.unary_template_float32(math_asin, np.arcsin)
self.unary_template_float64(math_asin, np.arcsin)
#------------------------------------------------------------------------------
# test_math_atan
def test_math_atan(self):
self.unary_template_float32(math_atan, np.arctan)
self.unary_template_float64(math_atan, np.arctan)
#------------------------------------------------------------------------------
# test_math_acosh
def test_math_acosh(self):
self.unary_template_float32(math_acosh, np.arccosh, start=1, stop=2)
self.unary_template_float64(math_acosh, np.arccosh, start=1, stop=2)
#------------------------------------------------------------------------------
# test_math_asinh
def test_math_asinh(self):
self.unary_template_float32(math_asinh, np.arcsinh)
self.unary_template_float64(math_asinh, np.arcsinh)
#------------------------------------------------------------------------------
# test_math_atanh
def test_math_atanh(self):
self.unary_template_float32(math_atanh, np.arctanh, start=0, stop=.9)
self.unary_template_float64(math_atanh, np.arctanh, start=0, stop=.9)
#------------------------------------------------------------------------------
# test_math_cos
def test_math_cos(self):
self.unary_template_float32(math_cos, np.cos)
self.unary_template_float64(math_cos, np.cos)
#------------------------------------------------------------------------------
# test_math_sin
def test_math_sin(self):
self.unary_template_float32(math_sin, np.sin)
self.unary_template_float64(math_sin, np.sin)
#------------------------------------------------------------------------------
# test_math_tan
def test_math_tan(self):
self.unary_template_float32(math_tan, np.tan)
self.unary_template_float64(math_tan, np.tan)
#------------------------------------------------------------------------------
# test_math_cosh
def test_math_cosh(self):
self.unary_template_float32(math_cosh, np.cosh)
self.unary_template_float64(math_cosh, np.cosh)
#------------------------------------------------------------------------------
# test_math_sinh
def test_math_sinh(self):
self.unary_template_float32(math_sinh, np.sinh)
self.unary_template_float64(math_sinh, np.sinh)
#------------------------------------------------------------------------------
# test_math_tanh
def test_math_tanh(self):
self.unary_template_float32(math_tanh, np.tanh)
self.unary_template_float64(math_tanh, np.tanh)
#------------------------------------------------------------------------------
# test_math_atan2
def test_math_atan2(self):
self.binary_template_float32(math_atan2, np.arctan2)
self.binary_template_float64(math_atan2, np.arctan2)
#------------------------------------------------------------------------------
# test_math_exp
def test_math_exp(self):
self.unary_template_float32(math_exp, np.exp)
self.unary_template_float64(math_exp, np.exp)
#------------------------------------------------------------------------------
# test_math_expm1
if sys.version_info[:2] >= (2, 7):
def test_math_expm1(self):
self.unary_template_float32(math_expm1, np.expm1)
self.unary_template_float64(math_expm1, np.expm1)
#------------------------------------------------------------------------------
# test_math_fabs
def test_math_fabs(self):
self.unary_template_float32(math_fabs, np.fabs, start=-1)
self.unary_template_float64(math_fabs, np.fabs, start=-1)
#------------------------------------------------------------------------------
# test_math_log
def test_math_log(self):
self.unary_template_float32(math_log, np.log, start=1)
self.unary_template_float64(math_log, np.log, start=1)
#------------------------------------------------------------------------------
# test_math_log10
def test_math_log10(self):
self.unary_template_float32(math_log10, np.log10, start=1)
self.unary_template_float64(math_log10, np.log10, start=1)
#------------------------------------------------------------------------------
# test_math_log1p
def test_math_log1p(self):
self.unary_template_float32(math_log1p, np.log1p)
self.unary_template_float64(math_log1p, np.log1p)
#------------------------------------------------------------------------------
# test_math_sqrt
def test_math_sqrt(self):
self.unary_template_float32(math_sqrt, np.sqrt)
self.unary_template_float64(math_sqrt, np.sqrt)
#------------------------------------------------------------------------------
# test_math_pow
def test_math_pow(self):
self.binary_template_float32(math_pow, np.power)
self.binary_template_float64(math_pow, np.power)
#------------------------------------------------------------------------------
# test_math_pow_binop
def test_math_pow_binop(self):
self.binary_template_float32(math_pow_binop, np.power)
self.binary_template_float64(math_pow_binop, np.power)
#------------------------------------------------------------------------------
# test_math_ceil
def test_math_ceil(self):
self.unary_template_float32(math_ceil, np.ceil)
self.unary_template_float64(math_ceil, np.ceil)
#------------------------------------------------------------------------------
# test_math_floor
def test_math_floor(self):
self.unary_template_float32(math_floor, np.floor)
self.unary_template_float64(math_floor, np.floor)
#------------------------------------------------------------------------------
# test_math_copysign
def test_math_copysign(self):
self.binary_template_float32(math_copysign, np.copysign, start=-1)
self.binary_template_float64(math_copysign, np.copysign, start=-1)
#------------------------------------------------------------------------------
# test_math_fmod
def test_math_fmod(self):
self.binary_template_float32(math_fmod, np.fmod, start=1)
self.binary_template_float64(math_fmod, np.fmod, start=1)
#------------------------------------------------------------------------------
# test_math_mod_binop
def test_math_mod_binop(self):
self.binary_template_float32(math_mod_binop, np.fmod, start=1)
self.binary_template_float64(math_mod_binop, np.fmod, start=1)
#------------------------------------------------------------------------------
# test_math_isnan
def test_math_isnan(self):
self.unary_bool_template_float32(math_isnan, np.isnan)
self.unary_bool_template_float64(math_isnan, np.isnan)
#------------------------------------------------------------------------------
# test_math_isinf
def test_math_isinf(self):
self.unary_bool_template_float32(math_isinf, np.isinf)
self.unary_bool_template_float64(math_isinf, np.isinf)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_matmul
from __future__ import print_function, division, absolute_import
import numpy as np
from timeit import default_timer as time
from numba import cuda, float32
from numba.cuda.testing import unittest
bpg = 50
tpb = 32
n = bpg * tpb
SM_SIZE = (tpb, tpb)
@cuda.jit(argtypes=[float32[:, ::1], float32[:, ::1], float32[:, ::1]])
def cu_square_matrix_mul(A, B, C):
sA = cuda.shared.array(shape=SM_SIZE, dtype=float32)
sB = cuda.shared.array(shape=SM_SIZE, dtype=float32)
tx = cuda.threadIdx.x
ty = cuda.threadIdx.y
bx = cuda.blockIdx.x
by = cuda.blockIdx.y
bw = cuda.blockDim.x
bh = cuda.blockDim.y
x = tx + bx * bw
y = ty + by * bh
acc = float32(0) # forces all the math to be f32
for i in range(bpg):
if x < n and y < n:
sA[ty, tx] = A[y, tx + i * tpb]
sB[ty, tx] = B[ty + i * tpb, x]
cuda.syncthreads()
if x < n and y < n:
for j in range(tpb):
acc += sA[ty, j] * sB[j, tx]
cuda.syncthreads()
if x < n and y < n:
C[y, x] = acc
class TestCudaMatMul(unittest.TestCase):
def test_func(self):
A = np.array(np.random.random((n, n)), dtype=np.float32)
B = np.array(np.random.random((n, n)), dtype=np.float32)
C = np.empty_like(A)
print("N = %d x %d" % (n, n))
s = time()
stream = cuda.stream()
with stream.auto_synchronize():
dA = cuda.to_device(A, stream)
dB = cuda.to_device(B, stream)
dC = cuda.to_device(C, stream)
cu_square_matrix_mul[(bpg, bpg), (tpb, tpb), stream](dA, dB, dC)
dC.copy_to_host(C, stream)
e = time()
tcuda = e - s
# Host compute
Amat = np.matrix(A)
Bmat = np.matrix(B)
s = time()
Cans = Amat * Bmat
e = time()
tcpu = e - s
print('cpu: %f' % tcpu)
print('cuda: %f' % tcuda)
print('cuda speedup: %.2fx' % (tcpu / tcuda))
# Check result
self.assertTrue(np.allclose(C, Cans))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_montecarlo
from __future__ import print_function, absolute_import
import math
from numba import cuda
from numba.cuda.testing import unittest
class TestCudaMonteCarlo(unittest.TestCase):
def test_montecarlo(self):
"""Just make sure we can compile this
"""
@cuda.jit(
'void(double[:], double[:], double, double, double, double[:])')
def step(last, paths, dt, c0, c1, normdist):
i = cuda.grid(1)
if i >= paths.shape[0]:
return
noise = normdist[i]
paths[i] = last[i] * math.exp(c0 * dt + c1 * noise)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_nondet
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
def generate_input(n):
A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
B = np.array(np.arange(n) + 0, dtype=A.dtype)
return A, B
class TestCudaNonDet(unittest.TestCase):
def test_for_pre(self):
"""Test issue with loop not running due to bad sign-extension at the for loop
precondition.
"""
@cuda.jit(argtypes=[float32[:, :], float32[:, :], float32[:]])
def diagproduct(c, a, b):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height = c.shape[0]
width = c.shape[1]
for x in range(startX, width, (gridX)):
for y in range(startY, height, (gridY)):
c[y, x] = a[y, x] * b[x]
N = 8
A, B = generate_input(N)
E = np.zeros(A.shape, dtype=A.dtype)
F = np.empty(A.shape, dtype=A.dtype)
E = np.dot(A, np.diag(B))
blockdim = (32, 8)
griddim = (1, 1)
dA = cuda.to_device(A)
dB = cuda.to_device(B)
dF = cuda.to_device(F, copy=False)
diagproduct[griddim, blockdim](dF, dA, dB)
dF.to_host()
self.assertTrue(np.allclose(F, E))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_powi
from __future__ import print_function, absolute_import
import math
import numpy as np
from numba import cuda, float64, int8, int32
from numba.cuda.testing import unittest
def cu_mat_power(A, power, power_A):
y, x = cuda.grid(2)
m, n = power_A.shape
if x >= n or y >= m:
return
power_A[y, x] = math.pow(A[y, x], int32(power))
def cu_mat_power_binop(A, power, power_A):
y, x = cuda.grid(2)
m, n = power_A.shape
if x >= n or y >= m:
return
power_A[y, x] = A[y, x] ** power
class TestCudaPowi(unittest.TestCase):
def test_powi(self):
dec = cuda.jit(argtypes=[float64[:, :], int8, float64[:, :]])
kernel = dec(cu_mat_power)
power = 2
A = np.arange(10, dtype=np.float64).reshape(2, 5)
Aout = np.empty_like(A)
kernel[1, A.shape](A, power, Aout)
self.assertTrue(np.allclose(Aout, A ** power))
def test_powi_binop(self):
dec = cuda.jit(argtypes=[float64[:, :], int8, float64[:, :]])
kernel = dec(cu_mat_power_binop)
power = 2
A = np.arange(10, dtype=np.float64).reshape(2, 5)
Aout = np.empty_like(A)
kernel[1, A.shape](A, power, Aout)
self.assertTrue(np.allclose(Aout, A ** power))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_py2_div_issue
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, float32, int32
from numba.cuda.testing import unittest
class TestCudaPy2Div(unittest.TestCase):
def test_py2_div_issue(self):
@cuda.jit(argtypes=[float32[:], float32[:], float32[:], int32])
def preCalc(y, yA, yB, numDataPoints):
i = cuda.grid(1)
k = i % numDataPoints
ans = float32(1.001 * float32(i))
y[i] = ans
yA[i] = ans * 1.0
yB[i] = ans / 1.0
numDataPoints = 15
y = np.zeros(numDataPoints, dtype=np.float32)
yA = np.zeros(numDataPoints, dtype=np.float32)
yB = np.zeros(numDataPoints, dtype=np.float32)
z = 1.0
preCalc[1, 15](y, yA, yB, numDataPoints)
print('y')
print(y)
print('yA')
print(yA)
print('yB')
print(yB)
self.assertTrue(np.all(y == yA))
self.assertTrue(np.all(y == yB))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_slicing
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, float32, int32
from numba.cuda.testing import unittest
def foo(inp, out):
for i in range(out.shape[0]):
out[i] = inp[i]
def copy(inp, out):
i = cuda.grid(1)
cufoo(inp[i, :], out[i, :])
class TestCudaSlicing(unittest.TestCase):
def test_slice_as_arg(self):
global cufoo
cufoo = cuda.jit("void(int32[:], int32[:])", device=True)(foo)
cucopy = cuda.jit("void(int32[:,:], int32[:,:])")(copy)
inp = np.arange(100, dtype=np.int32).reshape(10, 10)
out = np.zeros_like(inp)
cucopy[1, 10](inp, out)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_sync
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
def useless_sync(ary):
i = cuda.grid(1)
cuda.syncthreads()
ary[i] = i
def simple_smem(ary):
N = 100
sm = cuda.shared.array(N, int32)
i = cuda.grid(1)
if i == 0:
for j in range(N):
sm[j] = j
cuda.syncthreads()
ary[i] = sm[i]
def coop_smem2d(ary):
i, j = cuda.grid(2)
sm = cuda.shared.array((10, 20), float32)
sm[i, j] = (i + 1) / (j + 1)
cuda.syncthreads()
ary[i, j] = sm[i, j]
def dyn_shared_memory(ary):
i = cuda.grid(1)
sm = cuda.shared.array(0, float32)
sm[i] = i * 2
cuda.syncthreads()
ary[i] = sm[i]
class TestCudaSync(unittest.TestCase):
def test_useless_sync(self):
compiled = cuda.jit("void(int32[::1])")(useless_sync)
nelem = 10
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == exp))
def test_simple_smem(self):
compiled = cuda.jit("void(int32[::1])")(simple_smem)
nelem = 100
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
def test_coop_smem2d(self):
compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
shape = 10, 20
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape](ary)
exp = np.empty_like(ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = (i + 1) / (j + 1)
self.assertTrue(np.allclose(ary, exp))
def test_dyn_shared_memory(self):
compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
shape = 50
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape, 0, ary.size * 4](ary)
self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = dataflow
from __future__ import print_function, division, absolute_import
from pprint import pprint
from numba import utils
import warnings
class DataFlowAnalysis(object):
"""
Perform stack-to-register (stack2reg) conversion.
This is necessary to resolve blocks that propagate stack values,
which allows the use of `and` and `or`, and Python 2.6 jumps.
"""
def __init__(self, cfa):
self.cfa = cfa
self.bytecode = cfa.bytecode
self.infos = {}
self.syntax_blocks = []
def run(self):
for blk in self.cfa.iterblocks():
self.infos[blk.offset] = self.run_on_block(blk)
def run_on_block(self, blk):
info = BlockInfo(blk.offset)
for offset in blk:
inst = self.bytecode[offset]
self.dispatch(info, inst)
return info
def dump(self):
for blk in utils.dict_itervalues(self.infos):
blk.dump()
def dispatch(self, info, inst):
fname = "op_%s" % inst.opname.replace('+', '_')
fn = getattr(self, fname)
fn(info, inst)
def dup_topx(self, info, count):
stack = [info.pop() for _ in range(count)]
for val in reversed(stack):
info.push(val)
for val in reversed(stack):
info.push(val)
def op_DUP_TOP(self, info, inst):
tos = info.pop()
info.push(tos)
info.push(tos)
def op_DUP_TOPX(self, info, inst):
count = inst.arg
assert 1 <= count <= 5, "Invalid DUP_TOPX count"
self.dup_topx(info, count)
def op_DUP_TOP_TWO(self, info, inst):
self.dup_topx(info, count=2)
def op_ROT_TWO(self, info, inst):
first = info.pop()
second = info.pop()
info.push(first)
info.push(second)
def op_ROT_THREE(self, info, inst):
first = info.pop()
second = info.pop()
third = info.pop()
info.push(first)
info.push(third)
info.push(second)
def op_ROT_FOUR(self, info, inst):
first = info.pop()
second = info.pop()
third = info.pop()
forth = info.pop()
info.push(first)
info.push(forth)
info.push(third)
info.push(second)
def op_UNPACK_SEQUENCE(self, info, inst):
count = inst.arg
sequence = info.pop()
stores = [info.make_temp() for _ in range(count)]
iterobj = info.make_temp()
info.append(inst, sequence=sequence, stores=stores, iterobj=iterobj)
for st in reversed(stores):
info.push(st)
def op_BUILD_TUPLE(self, info, inst):
count = inst.arg
items = list(reversed([info.pop() for _ in range(count)]))
tup = info.make_temp()
info.append(inst, items=items, res=tup)
info.push(tup)
def op_BUILD_LIST(self, info, inst):
count = inst.arg
items = list(reversed([info.pop() for _ in range(count)]))
lst = info.make_temp()
info.append(inst, items=items, res=lst)
info.push(lst)
def op_POP_TOP(self, info, inst):
info.pop()
def op_STORE_FAST(self, info, inst):
value = info.pop()
info.append(inst, value=value)
def op_LOAD_FAST(self, info, inst):
name = self.bytecode.co_varnames[inst.arg]
info.push(name)
def op_LOAD_CONST(self, info, inst):
res = info.make_temp()
info.append(inst, res=res)
info.push(res)
def op_LOAD_GLOBAL(self, info, inst):
res = info.make_temp()
info.append(inst, res=res)
info.push(res)
def op_LOAD_ATTR(self, info, inst):
item = info.pop()
res = info.make_temp()
info.append(inst, item=item, res=res)
info.push(res)
def op_BINARY_SUBSCR(self, info, inst):
index = info.pop()
target = info.pop()
res = info.make_temp()
info.append(inst, index=index, target=target, res=res)
info.push(res)
def op_STORE_SUBSCR(self, info, inst):
index = info.pop()
target = info.pop()
value = info.pop()
info.append(inst, target=target, index=index, value=value)
def op_GET_ITER(self, info, inst):
value = info.pop()
res = info.make_temp()
info.append(inst, value=value, res=res)
info.push(res)
if self.syntax_blocks:
loop = self.syntax_blocks[-1]
if isinstance(loop, LoopBlock) and loop.iterator is None:
loop.iterator = res
def op_FOR_ITER(self, info, inst):
loop = self.syntax_blocks[-1]
iterator = loop.iterator
indval = info.make_temp()
pred = info.make_temp()
info.append(inst, iterator=iterator, indval=indval, pred=pred)
info.push(indval)
def op_CALL_FUNCTION(self, info, inst):
narg = inst.arg & 0xff
nkws = (inst.arg >> 8) & 0xff
def pop_kws():
val = info.pop()
key = info.pop()
return key, val
kws = list(reversed([pop_kws() for _ in range(nkws)]))
args = list(reversed([info.pop() for _ in range(narg)]))
func = info.pop()
res = info.make_temp()
info.append(inst, func=func, args=args, kws=kws, res=res)
info.push(res)
def op_PRINT_ITEM(self, info, inst):
warnings.warn("Python2 style print partially supported. Please use "
"Python3 style print.", RuntimeWarning)
item = info.pop()
printvar = info.make_temp()
res = info.make_temp()
info.append(inst, item=item, printvar=printvar, res=res)
info.push(item)
def op_PRINT_NEWLINE(self, info, inst):
printvar = info.make_temp()
res = info.make_temp()
info.append(inst, printvar=printvar, res=res)
def _unaryop(self, info, inst):
val = info.pop()
res = info.make_temp()
info.append(inst, value=val, res=res)
info.push(res)
op_UNARY_NEGATIVE = _unaryop
op_UNARY_NOT = _unaryop
op_UNARY_INVERT = _unaryop
def _binaryop(self, info, inst):
rhs = info.pop()
lhs = info.pop()
res = info.make_temp()
info.append(inst, lhs=lhs, rhs=rhs, res=res)
info.push(res)
op_COMPARE_OP = _binaryop
op_INPLACE_ADD = _binaryop
op_INPLACE_SUBTRACT = _binaryop
op_INPLACE_MULTIPLY = _binaryop
op_INPLACE_DIVIDE = _binaryop
op_INPLACE_TRUE_DIVIDE = _binaryop
op_INPLACE_FLOOR_DIVIDE = _binaryop
op_INPLACE_MODULO = _binaryop
op_INPLACE_POWER = _binaryop
op_INPLACE_LSHIFT = _binaryop
op_INPLACE_RSHIFT = _binaryop
op_INPLACE_AND = _binaryop
op_INPLACE_OR = _binaryop
op_INPLACE_XOR = _binaryop
op_BINARY_ADD = _binaryop
op_BINARY_SUBTRACT = _binaryop
op_BINARY_MULTIPLY = _binaryop
op_BINARY_DIVIDE = _binaryop
op_BINARY_TRUE_DIVIDE = _binaryop
op_BINARY_FLOOR_DIVIDE = _binaryop
op_BINARY_MODULO = _binaryop
op_BINARY_POWER = _binaryop
op_BINARY_LSHIFT = _binaryop
op_BINARY_RSHIFT = _binaryop
op_BINARY_AND = _binaryop
op_BINARY_OR = _binaryop
op_BINARY_XOR = _binaryop
def op_SLICE_0(self, info, inst):
"""
TOS = TOS[:]
"""
tos = info.pop()
res = info.make_temp()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos, res=res, slicevar=slicevar,
indexvar=indexvar, nonevar=nonevar)
info.push(res)
def op_SLICE_1(self, info, inst):
"""
TOS = TOS1[TOS:]
"""
tos = info.pop()
tos1 = info.pop()
res = info.make_temp()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos1, start=tos, res=res, slicevar=slicevar,
indexvar=indexvar, nonevar=nonevar)
info.push(res)
def op_SLICE_2(self, info, inst):
"""
TOS = TOS1[:TOS]
"""
tos = info.pop()
tos1 = info.pop()
res = info.make_temp()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos1, stop=tos, res=res, slicevar=slicevar,
indexvar=indexvar, nonevar=nonevar)
info.push(res)
def op_SLICE_3(self, info, inst):
"""
TOS = TOS2[TOS1:TOS]
"""
tos = info.pop()
tos1 = info.pop()
tos2 = info.pop()
res = info.make_temp()
slicevar = info.make_temp()
indexvar = info.make_temp()
info.append(inst, base=tos2, start=tos1, stop=tos, res=res,
slicevar=slicevar, indexvar=indexvar)
info.push(res)
def op_STORE_SLICE_0(self, info, inst):
"""
TOS[:] = TOS1
"""
tos = info.pop()
value = info.pop()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos, value=value, slicevar=slicevar,
indexvar=indexvar, nonevar=nonevar)
def op_STORE_SLICE_1(self, info, inst):
"""
TOS1[TOS:] = TOS2
"""
tos = info.pop()
tos1 = info.pop()
value = info.pop()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos1, start=tos, slicevar=slicevar,
value=value, indexvar=indexvar, nonevar=nonevar)
def op_STORE_SLICE_2(self, info, inst):
"""
TOS1[:TOS] = TOS2
"""
tos = info.pop()
tos1 = info.pop()
value = info.pop()
slicevar = info.make_temp()
indexvar = info.make_temp()
nonevar = info.make_temp()
info.append(inst, base=tos1, stop=tos, value=value, slicevar=slicevar,
indexvar=indexvar, nonevar=nonevar)
def op_STORE_SLICE_3(self, info, inst):
"""
TOS2[TOS1:TOS] = TOS3
"""
tos = info.pop()
tos1 = info.pop()
tos2 = info.pop()
value = info.pop()
slicevar = info.make_temp()
indexvar = info.make_temp()
info.append(inst, base=tos2, start=tos1, stop=tos, value=value,
slicevar=slicevar, indexvar=indexvar)
def op_BUILD_SLICE(self, info, inst):
"""
slice(TOS1, TOS) or slice(TOS2, TOS1, TOS)
"""
argc = inst.arg
if argc == 2:
tos = info.pop()
tos1 = info.pop()
start = tos1
stop = tos
step = None
elif argc == 3:
tos = info.pop()
tos1 = info.pop()
tos2 = info.pop()
start = tos2
stop = tos1
step = tos
else:
raise Exception("unreachable")
slicevar = info.make_temp()
res = info.make_temp()
info.append(inst, start=start, stop=stop, step=step, res=res,
slicevar=slicevar)
info.push(res)
def op_POP_JUMP_IF_TRUE(self, info, inst):
pred = info.pop()
info.append(inst, pred=pred)
info.terminator = inst
def op_POP_JUMP_IF_FALSE(self, info, inst):
pred = info.pop()
info.append(inst, pred=pred)
info.terminator = inst
def op_JUMP_IF_TRUE(self, info, inst):
pred = info.tos
info.append(inst, pred=pred)
info.terminator = inst
def op_JUMP_IF_FALSE(self, info, inst):
pred = info.tos
info.append(inst, pred=pred)
info.terminator = inst
op_JUMP_IF_FALSE_OR_POP = op_JUMP_IF_FALSE
op_JUMP_IF_TRUE_OR_POP = op_JUMP_IF_TRUE
def op_JUMP_ABSOLUTE(self, info, inst):
info.append(inst)
info.terminator = inst
def op_JUMP_FORWARD(self, info, inst):
info.append(inst)
info.terminator = inst
def op_BREAK_LOOP(self, info, inst):
info.append(inst)
info.terminator = inst
def op_RETURN_VALUE(self, info, inst):
info.append(inst, retval=info.pop())
info.terminator = inst
def op_SETUP_LOOP(self, info, inst):
self.syntax_blocks.append(LoopBlock())
info.append(inst)
def op_POP_BLOCK(self, info, inst):
block = self.syntax_blocks.pop()
if isinstance(block, LoopBlock):
info.append(inst, delitem=block.iterator)
else:
info.append(inst)
def _ignored(self, info, inst):
pass
class LoopBlock(object):
__slots__ = 'iterator'
def __init__(self):
self.iterator = None
class BlockInfo(object):
def __init__(self, offset):
self.offset = offset
self.stack = []
self.incomings = []
self.insts = []
self.tempct = 0
self._term = None
def dump(self):
print("offset", self.offset, "{")
print(" stack: ", end='')
pprint(self.stack)
print(" incomings: ", end='')
pprint(self.incomings)
pprint(self.insts)
print("}")
def make_temp(self):
self.tempct += 1
name = '$%d.%d' % (self.offset, self.tempct)
return name
def push(self, val):
self.stack.append(val)
def pop(self):
# TODO: lingering incoming values
if not self.stack:
assert not self.insts
ret = self.make_temp()
self.incomings.append(ret)
else:
ret = self.stack.pop()
return ret
@property
def tos(self):
r = self.pop()
self.push(r)
return r
def append(self, inst, **kws):
self.insts.append((inst.offset, kws))
@property
def terminator(self):
assert self._term is None
return self._term
@terminator.setter
def terminator(self, inst):
self._term = inst
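# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original module) of how BlockInfo models
# the value stack during the stack2reg pass above. `FakeInst` is a hypothetical
# stand-in for a bytecode instruction; BlockInfo.append() only needs `offset`.
if __name__ == '__main__':
    from collections import namedtuple
    FakeInst = namedtuple('FakeInst', ['offset', 'opname', 'arg'])
    info = BlockInfo(offset=0)
    # Simulate `res = a + b`: two loads push names, the binop pops both and
    # pushes a fresh temporary naming the result.
    info.push('a')
    info.push('b')
    rhs, lhs = info.pop(), info.pop()
    res = info.make_temp()
    info.append(FakeInst(offset=0, opname='BINARY_ADD', arg=None),
                lhs=lhs, rhs=rhs, res=res)
    info.push(res)
    info.dump()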
########NEW FILE########
__FILENAME__ = decorators
"""
Contains function decorators and target_registry
"""
from __future__ import print_function, division, absolute_import
import warnings
from numba import sigutils
from numba.targets import registry
# -----------------------------------------------------------------------------
# Decorators
def autojit(*args, **kws):
"""Deprecated.
Use jit instead; this simply calls jit internally.
"""
warnings.warn("autojit is deprecated, use jit instead which now performs "
"the same functionality", DeprecationWarning)
return jit(*args, **kws)
def jit(signature_or_function=None, argtypes=None, restype=None, locals={},
target='cpu', **targetoptions):
"""jit([signature_or_function, [locals={}, [target='cpu',
[**targetoptions]]]])
The decorator can be used in the following ways:
1) jit(signature, [target='cpu', [**targetoptions]]) -> jit(function)
Equivalent to:
d = dispatcher(function, targetoptions)
d.compile(signature)
Create a dispatcher object for a python function and default
target-options. Then, compile the function with the given signature.
Example:
@jit("void(int32, float32)")
def foo(x, y):
return x + y
2) jit(function) -> dispatcher
Same as the old autojit. Creates a dispatcher object that
specializes at the call site.
Example:
@jit
def foo(x, y):
return x + y
3) jit([target='cpu', [**targetoptions]]) -> configured_jit(function)
Same as the old autojit and 2), but configured with the given target
and default target-options.
Example:
@jit(target='cpu', nopython=True)
def foo(x, y):
return x + y
Target Options
---------------
The CPU (default target) defines the following:
- nopython: [bool]
Set to True to disable the use of PyObjects and Python API
calls. The default behavior is to allow the use of PyObjects and
Python API. Default value is False.
- forceobj: [bool]
Set to True to force the use of PyObjects for every value. Default
value is False.
"""
# Handle deprecated argtypes and restype keyword arguments
if argtypes is not None:
assert signature_or_function is None, "argtypes used but " \
"signature is provided"
warnings.warn("Keyword argument 'argtypes' is deprecated",
DeprecationWarning)
if restype is None:
signature_or_function = tuple(argtypes)
else:
signature_or_function = restype(*argtypes)
# Handle signature
if signature_or_function is None:
# Used as autojit
def configured_jit(arg):
return jit(arg, locals=locals, target=target, **targetoptions)
return configured_jit
elif sigutils.is_signature(signature_or_function):
# Function signature is provided
sig = signature_or_function
return _jit(sig, locals=locals, target=target,
targetoptions=targetoptions)
else:
# No signature is provided
pyfunc = signature_or_function
dispatcher = registry.target_registry[target]
dispatcher = dispatcher(py_func=pyfunc, locals=locals,
targetoptions=targetoptions)
# NOTE This affects import time for large functions
# # Compile a pure object mode
# if target == 'cpu' and not targetoptions.get('nopython', False):
# dispatcher.compile((), locals=locals, forceobj=True)
return dispatcher
def _jit(sig, locals, target, targetoptions):
dispatcher = registry.target_registry[target]
def wrapper(func):
disp = dispatcher(py_func=func, locals=locals,
targetoptions=targetoptions)
disp.compile(sig)
disp.disable_compile()
return disp
return wrapper
def njit(*args, **kws):
"""Equavilent to jit(nopython=True)
"""
if 'nopython' in kws:
warnings.warn('nopython is set for njit and is ignored', RuntimeWarning)
if 'forceobj' in kws:
warnings.warn('forceobj is set for njit and is ignored', RuntimeWarning)
kws.update({'nopython': True})
return jit(*args, **kws)
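# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the original module) of the three call
# forms documented in `jit` above. It runs only when this module is executed
# directly and assumes a working numba toolchain; the signature string and
# option values are illustrative, not canonical.
if __name__ == '__main__':
    # 1) Eager compilation from an explicit signature.
    @jit("int64(int64, int64)")
    def add_sig(x, y):
        return x + y
    # 2) Lazy call-site specialization (old autojit behaviour).
    @jit
    def add_lazy(x, y):
        return x + y
    # 3) Dispatcher configured with target and target-options up front.
    @jit(target='cpu', nopython=True)
    def add_nopython(x, y):
        return x + y
    print(add_sig(1, 2), add_lazy(1.5, 2.5), add_nopython(3, 4))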
########NEW FILE########
__FILENAME__ = dispatcher
from __future__ import print_function, division, absolute_import
import inspect
import contextlib
import numpy
from numba.config import PYVERSION
from numba import _dispatcher, compiler, utils
from numba.typeconv.rules import default_type_manager
from numba.typing.templates import resolve_overload
from numba import types, sigutils
from numba import numpy_support
from numba.bytecode import get_code_object
class Overloaded(_dispatcher.Dispatcher):
"""
Abstract base class. Subclasses should define the targetdescr class attribute.
"""
__numba__ = "py_func"
def __init__(self, py_func, locals={}, targetoptions={}):
self.tm = default_type_manager
argspec = inspect.getargspec(py_func)
argct = len(argspec.args)
super(Overloaded, self).__init__(self.tm.get_pointer(), argct)
self.py_func = py_func
self.func_code = get_code_object(py_func)
self.overloads = {}
self.fallback = None
self.targetoptions = targetoptions
self.locals = locals
self.doc = py_func.__doc__
self._compiling = False
self.targetdescr.typing_context.insert_overloaded(self)
@property
def signatures(self):
"""
Returns a list of compiled function signatures.
"""
return list(self.overloads.keys())
def disable_compile(self, val=True):
"""Disable the compilation of new signatures at call time.
"""
self._disable_compile(int(val))
def add_overload(self, cres):
sig = [a._code for a in cres.signature.args]
self._insert(sig, cres.entry_point_addr, cres.objectmode)
if cres.objectmode:
self.fallback = cres.entry_point
self.overloads[cres.signature] = cres
# Register the native function so typing and code generation can resolve it
typing = cres.typing_context
target = cres.target_context
cfunc = cres.entry_point
if cfunc in target.native_funcs:
target.dynamic_map_function(cfunc)
calltemplate = target.get_user_function(cfunc)
typing.insert_user_function(cfunc, calltemplate)
def get_overload(self, sig):
args, return_type = sigutils.normalize_signature(sig)
return self.overloads[tuple(args)].entry_point
@contextlib.contextmanager
def _compile_lock(self):
if self._compiling:
raise RuntimeError("Compiler re-entrant")
self._compiling = True
yield
self._compiling = False
@property
def is_compiling(self):
return self._compiling
def compile(self, sig, locals={}, **targetoptions):
with self._compile_lock():
locs = self.locals.copy()
locs.update(locals)
topt = self.targetoptions.copy()
topt.update(targetoptions)
flags = compiler.Flags()
self.targetdescr.options.parse_as_flags(flags, topt)
glctx = self.targetdescr
typingctx = glctx.typing_context
targetctx = glctx.target_context
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if the signature already exists.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
cres = compiler.compile_extra(typingctx, targetctx, self.py_func,
args=args, return_type=return_type,
flags=flags, locals=locs)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
def jit(self, sig, **kws):
"""Alias of compile(sig, **kws)
"""
return self.compile(sig, **kws)
def _compile_and_call(self, *args, **kws):
assert not kws
sig = tuple([typeof_pyval(a) for a in args])
self.jit(sig)
return self(*args, **kws)
def inspect_types(self):
for ver, res in utils.dict_iteritems(self.overloads):
print("%s %s" % (self.py_func.__name__, ver))
print('-' * 80)
print(res.type_annotation)
print('=' * 80)
def _explain_ambiguous(self, *args, **kws):
assert not kws, "kwargs not handled"
args = tuple([typeof_pyval(a) for a in args])
resolve_overload(self.targetdescr.typing_context, self.py_func,
tuple(self.overloads.keys()), args, kws)
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.py_func)
INT_TYPES = (int,)
if PYVERSION < (3, 0):
INT_TYPES += (long,)
def typeof_pyval(val):
"""
This is called from numba._dispatcher as a fallback if the native code
cannot decide the type.
"""
if isinstance(val, numpy.ndarray):
# TODO complete dtype mapping
dtype = numpy_support.from_dtype(val.dtype)
ndim = val.ndim
if ndim == 0:
# is array scalar
return numpy_support.from_dtype(val.dtype)
layout = numpy_support.map_layout(val)
aryty = types.Array(dtype, ndim, layout)
return aryty
# The following are handled in the C version for exact type match
# So test these later
elif isinstance(val, INT_TYPES):
return types.int64
elif isinstance(val, float):
return types.float64
elif isinstance(val, complex):
return types.complex128
elif numpy_support.is_arrayscalar(val):
# Array scalar
return numpy_support.from_dtype(numpy.dtype(type(val)))
# Other object
else:
return types.pyobject
class LiftedLoop(Overloaded):
def __init__(self, bytecode, typingctx, targetctx, locals, flags):
self.tm = default_type_manager
argspec = bytecode.argspec
argct = len(argspec.args)
_dispatcher.Dispatcher.__init__(self, self.tm.get_pointer(), argct)
self.bytecode = bytecode
self.typingctx = typingctx
self.targetctx = targetctx
self.locals = locals
self.flags = flags
self.py_func = bytecode.func
self.overloads = {}
self.fallback = None
self.doc = self.py_func.__doc__
self._compiling = False
def compile(self, sig):
with self._compile_lock():
# FIXME this is mostly duplicated from Overloaded
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if the signature already exists.
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
assert not flags.enable_looplift, "Enable looplift flags is on"
cres = compiler.compile_bytecode(typingctx=self.typingctx,
targetctx=self.targetctx,
bc=self.bytecode,
args=args,
return_type=return_type,
flags=flags,
locals=self.locals)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
# Initialize dispatcher
_dispatcher.init_types(dict((str(t), t._code) for t in types.number_domain))
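# ---------------------------------------------------------------------------
# A small sketch (not part of the original module) of the fallback type
# resolution performed by typeof_pyval above. It runs only when this module is
# executed directly; the expected results in the comments assume the default
# 64-bit mappings and the usual numba type reprs.
if __name__ == '__main__':
    print(typeof_pyval(1))                    # int64
    print(typeof_pyval(1.0))                  # float64
    print(typeof_pyval(1 + 2j))               # complex128
    print(typeof_pyval(numpy.zeros((2, 3))))  # roughly: array(float64, 2d, C)
    print(typeof_pyval(object()))             # pyobject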
########NEW FILE########
__FILENAME__ = dummyarray
from __future__ import print_function, division
import itertools
import functools
import operator
import numpy
from collections import namedtuple
Extent = namedtuple("Extent", ["begin", "end"])
class Dim(object):
"""A single dimension of the array
Attributes
----------
start:
start offset
stop:
stop offset
size:
number of items
stride:
item stride
"""
__slots__ = 'start', 'stop', 'size', 'stride'
def __init__(self, start, stop, size, stride):
if stop < start:
raise ValueError("end offset is before start offset")
self.start = start
self.stop = stop
self.size = size
self.stride = stride
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
else:
start = item
stop = start + 1
step = None
if start is None:
start = 0
if stop is None:
stop = self.size
if step is None:
step = 1
stride = step * self.stride
if start >= 0:
start = self.start + start * self.stride
else:
start = self.stop + start * self.stride
if stop >= 0:
stop = self.start + stop * self.stride
else:
stop = self.stop + stop * self.stride
size = (stop - start) // stride
if self.start >= start >= self.stop:
raise IndexError("start index out-of-bound")
if self.start >= stop >= self.stop:
raise IndexError("stop index out-of-bound")
if stop < start:
start = stop
size = 0
return Dim(start, stop, size, stride)
def get_offset(self, idx):
return self.start + idx * self.stride
def __repr__(self):
strfmt = "Dim(start=%s, stop=%s, size=%s, stride=%s)"
return strfmt % (self.start, self.stop, self.size, self.stride)
def normalize(self, base):
return Dim(start=self.start - base, stop=self.stop - base,
size=self.size, stride=self.stride)
def copy(self, start=None, stop=None, size=None, stride=None):
if start is None:
start = self.start
if stop is None:
stop = self.stop
if size is None:
size = self.size
if stride is None:
stride = self.stride
return Dim(start, stop, size, stride)
def is_contiguous(self, itemsize):
return self.stride == itemsize
def compute_index(indices, dims):
return sum(d.get_offset(i) for i, d in zip(indices, dims))
class Array(object):
"""A dummy numpy array-like object. Consider it an array without the
actual data, but offset from the base data pointer.
Attributes
----------
dims: tuple of Dim
describing each dimension of the array
ndim: int
number of dimensions
shape: tuple of int
size of each dimension
strides: tuple of int
stride of each dimension
itemsize: int
itemsize
extent: (start, end)
start and end offset containing the memory region
"""
@classmethod
def from_desc(cls, offset, shape, strides, itemsize):
dims = []
for ashape, astride in zip(shape, strides):
dim = Dim(offset, offset + ashape * astride, ashape, astride)
dims.append(dim)
return cls(dims, itemsize)
def __init__(self, dims, itemsize):
self.dims = tuple(dims)
self.ndim = len(self.dims)
self.shape = tuple(dim.size for dim in self.dims)
self.strides = tuple(dim.stride for dim in self.dims)
self.itemsize = itemsize
self.size = numpy.prod(self.shape)
self.extent = self._compute_extent()
self.flags = self._compute_layout()
def _compute_layout(self):
leftmost = self.dims[0].is_contiguous(self.itemsize)
rightmost = self.dims[-1].is_contiguous(self.itemsize)
flags = {}
def is_contig(traverse):
last = next(traverse)
for dim in traverse:
if last.size != 0 and last.size * last.stride != dim.stride:
return False
last = dim
return True
flags['F_CONTIGUOUS'] = leftmost and is_contig(iter(self.dims))
flags['C_CONTIGUOUS'] = rightmost and is_contig(reversed(self.dims))
return flags
def _compute_extent(self):
firstidx = [0] * self.ndim
lastidx = [s - 1 for s in self.shape]
start = compute_index(firstidx, self.dims)
stop = compute_index(lastidx, self.dims) + self.itemsize
return Extent(start, stop)
def __repr__(self):
return '<Array dims=%s itemsize=%s>' % (self.dims, self.itemsize)
def __getitem__(self, item):
if not isinstance(item, tuple):
item = [item]
else:
item = list(item)
nitem = len(item)
ndim = len(self.dims)
if nitem > ndim:
raise IndexError("%d extra indices given" % (nitem - ndim,))
# Add empty slices for missing indices
while len(item) < ndim:
item.append(slice(None, None))
dims = [dim.__getitem__(it) for dim, it in zip(self.dims, item)]
return Array(dims, self.itemsize)
@property
def is_c_contig(self):
return self.flags['C_CONTIGUOUS']
@property
def is_f_contig(self):
return self.flags['F_CONTIGUOUS']
def iter_contiguous_extent(self):
""" Generates extents
"""
if self.is_c_contig or self.is_f_contig:
yield self.extent
else:
if self.dims[0].stride < self.dims[-1].stride:
innerdim = self.dims[0]
outerdims = self.dims[1:]
outershape = self.shape[1:]
else:
innerdim = self.dims[-1]
outerdims = self.dims[:-1]
outershape = self.shape[:-1]
if innerdim.is_contiguous(self.itemsize):
oslen = [range(s) for s in outershape]
for indices in itertools.product(*oslen):
base = compute_index(indices, outerdims)
yield base + innerdim.start, base + innerdim.stop
else:
oslen = [range(s) for s in self.shape]
for indices in itertools.product(*oslen):
offset = compute_index(indices, self.dims)
yield offset, offset + self.itemsize
def reshape(self, *newshape, **kws):
order = kws.pop('order', 'C')
if kws:
raise TypeError('unknown keyword arguments %s' % kws.keys())
if order not in 'CFA':
raise ValueError('order not C|F|A')
newsize = functools.reduce(operator.mul, newshape, 1)
if order == 'A':
order = 'F' if self.is_f_contig else 'C'
if newsize != self.size:
raise ValueError("reshape changes the size of the array")
elif self.is_c_contig or self.is_f_contig:
if order == 'C':
newstrides = list(iter_strides_c_contig(self, newshape))
elif order == 'F':
newstrides = list(iter_strides_f_contig(self, newshape))
else:
raise AssertionError("unreachable")
ret = self.from_desc(self.extent.begin, shape=newshape,
strides=newstrides, itemsize=self.itemsize)
return ret, list(self.iter_contiguous_extent())
else:
raise NotImplementedError("reshape on non-contiguous array")
def ravel(self, order='C'):
if order not in 'CFA':
raise ValueError('order not C|F|A')
if self.ndim <= 1:
return self
elif (order == 'C' and self.is_c_contig or
order == 'F' and self.is_f_contig):
newshape = (self.size,)
newstrides = (self.itemsize,)
arr = self.from_desc(self.extent.begin, newshape, newstrides,
self.itemsize)
return arr, list(self.iter_contiguous_extent())
else:
raise NotImplementedError("ravel on non-contiguous array")
def iter_strides_f_contig(arr, shape=None):
"""yields the f-contigous strides
"""
assert arr.is_f_contig
shape = arr.shape if shape is None else shape
itemsize = arr.itemsize
yield itemsize
sum = 1
for s in shape[:-1]:
sum *= s
yield sum * itemsize
def iter_strides_c_contig(arr, shape=None):
"""yields the c-contigous strides
"""
assert arr.is_c_contig
shape = arr.shape if shape is None else shape
itemsize = arr.itemsize
def gen():
yield itemsize
sum = 1
for s in reversed(shape[1:]):
sum *= s
yield sum * itemsize
for i in reversed(list(gen())):
yield i
def is_element_indexing(item, ndim):
if isinstance(item, slice):
return False
elif isinstance(item, tuple):
if len(item) == ndim:
if not any(isinstance(it, slice) for it in item):
return True
else:
return True
return False
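# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original module) of the dummy array
# machinery above: describe a 4x5 C-contiguous float32-like array (itemsize 4)
# with no backing data and inspect the layout information derived from it.
if __name__ == '__main__':
    arr = Array.from_desc(offset=0, shape=(4, 5), strides=(20, 4), itemsize=4)
    print(arr.shape, arr.strides, arr.is_c_contig)  # (4, 5) (20, 4) True
    print(arr.extent)                               # Extent(begin=0, end=80)
    sub = arr[1:3, :]
    print(sub.shape, sub.strides)                   # (2, 5) (20, 4)
    flat, extents = arr.reshape(20)
    print(flat.shape, extents)                      # (20,) [Extent(begin=0, end=80)]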
########NEW FILE########
__FILENAME__ = findlib
from __future__ import print_function, absolute_import
import sys
import os
import re
def get_lib_dir():
"""
Anaconda-specific: return the directory containing the environment's shared libraries.
"""
dirname = 'DLLs' if sys.platform == 'win32' else 'lib'
libdir = os.path.join(sys.prefix, dirname)
return libdir
DLLNAMEMAP = {
'linux2': r'lib%(name)s\.so\.%(ver)s$',
'darwin': r'lib%(name)s\.%(ver)s\.dylib$',
'win32': r'%(name)s%(ver)s\.dll$',
}
RE_VER = r'[0-9]*([_\.][0-9]+)*'
def find_lib(libname, libdir=None, platform=None):
platform = platform or sys.platform
pat = DLLNAMEMAP[platform] % {"name": libname, "ver": RE_VER}
regex = re.compile(pat)
return find_file(regex, libdir)
def find_file(pat, libdir=None):
libdir = libdir or get_lib_dir()
entries = os.listdir(libdir)
candidates = [os.path.join(libdir, ent)
for ent in entries if pat.match(ent)]
return [c for c in candidates if os.path.isfile(c)]
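# ---------------------------------------------------------------------------
# A small sketch (not part of the original module) of the per-platform filename
# patterns find_lib builds. It only inspects the regexes, so no library needs
# to be installed; "cudart" and the example filename are illustrative.
if __name__ == '__main__':
    for plat in ('linux2', 'darwin', 'win32'):
        print(plat, '->', DLLNAMEMAP[plat] % {"name": "cudart", "ver": RE_VER})
    pat = re.compile(DLLNAMEMAP['linux2'] % {"name": "cudart", "ver": RE_VER})
    print(bool(pat.match("libcudart.so.5.5")))  # True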
########NEW FILE########
__FILENAME__ = interpreter
from __future__ import print_function, division, absolute_import
try:
import __builtin__ as builtins
except ImportError:
import builtins
import sys
import dis
from numba import ir, controlflow, dataflow, utils
class Interpreter(object):
"""A bytecode interpreter that builds up the IR.
"""
def __init__(self, bytecode):
self.bytecode = bytecode
self.scopes = []
self.loc = ir.Loc(filename=bytecode.filename, line=1)
self.argspec = bytecode.argspec
# Control flow analysis
self.cfa = controlflow.ControlFlowAnalysis(bytecode)
self.cfa.run()
# Data flow analysis
self.dfa = dataflow.DataFlowAnalysis(self.cfa)
self.dfa.run()
global_scope = ir.Scope(parent=None, loc=self.loc)
self._fill_global_scope(global_scope)
self.scopes.append(global_scope)
# { inst offset : ir.Block }
self.blocks = {}
self.syntax_info = []
# Temp states during interpretation
self.current_block = None
self.current_block_offset = None
self.syntax_blocks = []
self.dfainfo = None
self._block_actions = {}
def _fill_global_scope(self, scope):
"""TODO
"""
pass
def _fill_args_into_scope(self, scope):
for arg in self.argspec.args:
scope.define(name=arg, loc=self.loc)
def interpret(self):
firstblk = min(self.cfa.blocks.keys())
self.loc = ir.Loc(filename=self.bytecode.filename,
line=self.bytecode[firstblk].lineno)
self.scopes.append(ir.Scope(parent=self.current_scope, loc=self.loc))
self._fill_args_into_scope(self.current_scope)
# Interpret loop
for inst, kws in self._iter_inst():
self._dispatch(inst, kws)
# Clean up
self._remove_invalid_syntax_blocks()
def _remove_invalid_syntax_blocks(self):
self.syntax_info = [syn for syn in self.syntax_info if syn.valid()]
def verify(self):
for b in utils.dict_itervalues(self.blocks):
b.verify()
def _iter_inst(self):
for block in self.cfa.iterliveblocks():
firstinst = self.bytecode[block.body[0]]
self._start_new_block(firstinst)
for offset, kws in self.dfainfo.insts:
inst = self.bytecode[offset]
self.loc = ir.Loc(filename=self.bytecode.filename,
line=inst.lineno)
yield inst, kws
def _start_new_block(self, inst):
self.loc = ir.Loc(filename=self.bytecode.filename, line=inst.lineno)
oldblock = self.current_block
self.insert_block(inst.offset)
# Ensure the last block is terminated
if oldblock is not None and not oldblock.is_terminated:
jmp = ir.Jump(inst.offset, loc=self.loc)
oldblock.append(jmp)
# Get DFA block info
self.dfainfo = self.dfa.infos[self.current_block_offset]
# Insert PHI
self._insert_phi()
# Notify listeners for the new block
for fn in utils.dict_itervalues(self._block_actions):
fn(self.current_block_offset, self.current_block)
def _insert_phi(self):
if self.dfainfo.incomings:
assert len(self.dfainfo.incomings) == 1
incomings = self.cfa.blocks[self.current_block_offset].incoming
phivar = self.dfainfo.incomings[0]
if len(incomings) == 1:
ib = utils.iter_next(iter(incomings))
lingering = self.dfa.infos[ib].stack
assert len(lingering) == 1
iv = lingering[0]
self.store(self.get(iv), phivar)
else:
# Invert the PHI node
for ib in incomings:
lingering = self.dfa.infos[ib].stack
assert len(lingering) == 1
iv = lingering[0]
# Add assignment in incoming block to forward the value
target = self.current_scope.get_or_define('$phi' + phivar,
loc=self.loc)
stmt = ir.Assign(value=self.get(iv), target=target,
loc=self.loc)
self.blocks[ib].insert_before_terminator(stmt)
self.store(target, phivar)
def get_global_value(self, name):
"""
Get a global value from the function's globals (first) or from the
builtins module (second). If both fail, return ir.UNDEFINED.
"""
try:
return utils.func_globals(self.bytecode.func)[name]
except KeyError:
return getattr(builtins, name, ir.UNDEFINED)
@property
def current_scope(self):
return self.scopes[-1]
@property
def code_consts(self):
return self.bytecode.co_consts
@property
def code_locals(self):
return self.bytecode.co_varnames
@property
def code_names(self):
return self.bytecode.co_names
def _dispatch(self, inst, kws):
assert self.current_block is not None
fname = "op_%s" % inst.opname.replace('+', '_')
try:
fn = getattr(self, fname)
except AttributeError:
raise NotImplementedError(inst)
else:
return fn(inst, **kws)
def dump(self, file=None):
file = file or sys.stdout
for offset, block in sorted(self.blocks.items()):
print('label %d:' % offset, file=file)
block.dump(file=file)
# --- Scope operations ---
def store(self, value, name):
if self.current_block_offset in self.cfa.backbone:
target = self.current_scope.redefine(name, loc=self.loc)
else:
target = self.current_scope.get_or_define(name, loc=self.loc)
stmt = ir.Assign(value=value, target=target, loc=self.loc)
self.current_block.append(stmt)
# def store_temp(self, value):
# target = self.current_scope.make_temp(loc=self.loc)
# stmt = ir.Assign(value=value, target=target, loc=self.loc)
# self.current_block.append(stmt)
# return target
def get(self, name):
return self.current_scope.get(name)
# --- Block operations ---
def insert_block(self, offset, scope=None, loc=None):
scope = scope or self.current_scope
loc = loc or self.loc
blk = ir.Block(scope=scope, loc=loc)
self.blocks[offset] = blk
self.current_block = blk
self.current_block_offset = offset
return blk
def block_constains_opname(self, offset, opname):
for offset in self.cfa.blocks[offset]:
inst = self.bytecode[offset]
if inst.opname == opname:
return True
return False
# --- Bytecode handlers ---
def op_PRINT_ITEM(self, inst, item, printvar, res):
item = self.get(item)
printgv = ir.Global("print", print, loc=self.loc)
self.store(value=printgv, name=printvar)
call = ir.Expr.call(self.get(printvar), (item,), (), loc=self.loc)
self.store(value=call, name=res)
def op_PRINT_NEWLINE(self, inst, printvar, res):
printgv = ir.Global("print", print, loc=self.loc)
self.store(value=printgv, name=printvar)
call = ir.Expr.call(self.get(printvar), (), (), loc=self.loc)
self.store(value=call, name=res)
def op_UNPACK_SEQUENCE(self, inst, sequence, stores, iterobj):
sequence = self.get(sequence)
getiter = ir.Expr.getiter(value=sequence, loc=self.loc)
self.store(value=getiter, name=iterobj)
for st in stores:
iternext = ir.Expr.iternextsafe(value=self.get(iterobj),
loc=self.loc)
self.store(value=iternext, name=st)
def op_BUILD_SLICE(self, inst, start, stop, step, res, slicevar):
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
if step is None:
sliceinst = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
else:
step = self.get(step)
sliceinst = ir.Expr.call(self.get(slicevar), (start, stop, step),
(), loc=self.loc)
self.store(value=sliceinst, name=res)
def op_SLICE_0(self, inst, base, res, slicevar, indexvar, nonevar):
base = self.get(base)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
index = ir.Expr.call(self.get(slicevar), (none, none), (), loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_1(self, inst, base, start, nonevar, res, slicevar, indexvar):
base = self.get(base)
start = self.get(start)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, none), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_2(self, inst, base, nonevar, stop, res, slicevar, indexvar):
base = self.get(base)
stop = self.get(stop)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (none, stop,), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_SLICE_3(self, inst, base, start, stop, res, slicevar, indexvar):
base = self.get(base)
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
self.store(value=index, name=indexvar)
expr = ir.Expr.getitem(base, self.get(indexvar), loc=self.loc)
self.store(value=expr, name=res)
def op_STORE_SLICE_0(self, inst, base, value, slicevar, indexvar, nonevar):
base = self.get(base)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
index = ir.Expr.call(self.get(slicevar), (none, none), (), loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
def op_STORE_SLICE_1(self, inst, base, start, nonevar, value, slicevar,
indexvar):
base = self.get(base)
start = self.get(start)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, none), (),
loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
def op_STORE_SLICE_2(self, inst, base, nonevar, stop, value, slicevar,
indexvar):
base = self.get(base)
stop = self.get(stop)
nonegv = ir.Const(None, loc=self.loc)
self.store(value=nonegv, name=nonevar)
none = self.get(nonevar)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (none, stop,), (),
loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
def op_STORE_SLICE_3(self, inst, base, start, stop, value, slicevar,
indexvar):
base = self.get(base)
start = self.get(start)
stop = self.get(stop)
slicegv = ir.Global("slice", slice, loc=self.loc)
self.store(value=slicegv, name=slicevar)
index = ir.Expr.call(self.get(slicevar), (start, stop), (),
loc=self.loc)
self.store(value=index, name=indexvar)
stmt = ir.SetItem(base, self.get(indexvar), self.get(value),
loc=self.loc)
self.current_block.append(stmt)
def op_STORE_FAST(self, inst, value):
dstname = self.code_locals[inst.arg]
value = self.get(value)
self.store(value=value, name=dstname)
def op_LOAD_ATTR(self, inst, item, res):
item = self.get(item)
attr = self.code_names[inst.arg]
getattr = ir.Expr.getattr(item, attr, loc=self.loc)
self.store(getattr, res)
def op_LOAD_CONST(self, inst, res):
value = self.code_consts[inst.arg]
const = ir.Const(value, loc=self.loc)
self.store(const, res)
def op_LOAD_GLOBAL(self, inst, res):
name = self.code_names[inst.arg]
value = self.get_global_value(name)
gl = ir.Global(name, value, loc=self.loc)
self.store(gl, res)
def op_SETUP_LOOP(self, inst):
assert self.blocks[inst.offset] is self.current_block
loop = ir.Loop(inst.offset, exit=(inst.next + inst.arg))
self.syntax_blocks.append(loop)
self.syntax_info.append(loop)
def op_CALL_FUNCTION(self, inst, func, args, kws, res):
func = self.get(func)
args = [self.get(x) for x in args]
# Process keywords
keyvalues = []
removethese = []
for k, v in kws:
k, v = self.get(k), self.get(v)
for inst in self.current_block.body:
if isinstance(inst, ir.Assign) and inst.target is k:
removethese.append(inst)
keyvalues.append((inst.value.value, v))
# Remove keyword constant statements
for inst in removethese:
self.current_block.remove(inst)
expr = ir.Expr.call(func, args, keyvalues, loc=self.loc)
self.store(expr, res)
def op_GET_ITER(self, inst, value, res):
expr = ir.Expr.getiter(value=self.get(value), loc=self.loc)
self.store(expr, res)
def op_FOR_ITER(self, inst, iterator, indval, pred):
"""
Start a new block at this instruction.
"""
assert inst.offset in self.blocks, "FOR_ITER must be block head"
# Mark this block as the loop condition
loop = self.syntax_blocks[-1]
loop.condition = self.current_block_offset
# Emit code
val = self.get(iterator)
iternext = ir.Expr.iternext(value=val, loc=self.loc)
self.store(iternext, indval)
itervalid = ir.Expr.itervalid(value=val, loc=self.loc)
self.store(itervalid, pred)
# Conditional jump
br = ir.Branch(cond=self.get(pred), truebr=inst.next,
falsebr=inst.get_jump_target(),
loc=self.loc)
self.current_block.append(br)
# Add event listener to mark the following blocks as loop body
def mark_as_body(offset, block):
loop.body.append(offset)
self._block_actions[loop] = mark_as_body
def op_BINARY_SUBSCR(self, inst, target, index, res):
index = self.get(index)
target = self.get(target)
expr = ir.Expr.getitem(target=target, index=index, loc=self.loc)
self.store(expr, res)
def op_STORE_SUBSCR(self, inst, target, index, value):
index = self.get(index)
target = self.get(target)
value = self.get(value)
stmt = ir.SetItem(target=target, index=index, value=value,
loc=self.loc)
self.current_block.append(stmt)
def op_BUILD_TUPLE(self, inst, items, res):
expr = ir.Expr.build_tuple(items=[self.get(x) for x in items],
loc=self.loc)
self.store(expr, res)
def op_BUILD_LIST(self, inst, items, res):
expr = ir.Expr.build_list(items=[self.get(x) for x in items],
loc=self.loc)
self.store(expr, res)
def op_UNARY_NEGATIVE(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('-', value=value, loc=self.loc)
return self.store(expr, res)
def op_UNARY_INVERT(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('~', value=value, loc=self.loc)
return self.store(expr, res)
def op_UNARY_NOT(self, inst, value, res):
value = self.get(value)
expr = ir.Expr.unary('not', value=value, loc=self.loc)
return self.store(expr, res)
def _binop(self, op, lhs, rhs, res):
lhs = self.get(lhs)
rhs = self.get(rhs)
expr = ir.Expr.binop(op, lhs=lhs, rhs=rhs, loc=self.loc)
self.store(expr, res)
def op_BINARY_ADD(self, inst, lhs, rhs, res):
self._binop('+', lhs, rhs, res)
def op_BINARY_SUBTRACT(self, inst, lhs, rhs, res):
self._binop('-', lhs, rhs, res)
def op_BINARY_MULTIPLY(self, inst, lhs, rhs, res):
self._binop('*', lhs, rhs, res)
def op_BINARY_DIVIDE(self, inst, lhs, rhs, res):
self._binop('/?', lhs, rhs, res)
def op_BINARY_TRUE_DIVIDE(self, inst, lhs, rhs, res):
self._binop('/', lhs, rhs, res)
def op_BINARY_FLOOR_DIVIDE(self, inst, lhs, rhs, res):
self._binop('//', lhs, rhs, res)
def op_BINARY_MODULO(self, inst, lhs, rhs, res):
self._binop('%', lhs, rhs, res)
def op_BINARY_POWER(self, inst, lhs, rhs, res):
self._binop('**', lhs, rhs, res)
def op_BINARY_LSHIFT(self, inst, lhs, rhs, res):
self._binop('<<', lhs, rhs, res)
def op_BINARY_RSHIFT(self, inst, lhs, rhs, res):
self._binop('>>', lhs, rhs, res)
def op_BINARY_AND(self, inst, lhs, rhs, res):
self._binop('&', lhs, rhs, res)
def op_BINARY_OR(self, inst, lhs, rhs, res):
self._binop('|', lhs, rhs, res)
def op_BINARY_XOR(self, inst, lhs, rhs, res):
self._binop('^', lhs, rhs, res)
_inplace_binop = _binop
def op_INPLACE_ADD(self, inst, lhs, rhs, res):
self._inplace_binop('+', lhs, rhs, res)
def op_INPLACE_SUBTRACT(self, inst, lhs, rhs, res):
self._inplace_binop('-', lhs, rhs, res)
def op_INPLACE_MULTIPLY(self, inst, lhs, rhs, res):
self._inplace_binop('*', lhs, rhs, res)
def op_INPLACE_DIVIDE(self, inst, lhs, rhs, res):
self._inplace_binop('/?', lhs, rhs, res)
def op_INPLACE_TRUE_DIVIDE(self, inst, lhs, rhs, res):
self._inplace_binop('/', lhs, rhs, res)
def op_INPLACE_FLOOR_DIVIDE(self, inst, lhs, rhs, res):
self._inplace_binop('//', lhs, rhs, res)
def op_INPLACE_MODULO(self, inst, lhs, rhs, res):
self._inplace_binop('%', lhs, rhs, res)
def op_INPLACE_POWER(self, inst, lhs, rhs, res):
self._inplace_binop('**', lhs, rhs, res)
def op_INPLACE_LSHIFT(self, inst, lhs, rhs, res):
self._inplace_binop('<<', lhs, rhs, res)
def op_INPLACE_RSHIFT(self, inst, lhs, rhs, res):
self._inplace_binop('>>', lhs, rhs, res)
def op_INPLACE_AND(self, inst, lhs, rhs, res):
self._inplace_binop('&', lhs, rhs, res)
def op_INPLACE_OR(self, inst, lhs, rhs, res):
self._inplace_binop('|', lhs, rhs, res)
def op_INPLACE_XOR(self, inst, lhs, rhs, res):
self._inplace_binop('^', lhs, rhs, res)
def op_JUMP_ABSOLUTE(self, inst):
jmp = ir.Jump(inst.get_jump_target(), loc=self.loc)
self.current_block.append(jmp)
def op_JUMP_FORWARD(self, inst):
jmp = ir.Jump(inst.get_jump_target(), loc=self.loc)
self.current_block.append(jmp)
def op_POP_BLOCK(self, inst, delitem=None):
blk = self.syntax_blocks.pop()
if delitem is not None:
delete = ir.Del(delitem, loc=self.loc)
self.current_block.append(delete)
if blk in self._block_actions:
del self._block_actions[blk]
def op_RETURN_VALUE(self, inst, retval):
ret = ir.Return(self.get(retval), loc=self.loc)
self.current_block.append(ret)
def op_COMPARE_OP(self, inst, lhs, rhs, res):
op = dis.cmp_op[inst.arg]
self._binop(op, lhs, rhs, res)
def op_BREAK_LOOP(self, inst):
loop = self.syntax_blocks[-1]
assert isinstance(loop, ir.Loop)
jmp = ir.Jump(target=loop.exit, loc=self.loc)
self.current_block.append(jmp)
def _op_JUMP_IF(self, inst, pred, iftrue):
brs = {
True: inst.get_jump_target(),
False: inst.next,
}
truebr = brs[iftrue]
falsebr = brs[not iftrue]
bra = ir.Branch(cond=self.get(pred), truebr=truebr, falsebr=falsebr,
loc=self.loc)
self.current_block.append(bra)
# In a while loop?
self._determine_while_condition((truebr, falsebr))
def op_JUMP_IF_FALSE(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=False)
def op_JUMP_IF_TRUE(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=True)
def op_POP_JUMP_IF_FALSE(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=False)
def op_POP_JUMP_IF_TRUE(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=True)
def op_JUMP_IF_FALSE_OR_POP(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=False)
def op_JUMP_IF_TRUE_OR_POP(self, inst, pred):
self._op_JUMP_IF(inst, pred=pred, iftrue=True)
def _determine_while_condition(self, branches):
assert branches
# There is an active syntax block
if not self.syntax_blocks:
return
# TOS is a Loop instance
loop = self.syntax_blocks[-1]
if not isinstance(loop, ir.Loop):
return
# Its condition is not defined
if loop.condition is not None:
return
# One of the branches goes to a POP_BLOCK
for br in branches:
if self.block_constains_opname(br, 'POP_BLOCK'):
break
else:
return
# Which is the exit of the loop
if br not in self.cfa.blocks[loop.exit].incoming:
return
# Therefore, current block is a while loop condition
loop.condition = self.current_block_offset
# Add event listener to mark the following blocks as loop body
def mark_as_body(offset, block):
loop.body.append(offset)
self._block_actions[loop] = mark_as_body
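# ---------------------------------------------------------------------------
# A hedged sketch (not part of the original module) of the dispatch convention
# used by Interpreter._dispatch above: every bytecode opname maps to an
# `op_<OPNAME>` handler method. The sample function and opnames below are
# illustrative only; no numba bytecode object is constructed here.
if __name__ == '__main__':
    def sample(a, b):
        return a + b
    dis.dis(sample)  # the raw bytecode stream the Interpreter walks
    for opname in ('LOAD_FAST', 'BINARY_ADD', 'RETURN_VALUE'):
        print(opname, '->', "op_%s" % opname.replace('+', '_'))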
########NEW FILE########
__FILENAME__ = io_support
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
########NEW FILE########
__FILENAME__ = ir
from __future__ import print_function, division, absolute_import
import sys
import os
import pprint
from collections import defaultdict
class RedefinedError(NameError):
pass
class NotDefinedError(NameError):
pass
class VerificationError(Exception):
pass
class Loc(object):
"""Source location
"""
def __init__(self, filename, line, col=None):
self.filename = filename
self.line = line
self.col = col
def __repr__(self):
return "Loc(filename=%s, line=%s, col=%s)" % (self.filename,
self.line, self.col)
def __str__(self):
if self.col is not None:
return "%s (%s:%s)" % (self.filename, self.line, self.col)
else:
return "%s (%s)" % (self.filename, self.line)
def strformat(self):
try:
# Try to get a relative path
path = os.path.relpath(self.filename)
except ValueError:
# Fall back to the absolute path if an error occurred while getting the
# relative path.
# This may happen on Windows if the drive is different
path = os.path.abspath(self.filename)
return 'File "%s", line %d' % (path, self.line)
class VarMap(object):
def __init__(self):
self._con = {}
def define(self, name, var):
if name in self._con:
raise RedefinedError(name)
else:
self._con[name] = var
def get(self, name):
try:
return self._con[name]
except KeyError:
raise NotDefinedError(name)
def __contains__(self, name):
return name in self._con
def __len__(self):
return len(self._con)
def __repr__(self):
return pprint.pformat(self._con)
    def __hash__(self):
        # VarMap has no `name` attribute; fall back to identity hashing.
        return id(self)
    def __iter__(self):
        # dict.iterkeys() exists only on Python 2; iter() works on both.
        return iter(self._con)
class Stmt(object):
is_terminator = False
class Expr(object):
def __init__(self, op, loc, **kws):
self.op = op
self.loc = loc
self._kws = kws
for k, v in kws.items():
setattr(self, k, v)
@classmethod
def binop(cls, fn, lhs, rhs, loc):
op = 'binop'
return cls(op=op, loc=loc, fn=fn, lhs=lhs, rhs=rhs)
@classmethod
def unary(cls, fn, value, loc):
op = 'unary'
return cls(op=op, loc=loc, fn=fn, value=value)
@classmethod
def call(cls, func, args, kws, loc):
op = 'call'
return cls(op=op, loc=loc, func=func, args=args, kws=kws)
@classmethod
def build_tuple(cls, items, loc):
op = 'build_tuple'
return cls(op=op, loc=loc, items=items)
@classmethod
def build_list(cls, items, loc):
op = 'build_list'
return cls(op=op, loc=loc, items=items)
@classmethod
def getiter(cls, value, loc):
op = 'getiter'
return cls(op=op, loc=loc, value=value)
@classmethod
def iternext(cls, value, loc):
op = 'iternext'
return cls(op=op, loc=loc, value=value)
@classmethod
def iternextsafe(cls, value, loc):
op = 'iternextsafe'
return cls(op=op, loc=loc, value=value)
@classmethod
def itervalid(cls, value, loc):
op = 'itervalid'
return cls(op=op, loc=loc, value=value)
@classmethod
def getattr(cls, value, attr, loc):
op = 'getattr'
return cls(op=op, loc=loc, value=value, attr=attr)
@classmethod
def getitem(cls, target, index, loc):
op = 'getitem'
return cls(op=op, loc=loc, target=target, index=index)
def __repr__(self):
if self.op == 'call':
args = ', '.join(str(a) for a in self.args)
kws = ', '.join('%s=%s' % (k, v) for k, v in self.kws)
return 'call %s(%s, %s)' % (self.func, args, kws)
elif self.op == 'binop':
return '%s %s %s' % (self.lhs, self.fn, self.rhs)
else:
args = ('%s=%s' % (k, v) for k, v in self._kws.items())
return '%s(%s)' % (self.op, ', '.join(args))
    def list_vars(self):
        # Collect every Var operand (the original returned only the first one).
        return [v for v in self._kws.values() if isinstance(v, Var)]
class SetItem(Stmt):
def __init__(self, target, index, value, loc):
self.target = target
self.index = index
self.value = value
self.loc = loc
def __repr__(self):
return '%s[%s] = %s' % (self.target, self.index, self.value)
class Del(Stmt):
def __init__(self, value, loc):
self.value = value
self.loc = loc
def __str__(self):
return "del %s" % self.value
class Return(Stmt):
is_terminator = True
def __init__(self, value, loc):
self.value = value
self.loc = loc
def __str__(self):
return 'return %s' % self.value
class Jump(Stmt):
is_terminator = True
def __init__(self, target, loc):
self.target = target
self.loc = loc
def __str__(self):
return 'jump %s' % self.target
class Branch(Stmt):
is_terminator = True
def __init__(self, cond, truebr, falsebr, loc):
self.cond = cond
self.truebr = truebr
self.falsebr = falsebr
self.loc = loc
def __str__(self):
return 'branch %s, %s, %s' % (self.cond, self.truebr, self.falsebr)
class Assign(Stmt):
def __init__(self, value, target, loc):
self.value = value
self.target = target
self.loc = loc
def __str__(self):
return '%s = %s' % (self.target, self.value)
class Const(object):
def __init__(self, value, loc):
self.value = value
self.loc = loc
def __repr__(self):
return 'const(%s, %s)' % (type(self.value), self.value)
class Global(object):
def __init__(self, name, value, loc):
self.name = name
self.value = value
self.loc = loc
def __str__(self):
return 'global(%s: %s)' % (self.name, self.value)
class Var(object):
"""
Attributes
-----------
- scope: Scope
- name: str
- loc: Loc
Definition location
"""
def __init__(self, scope, name, loc):
self.scope = scope
self.name = name
self.loc = loc
def __repr__(self):
return 'Var(%s, %s)' % (self.name, self.loc)
def __str__(self):
return self.name
@property
def is_temp(self):
return self.name.startswith("$")
class Intrinsic(object):
"""
For inserting intrinsic node into the IR
"""
def __init__(self, name, type, args):
self.name = name
self.type = type
self.loc = None
self.args = args
def __repr__(self):
return 'Intrinsic(%s, %s, %s)' % (self.name, self.type, self.loc)
def __str__(self):
return self.name
class Scope(object):
"""
Attributes
-----------
- parent: Scope
Parent scope
- localvars: VarMap
Scope-local variable map
- loc: Loc
Start of scope location
"""
def __init__(self, parent, loc):
self.parent = parent
self.localvars = VarMap()
self.loc = loc
self.redefined = defaultdict(int)
def define(self, name, loc):
"""
Define a variable
"""
v = Var(scope=self, name=name, loc=loc)
self.localvars.define(v.name, v)
return v
def get(self, name):
"""
Refer to a variable
"""
if name in self.redefined:
name = "%s.%d" % (name, self.redefined[name])
try:
return self.localvars.get(name)
except NotDefinedError:
if self.has_parent:
return self.parent.get(name)
else:
raise
def get_or_define(self, name, loc):
if name in self.redefined:
name = "%s.%d" % (name, self.redefined[name])
v = Var(scope=self, name=name, loc=loc)
if name not in self.localvars:
return self.define(name, loc)
else:
return self.localvars.get(name)
def redefine(self, name, loc):
"""
Redefine if the name is already defined
"""
if name not in self.localvars:
return self.define(name, loc)
else:
ct = self.redefined[name]
self.redefined[name] = ct + 1
newname = "%s.%d" % (name, ct + 1)
return self.define(newname, loc)
def make_temp(self, loc):
n = len(self.localvars)
v = Var(scope=self, name='$%d' % n, loc=loc)
self.localvars.define(v.name, v)
return v
@property
def has_parent(self):
return self.parent is not None
def __repr__(self):
return "Scope(has_parent=%r, num_vars=%d, %s)" % (self.has_parent,
len(self.localvars),
self.loc)
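# Illustrative note: the first redefine('x', loc) on an already-defined name
# creates a variable named 'x.1', the next one 'x.2', and later get('x') /
# get_or_define('x', loc) calls resolve to the most recent renamed version.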
class Block(object):
"""A code block
"""
def __init__(self, scope, loc):
self.scope = scope
self.body = []
self.loc = loc
def append(self, inst):
assert isinstance(inst, Stmt)
self.body.append(inst)
def remove(self, inst):
assert isinstance(inst, Stmt)
del self.body[self.body.index(inst)]
def dump(self, file=sys.stdout):
for inst in self.body:
print(' ', inst, file=file)
@property
def terminator(self):
return self.body[-1]
@property
def is_terminated(self):
return self.body and self.body[-1].is_terminator
def verify(self):
if not self.is_terminated:
raise VerificationError("Missing block terminator")
# Only the last instruction can be a terminator
for inst in self.body[:-1]:
if inst.is_terminator:
raise VerificationError("Terminator before the last "
"instruction")
def insert_before_terminator(self, stmt):
assert isinstance(stmt, Stmt)
assert self.is_terminated
self.body.insert(-1, stmt)
class Loop(object):
__slots__ = "entry", "condition", "body", "exit"
def __init__(self, entry, exit, condition=None):
self.entry = entry
self.condition = condition
self.body = []
self.exit = exit
def valid(self):
try:
self.verify()
except VerificationError:
return False
else:
return True
def verify(self):
if self.entry is None:
raise VerificationError("Missing entry block")
if self.condition is None:
raise VerificationError("Missing condition block")
if self.exit is None:
raise VerificationError("Missing exit block")
if not self.body:
raise VerificationError("Missing body block")
def __repr__(self):
args = self.entry, self.condition, self.body, self.exit
return "Loop(entry=%s, condition=%s, body=%s, exit=%s)" % args
# A stub for undefined global reference
UNDEFINED = object()
########NEW FILE########
__FILENAME__ = irpasses
"""
Contains optimization passes for the IR.
"""
from __future__ import print_function, division, absolute_import
from numba import ir, utils
class RemoveRedundantAssign(object):
"""
Turn assignment pairs into one assignment
"""
def __init__(self, interp):
self.interp = interp
def run(self):
for blkid, blk in utils.dict_iteritems(self.interp.blocks):
self.run_block(blk)
def run_block(self, blk):
tempassign = {}
removeset = set()
for offset, inst in enumerate(blk.body):
self.mark_asssignment(tempassign, offset, inst)
for bag in utils.dict_itervalues(tempassign):
if len(bag) == 2:
off1, off2 = bag
first = blk.body[off1]
second = blk.body[off2]
inst = ir.Assign(value=first.value, target=second.target,
loc=first.loc)
                # Replace the second instruction
blk.body[off2] = inst
# Remove the first
removeset.add(off1)
# Remove from the highest offset to the lowest to preserve order
for off in reversed(sorted(removeset)):
del blk.body[off]
def mark_asssignment(self, tempassign, offset, inst):
if isinstance(inst, ir.Assign):
if inst.target.is_temp:
tempassign[inst.target.name] = [offset]
elif inst.value.name in tempassign:
bag = tempassign[inst.value.name]
if bag[0] == offset - 1:
bag.append(offset)
else:
                    # Only apply to use-once temp variables
del tempassign[inst.value.name]
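# Illustrative sketch (comment only): given a block such as
#
#     $0.1 = const(int, 4)      # temporary written exactly once
#     x = $0.1                  # immediately copied into a real variable
#
# the pass rewrites the second statement into `x = const(int, 4)` and deletes
# the now-redundant assignment to the temporary.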
########NEW FILE########
__FILENAME__ = looplifting
from __future__ import print_function, division, absolute_import
from numba import utils
from numba.bytecode import ByteCodeInst, CustomByteCode
def bind(loops, typingctx, targetctx, locals, flags):
"""
Install loop dispatchers into the module
"""
disps = []
for loopbc in loops:
d = bind_loop(loopbc, typingctx, targetctx, locals, flags)
disps.append(d)
return disps
def bind_loop(loopbc, typingctx, targetctx, locals, flags):
from numba.dispatcher import LiftedLoop
fname = loopbc.func_name
disp = getattr(loopbc.module, fname, None)
if disp is not None:
if not isinstance(disp, LiftedLoop):
raise ValueError("Function %s exist but not a lifted-loop" % fname)
# Short circuit
return disp
else:
disp = LiftedLoop(loopbc, typingctx, targetctx, locals, flags)
setattr(loopbc.module, fname, disp)
return disp
def lift_loop(bytecode):
"""Lift the top-level loops.
Returns (outer, loops)
------------------------
* outer: ByteCode of a copy of the loop-less function.
* loops: a list of ByteCode of the loops.
"""
outer = []
loops = []
separate_loops(bytecode, outer, loops)
    # Discover variable references
outer_rds, outer_wrs = find_varnames_uses(bytecode, outer)
outer_wrs |= set(bytecode.argspec.args)
lbclist = []
outerlabels = set(bytecode.labels)
outernames = list(bytecode.co_names)
for loop in loops:
args, rets = discover_args_and_returns(bytecode, loop, outer_rds,
outer_wrs)
if rets:
            # Cannot deal with a loop that writes to variables read in the outer body
# Put the loop back into the outer function
outer = stitch_instructions(outer, loop)
# Recompute read-write variable set
wrs, rds = find_varnames_uses(bytecode, loop)
outer_wrs |= wrs
outer_rds |= rds
else:
insert_loop_call(bytecode, loop, args, lbclist, outer, outerlabels,
outernames)
# Build outer bytecode
codetable = utils.SortedMap((i.offset, i) for i in outer)
outerbc = CustomByteCode(func=bytecode.func,
func_name=bytecode.func_name,
argspec=bytecode.argspec,
filename=bytecode.filename,
co_names=outernames,
co_varnames=bytecode.co_varnames,
co_consts=bytecode.co_consts,
table=codetable,
labels=outerlabels & set(codetable.keys()))
return outerbc, lbclist
def insert_loop_call(bytecode, loop, args, lbclist, outer, outerlabels,
outernames):
endloopoffset = loop[-1].next
# Accepted. Create a bytecode object for the loop
args = tuple(args)
lbc = make_loop_bytecode(bytecode, loop, args)
lbclist.append(lbc)
# Insert jump to the end
jmp = ByteCodeInst.get(loop[0].offset, 'JUMP_ABSOLUTE',
outer[-1].next)
jmp.lineno = loop[0].lineno
insert_instruction(outer, jmp)
outerlabels.add(outer[-1].next)
# Prepare arguments
outernames.append(lbc.func_name)
loadfn = ByteCodeInst.get(outer[-1].next, "LOAD_GLOBAL",
outernames.index(lbc.func_name))
loadfn.lineno = loop[0].lineno
insert_instruction(outer, loadfn)
for arg in args:
loadarg = ByteCodeInst.get(outer[-1].next, 'LOAD_FAST',
bytecode.co_varnames.index(arg))
loadarg.lineno = loop[0].lineno
insert_instruction(outer, loadarg)
# Call function
assert len(args) < 256
call = ByteCodeInst.get(outer[-1].next, "CALL_FUNCTION", len(args))
call.lineno = loop[0].lineno
insert_instruction(outer, call)
poptop = ByteCodeInst.get(outer[-1].next, "POP_TOP", None)
poptop.lineno = loop[0].lineno
insert_instruction(outer, poptop)
jmpback = ByteCodeInst.get(outer[-1].next, 'JUMP_ABSOLUTE',
endloopoffset)
jmpback.lineno = loop[0].lineno
insert_instruction(outer, jmpback)
def insert_instruction(insts, item):
i = find_previous_inst(insts, item.offset)
insts.insert(i, item)
def find_previous_inst(insts, offset):
for i, inst in enumerate(insts):
if inst.offset > offset:
return i
return len(insts)
def make_loop_bytecode(bytecode, loop, args):
# Add return None
co_consts = tuple(bytecode.co_consts)
if None not in co_consts:
co_consts += (None,)
# Load None
load_none = ByteCodeInst.get(loop[-1].next, "LOAD_CONST",
co_consts.index(None))
load_none.lineno = loop[-1].lineno
loop.append(load_none)
# Return None
return_value = ByteCodeInst.get(loop[-1].next, "RETURN_VALUE", 0)
return_value.lineno = loop[-1].lineno
loop.append(return_value)
# Function name
loopfuncname = bytecode.func_name+"__numba__loop%d__" % loop[0].offset
# Argspec
argspectype = type(bytecode.argspec)
argspec = argspectype(args=args, varargs=(), keywords=(), defaults=())
# Code table
codetable = utils.SortedMap((i.offset, i) for i in loop)
# Custom bytecode object
lbc = CustomByteCode(func=bytecode.func,
func_name=loopfuncname,
argspec=argspec,
filename=bytecode.filename,
co_names=bytecode.co_names,
co_varnames=bytecode.co_varnames,
co_consts=co_consts,
table=codetable,
labels=bytecode.labels)
return lbc
def stitch_instructions(outer, loop):
begin = loop[0].offset
i = find_previous_inst(outer, begin)
return outer[:i] + loop + outer[i:]
def discover_args_and_returns(bytecode, insts, outer_rds, outer_wrs):
"""
Basic analysis for args and returns
    This completely ignores the ordering of the read-writes.
"""
rdnames, wrnames = find_varnames_uses(bytecode, insts)
# Pass names that are written outside and read locally
args = outer_wrs & rdnames
# Return values that it written locally and read outside
rets = wrnames & outer_rds
return args, rets
def find_varnames_uses(bytecode, insts):
rdnames = set()
wrnames = set()
for inst in insts:
if inst.opname == 'LOAD_FAST':
rdnames.add(bytecode.co_varnames[inst.arg])
elif inst.opname == 'STORE_FAST':
wrnames.add(bytecode.co_varnames[inst.arg])
return rdnames, wrnames
def separate_loops(bytecode, outer, loops):
"""
Separate top-level loops from the function
Stores loopless instructions from the original function into `outer`.
Stores list of loop instructions into `loops`.
Both `outer` and `loops` are list-like (`append(item)` defined).
"""
endloop = None
cur = None
for inst in bytecode:
if endloop is None:
if inst.opname == 'SETUP_LOOP':
cur = [inst]
endloop = inst.next + inst.arg
else:
outer.append(inst)
else:
cur.append(inst)
if inst.next == endloop:
for inst in cur:
if inst.opname == 'RETURN_VALUE':
# Reject if return inside loop
outer.extend(cur)
break
else:
loops.append(cur)
endloop = None
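# Rough illustration (hypothetical example, comments only): in a function like
#
#     def fill(a, n):
#         for i in range(n):
#             a[i] = i * 2
#
# separate_loops() collects the bytecode of the top-level `for` into `loops`
# and the rest into `outer`; lift_loop() then rejects any loop whose locally
# written names are also read in the outer body, and turns each remaining loop
# into its own bytecode object that the patched outer bytecode calls.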
########NEW FILE########
__FILENAME__ = lowering
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from llvm.core import Type, Builder, Module
import llvm.core as lc
from numba import ir, types, cgutils, utils, config
try:
import builtins
except ImportError:
import __builtin__ as builtins
class LoweringError(Exception):
def __init__(self, msg, loc):
self.msg = msg
self.loc = loc
super(LoweringError, self).__init__("%s\n%s" % (msg, loc.strformat()))
def default_mangler(name, argtypes):
codedargs = '.'.join(str(a).replace(' ', '_') for a in argtypes)
return '.'.join([name, codedargs])
class FunctionDescriptor(object):
__slots__ = ('native', 'pymod', 'name', 'doc', 'blocks', 'typemap',
'calltypes', 'args', 'kws', 'restype', 'argtypes',
'qualified_name', 'mangled_name')
def __init__(self, native, pymod, name, doc, blocks, typemap,
restype, calltypes, args, kws, mangler=None, argtypes=None,
qualname=None):
self.native = native
self.pymod = pymod
self.name = name
self.doc = doc
self.blocks = blocks
self.typemap = typemap
self.calltypes = calltypes
self.args = args
self.kws = kws
self.restype = restype
# Argument types
self.argtypes = argtypes or [self.typemap[a] for a in args]
self.qualified_name = qualname or '.'.join([self.pymod.__name__,
self.name])
mangler = default_mangler if mangler is None else mangler
self.mangled_name = mangler(self.qualified_name, self.argtypes)
def _describe(interp):
func = interp.bytecode.func
fname = interp.bytecode.func_name
pymod = interp.bytecode.module
doc = func.__doc__ or ''
args = interp.argspec.args
kws = () # TODO
return fname, pymod, doc, args, kws
def describe_external(name, restype, argtypes):
args = ["arg%d" % i for i in range(len(argtypes))]
fd = FunctionDescriptor(native=True, pymod=None, name=name, doc='',
blocks=None, restype=restype, calltypes=None,
argtypes=argtypes, args=args, kws=None,
typemap=None, qualname=name, mangler=lambda a,x: a)
return fd
def describe_function(interp, typemap, restype, calltypes, mangler):
fname, pymod, doc, args, kws = _describe(interp)
native = True
sortedblocks = utils.SortedMap(utils.dict_iteritems(interp.blocks))
fd = FunctionDescriptor(native, pymod, fname, doc, sortedblocks,
typemap, restype, calltypes, args, kws, mangler)
return fd
def describe_pyfunction(interp):
fname, pymod, doc, args, kws = _describe(interp)
defdict = lambda: defaultdict(lambda: types.pyobject)
typemap = defdict()
restype = types.pyobject
calltypes = defdict()
native = False
sortedblocks = utils.SortedMap(utils.dict_iteritems(interp.blocks))
fd = FunctionDescriptor(native, pymod, fname, doc, sortedblocks,
typemap, restype, calltypes, args, kws)
return fd
class BaseLower(object):
"""
Lower IR to LLVM
"""
def __init__(self, context, fndesc):
self.context = context
self.fndesc = fndesc
# Initialize LLVM
self.module = Module.new("module.%s" % self.fndesc.name)
# Install metadata
md_pymod = cgutils.MetadataKeyStore(self.module, "python.module")
md_pymod.set(fndesc.pymod.__name__)
# Setup function
self.function = context.declare_function(self.module, fndesc)
self.entry_block = self.function.append_basic_block('entry')
self.builder = Builder.new(self.entry_block)
# self.builder = cgutils.VerboseProxy(self.builder)
# Internal states
self.blkmap = {}
self.varmap = {}
self.firstblk = min(self.fndesc.blocks.keys())
# Subclass initialization
self.init()
def init(self):
pass
def post_lower(self):
"""Called after all blocks are lowered
"""
pass
def lower(self):
# Init argument variables
fnargs = self.context.get_arguments(self.function)
for ak, av in zip(self.fndesc.args, fnargs):
at = self.typeof(ak)
av = self.context.get_argument_value(self.builder, at, av)
av = self.init_argument(av)
self.storevar(av, ak)
# Init blocks
for offset in self.fndesc.blocks:
bname = "B%d" % offset
self.blkmap[offset] = self.function.append_basic_block(bname)
# Lower all blocks
for offset, block in self.fndesc.blocks.items():
bb = self.blkmap[offset]
self.builder.position_at_end(bb)
self.lower_block(block)
self.post_lower()
# Close entry block
self.builder.position_at_end(self.entry_block)
self.builder.branch(self.blkmap[self.firstblk])
if config.DUMP_LLVM:
print(("LLVM DUMP %s" % self.fndesc.qualified_name).center(80,'-'))
print(self.module)
print('=' * 80)
self.module.verify()
def init_argument(self, arg):
return arg
def lower_block(self, block):
for inst in block.body:
try:
self.lower_inst(inst)
except LoweringError:
raise
except Exception as e:
msg = "Internal error:\n%s: %s" % (type(e).__name__, e)
raise LoweringError(msg, inst.loc)
def typeof(self, varname):
return self.fndesc.typemap[varname]
class Lower(BaseLower):
def lower_inst(self, inst):
if config.DEBUG_JIT:
self.context.debug_print(self.builder, str(inst))
if isinstance(inst, ir.Assign):
ty = self.typeof(inst.target.name)
val = self.lower_assign(ty, inst)
self.storevar(val, inst.target.name)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
condty = self.typeof(inst.cond.name)
pred = self.context.cast(self.builder, cond, condty, types.boolean)
assert pred.type == Type.int(1), ("cond is not i1: %s" % pred.type)
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Return):
val = self.loadvar(inst.value.name)
oty = self.typeof(inst.value.name)
ty = self.fndesc.restype
if isinstance(ty, types.Optional):
if oty == types.none:
self.context.return_native_none(self.builder)
return
else:
ty = ty.type
if ty != oty:
val = self.context.cast(self.builder, val, oty, ty)
retval = self.context.get_return_value(self.builder, ty, val)
self.context.return_value(self.builder, retval)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
impl = self.context.get_function('setitem', signature)
# Convert argument to match
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
value = self.context.cast(self.builder, value, valuety,
signature.args[2])
return impl(self.builder, (target, index, value))
elif isinstance(inst, ir.Del):
pass
else:
raise NotImplementedError(type(inst))
def lower_assign(self, ty, inst):
value = inst.value
if isinstance(value, ir.Const):
if self.context.is_struct_type(ty):
const = self.context.get_constant_struct(self.builder, ty,
value.value)
else:
const = self.context.get_constant(ty, value.value)
return const
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
return self.context.cast(self.builder, val, oty, ty)
elif isinstance(value, ir.Global):
if (isinstance(ty, types.Dummy) or
isinstance(ty, types.Module) or
isinstance(ty, types.Function) or
isinstance(ty, types.Dispatcher)):
return self.context.get_dummy_value()
elif ty == types.boolean:
return self.context.get_constant(ty, value.value)
elif isinstance(ty, types.Array):
return self.context.make_constant_array(self.builder, ty,
value.value)
elif self.context.is_struct_type(ty):
return self.context.get_constant_struct(self.builder, ty,
value.value)
elif ty in types.number_domain:
return self.context.get_constant(ty, value.value)
elif isinstance(ty, types.UniTuple):
consts = [self.context.get_constant(t, v)
for t, v in zip(ty, value.value)]
return cgutils.pack_array(self.builder, consts)
else:
raise NotImplementedError('global', ty)
else:
raise NotImplementedError(type(value), value)
def lower_expr(self, resty, expr):
if expr.op == 'binop':
lhs = expr.lhs
rhs = expr.rhs
lty = self.typeof(lhs.name)
rty = self.typeof(rhs.name)
lhs = self.loadvar(lhs.name)
rhs = self.loadvar(rhs.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
lhs = self.context.cast(self.builder, lhs, lty, signature.args[0])
rhs = self.context.cast(self.builder, rhs, rty, signature.args[1])
res = impl(self.builder, (lhs, rhs))
return self.context.cast(self.builder, res, signature.return_type,
resty)
elif expr.op == 'unary':
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
return self.context.cast(self.builder, res, signature.return_type,
resty)
elif expr.op == 'call':
argvals = [self.loadvar(a.name) for a in expr.args]
argtyps = [self.typeof(a.name) for a in expr.args]
signature = self.fndesc.calltypes[expr]
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
castvals = expr.func.args
else:
assert not expr.kws, expr.kws
fnty = self.typeof(expr.func.name)
castvals = [self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps,
signature.args)]
if isinstance(fnty, types.Method):
                # Methods of objects are handled differently
fnobj = self.loadvar(expr.func.name)
res = self.context.call_class_method(self.builder, fnobj,
signature, castvals)
elif isinstance(fnty, types.FunctionPointer):
                # Handle function pointer
pointer = fnty.funcptr
res = self.context.call_function_pointer(self.builder, pointer,
signature, castvals)
else:
# Normal function resolution
impl = self.context.get_function(fnty, signature)
res = impl(self.builder, castvals)
libs = getattr(impl, "libs", ())
if libs:
self.context.add_libs(libs)
return self.context.cast(self.builder, res, signature.return_type,
resty)
elif expr.op in ('getiter', 'iternext', 'itervalid', 'iternextsafe'):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
return self.context.cast(self.builder, res, signature.return_type,
resty)
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
res = self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val)
if not isinstance(impl.return_type, types.Kind):
res = self.context.cast(self.builder, res, impl.return_type,
resty)
return res
elif expr.op == "getitem":
baseval = self.loadvar(expr.target.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.target.name),
self.typeof(expr.index.name))
castvals = [self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps,
signature.args)]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res, signature.return_type,
resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)]
tup = self.context.get_constant_undef(resty)
for i in range(len(castvals)):
tup = self.builder.insert_value(tup, itemvals[i], i)
return tup
raise NotImplementedError(expr)
def getvar(self, name):
if name not in self.varmap:
self.varmap[name] = self.alloca(name, self.typeof(name))
return self.varmap[name]
def loadvar(self, name):
ptr = self.getvar(name)
return self.builder.load(ptr)
def storevar(self, value, name):
ptr = self.getvar(name)
assert value.type == ptr.type.pointee,\
"store %s to ptr of %s" % (value.type, ptr.type.pointee)
self.builder.store(value, ptr)
def alloca(self, name, type):
ltype = self.context.get_value_type(type)
bb = self.builder.basic_block
self.builder.position_at_end(self.entry_block)
ptr = self.builder.alloca(ltype, name=name)
self.builder.position_at_end(bb)
return ptr
PYTHON_OPMAP = {
'+': "number_add",
'-': "number_subtract",
'*': "number_multiply",
'/?': "number_divide",
'/': "number_truedivide",
'//': "number_floordivide",
'%': "number_remainder",
'**': "number_power",
'<<': "number_lshift",
'>>': "number_rshift",
'&': "number_and",
'|': "number_or",
'^': "number_xor",
}
class PyLower(BaseLower):
def init(self):
self.pyapi = self.context.get_python_api(self.builder)
# Add error handling block
self.ehblock = self.function.append_basic_block('error')
def post_lower(self):
with cgutils.goto_block(self.builder, self.ehblock):
self.cleanup()
self.context.return_exc(self.builder)
def init_argument(self, arg):
self.incref(arg)
return arg
def lower_inst(self, inst):
if isinstance(inst, ir.Assign):
value = self.lower_assign(inst)
self.storevar(value, inst.target.name)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setitem(target, index, value)
negone = lc.Constant.int_signextend(ok.type, -1)
pred = self.builder.icmp(lc.ICMP_EQ, ok, negone)
with cgutils.if_unlikely(self.builder, pred):
self.return_exception_raised()
elif isinstance(inst, ir.Return):
retval = self.loadvar(inst.value.name)
self.incref(retval)
self.cleanup()
self.context.return_value(self.builder, retval)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
if cond.type == Type.int(1):
istrue = cond
else:
istrue = self.pyapi.object_istrue(cond)
zero = lc.Constant.null(istrue.type)
pred = self.builder.icmp(lc.ICMP_NE, istrue, zero)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Del):
obj = self.loadvar(inst.value)
self.decref(obj)
else:
raise NotImplementedError(type(inst), inst)
def lower_assign(self, inst):
"""
The returned object must have a new reference
"""
value = inst.value
if isinstance(value, ir.Const):
return self.lower_const(value.value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
self.incref(val)
return val
elif isinstance(value, ir.Expr):
return self.lower_expr(value)
elif isinstance(value, ir.Global):
return self.lower_global(value.name, value.value)
else:
raise NotImplementedError(type(value), value)
def lower_expr(self, expr):
if expr.op == 'binop':
lhs = self.loadvar(expr.lhs.name)
rhs = self.loadvar(expr.rhs.name)
if expr.fn in PYTHON_OPMAP:
fname = PYTHON_OPMAP[expr.fn]
fn = getattr(self.pyapi, fname)
res = fn(lhs, rhs)
else:
                # Assume it is a rich comparison
res = self.pyapi.object_richcompare(lhs, rhs, expr.fn)
self.check_error(res)
return res
elif expr.op == 'unary':
value = self.loadvar(expr.value.name)
if expr.fn == '-':
res = self.pyapi.number_negative(value)
elif expr.fn == 'not':
res = self.pyapi.object_not(value)
negone = lc.Constant.int_signextend(Type.int(), -1)
err = self.builder.icmp(lc.ICMP_EQ, res, negone)
with cgutils.if_unlikely(self.builder, err):
self.return_exception_raised()
longval = self.builder.zext(res, self.pyapi.long)
res = self.pyapi.bool_from_long(longval)
elif expr.fn == '~':
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == 'call':
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
if not expr.kws:
# No keyword
ret = self.pyapi.call_function_objargs(fn, argvals)
else:
# Have Keywords
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
args = self.pyapi.tuple_pack(argvals)
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == 'getattr':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr_string(obj, expr.attr)
self.check_error(res)
return res
elif expr.op == 'build_tuple':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_list':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == 'getiter':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
self.storevar(res, '$iter$' + expr.value.name)
return self.pack_iter(res)
elif expr.op == 'iternext':
iterstate = self.loadvar(expr.value.name)
iterobj, valid = self.unpack_iter(iterstate)
item = self.pyapi.iter_next(iterobj)
self.set_iter_valid(iterstate, item)
return item
elif expr.op == 'iternextsafe':
iterstate = self.loadvar(expr.value.name)
iterobj, _ = self.unpack_iter(iterstate)
item = self.pyapi.iter_next(iterobj)
# TODO need to add exception
self.check_error(item)
self.set_iter_valid(iterstate, item)
return item
elif expr.op == 'itervalid':
iterstate = self.loadvar(expr.value.name)
_, valid = self.unpack_iter(iterstate)
return self.builder.trunc(valid, Type.int(1))
elif expr.op == 'getitem':
target = self.loadvar(expr.target.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(target, index)
self.check_error(res)
return res
elif expr.op == 'getslice':
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
else:
raise NotImplementedError(expr)
def lower_const(self, const):
if isinstance(const, str):
ret = self.pyapi.string_from_string_and_size(const)
self.check_error(ret)
return ret
elif isinstance(const, complex):
real = self.context.get_constant(types.float64, const.real)
imag = self.context.get_constant(types.float64, const.imag)
ret = self.pyapi.complex_from_doubles(real, imag)
self.check_error(ret)
return ret
elif isinstance(const, float):
fval = self.context.get_constant(types.float64, const)
ret = self.pyapi.float_from_double(fval)
self.check_error(ret)
return ret
elif isinstance(const, int):
if utils.bit_length(const) >= 64:
raise ValueError("Integer is too big to be lowered")
ival = self.context.get_constant(types.intp, const)
return self.pyapi.long_from_ssize_t(ival)
elif isinstance(const, tuple):
items = [self.lower_const(i) for i in const]
return self.pyapi.tuple_pack(items)
elif const is Ellipsis:
return self.get_builtin_obj("Ellipsis")
elif const is None:
return self.pyapi.make_none()
else:
raise NotImplementedError(type(const))
def lower_global(self, name, value):
"""
1) Check global scope dictionary.
2) Check __builtins__.
2a) is it a dictionary (for non __main__ module)
2b) is it a module (for __main__ module)
"""
moddict = self.pyapi.get_module_dict()
obj = self.pyapi.dict_getitem_string(moddict, name)
self.incref(obj) # obj is borrowed
if hasattr(builtins, name):
obj_is_null = self.is_null(obj)
bbelse = self.builder.basic_block
with cgutils.ifthen(self.builder, obj_is_null):
mod = self.pyapi.dict_getitem_string(moddict, "__builtins__")
builtin = self.builtin_lookup(mod, name)
bbif = self.builder.basic_block
retval = self.builder.phi(self.pyapi.pyobj)
retval.add_incoming(obj, bbelse)
retval.add_incoming(builtin, bbif)
else:
retval = obj
with cgutils.if_unlikely(self.builder, self.is_null(retval)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
self.incref(retval)
return retval
# -------------------------------------------------------------------------
def get_builtin_obj(self, name):
moddict = self.pyapi.get_module_dict()
mod = self.pyapi.dict_getitem_string(moddict, "__builtins__")
return self.builtin_lookup(mod, name)
def builtin_lookup(self, mod, name):
"""
Args
----
mod:
The __builtins__ dictionary or module
name: str
The object to lookup
"""
fromdict = self.pyapi.dict_getitem_string(mod, name)
self.incref(fromdict) # fromdict is borrowed
bbifdict = self.builder.basic_block
with cgutils.if_unlikely(self.builder, self.is_null(fromdict)):
            # This happens if we are using the __main__ module
frommod = self.pyapi.object_getattr_string(mod, name)
with cgutils.if_unlikely(self.builder, self.is_null(frommod)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
bbifmod = self.builder.basic_block
builtin = self.builder.phi(self.pyapi.pyobj)
builtin.add_incoming(fromdict, bbifdict)
builtin.add_incoming(frommod, bbifmod)
return builtin
def pack_iter(self, obj):
iterstate = PyIterState(self.context, self.builder)
iterstate.iterator = obj
iterstate.valid = cgutils.true_byte
return iterstate._getpointer()
def unpack_iter(self, state):
iterstate = PyIterState(self.context, self.builder, ref=state)
return tuple(iterstate)
def set_iter_valid(self, state, item):
iterstate = PyIterState(self.context, self.builder, ref=state)
iterstate.valid = cgutils.as_bool_byte(self.builder,
cgutils.is_not_null(self.builder,
item))
with cgutils.if_unlikely(self.builder, self.is_null(item)):
self.check_occurred()
def check_occurred(self):
err_occurred = cgutils.is_not_null(self.builder,
self.pyapi.err_occurred())
with cgutils.if_unlikely(self.builder, err_occurred):
self.return_exception_raised()
def check_error(self, obj):
with cgutils.if_unlikely(self.builder, self.is_null(obj)):
self.return_exception_raised()
return obj
def is_null(self, obj):
return cgutils.is_null(self.builder, obj)
def return_exception_raised(self):
self.builder.branch(self.ehblock)
def return_error_occurred(self):
self.cleanup()
self.context.return_exc(self.builder)
def getvar(self, name, ltype=None):
if name not in self.varmap:
self.varmap[name] = self.alloca(name, ltype=ltype)
return self.varmap[name]
def loadvar(self, name):
ptr = self.getvar(name)
return self.builder.load(ptr)
def storevar(self, value, name):
"""
        Stores an LLVM value, allocating a stack slot if necessary.
        The LLVM value can be of arbitrary type.
"""
ptr = self.getvar(name, ltype=value.type)
old = self.builder.load(ptr)
assert value.type == ptr.type.pointee, (str(value.type),
str(ptr.type.pointee))
self.builder.store(value, ptr)
# Safe to call decref even on non python object
self.decref(old)
def cleanup(self):
for var in utils.dict_itervalues(self.varmap):
self.decref(self.builder.load(var))
def alloca(self, name, ltype=None):
"""
Allocate a stack slot and initialize it to NULL.
The default is to allocate a pyobject pointer.
Use ``ltype`` to override.
"""
if ltype is None:
ltype = self.context.get_value_type(types.pyobject)
bb = self.builder.basic_block
self.builder.position_at_end(self.entry_block)
ptr = self.builder.alloca(ltype, name=name)
self.builder.store(cgutils.get_null_value(ltype), ptr)
self.builder.position_at_end(bb)
return ptr
def incref(self, value):
self.pyapi.incref(value)
def decref(self, value):
"""
        This is allowed to be called on a non-pyobject pointer, in which case
        no code is inserted.
        If the value is a PyIterState, it unpacks the structure and decrefs
        the iterator.
"""
lpyobj = self.context.get_value_type(types.pyobject)
if value.type.kind == lc.TYPE_POINTER:
if value.type != lpyobj:
pass
#raise AssertionError(value.type)
# # Handle PyIterState
# not_null = cgutils.is_not_null(self.builder, value)
# with cgutils.if_likely(self.builder, not_null):
# iterstate = PyIterState(self.context, self.builder,
# value=value)
# value = iterstate.iterator
# self.pyapi.decref(value)
else:
self.pyapi.decref(value)
class PyIterState(cgutils.Structure):
_fields = [
("iterator", types.pyobject),
("valid", types.boolean),
]
########NEW FILE########
__FILENAME__ = macro
"""
Macro handling passes
Macros are expanded on block-by-block
"""
from __future__ import absolute_import, print_function, division
from numba import ir
def expand_macros(blocks):
constants = {}
for blk in blocks.values():
module_getattr_folding(constants, blk)
expand_macros_in_block(constants, blk)
def module_getattr_folding(constants, block):
for inst in block.body:
if isinstance(inst, ir.Assign):
rhs = inst.value
if isinstance(rhs, ir.Global):
constants[inst.target.name] = rhs.value
elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
if rhs.value.name in constants:
base = constants[rhs.value.name]
constants[inst.target.name] = getattr(base, rhs.attr)
elif isinstance(rhs, ir.Const):
constants[inst.target.name] = rhs.value
elif isinstance(rhs, ir.Var) and rhs.name in constants:
constants[inst.target.name] = constants[rhs.name]
def expand_macros_in_block(constants, block):
calls = []
for inst in block.body:
if isinstance(inst, ir.Assign):
rhs = inst.value
if isinstance(rhs, ir.Expr) and rhs.op == 'call':
callee = rhs.func
macro = constants.get(callee.name)
if isinstance(macro, Macro):
# Rewrite calling macro
assert macro.callable
calls.append((inst, macro))
args = [constants[arg.name] for arg in rhs.args]
kws = dict((k, constants[v.name]) for k, v in rhs.kws)
result = macro.func(*args, **kws)
if result:
# Insert a new function
result.loc = rhs.loc
inst.value = ir.Expr.call(func=result, args=rhs.args,
kws=rhs.kws, loc=rhs.loc)
elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
# Rewrite get attribute to macro call
# Non-calling macro must be triggered by get attribute
base = constants.get(rhs.value.name)
if base:
value = getattr(base, rhs.attr)
if isinstance(value, Macro):
macro = value
if not macro.callable:
intr = ir.Intrinsic(macro.name, macro.func, args=())
inst.value = ir.Expr.call(func=intr, args=(),
kws=(), loc=rhs.loc)
class Macro(object):
"""A macro object is expanded to a function call
"""
__slots__ = 'name', 'func', 'callable', 'argnames'
def __init__(self, name, func, callable=False, argnames=None):
self.name = name
self.func = func
self.callable = callable
self.argnames = argnames
def __repr__(self):
return '<macro %s -> %s>' % (self.name, self.func)
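# Usage sketch (hypothetical names): a callable macro such as
#     Macro('grid', expand_grid_call, callable=True, argnames=['ndim'])
# is rewritten by expand_macros_in_block() at its call sites, whereas a
# non-callable macro, e.g. Macro('threadIdx.x', tid_x_intrinsic), is expanded
# into an intrinsic call when reached through a getattr on a known constant.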
########NEW FILE########
__FILENAME__ = decorators
from __future__ import print_function, division, absolute_import
from .ufuncbuilder import UFuncBuilder, GUFuncBuilder
from numba.targets.registry import TargetRegistry
class Vectorize(object):
target_registry = TargetRegistry({'cpu': UFuncBuilder})
def __new__(cls, func, **kws):
target = kws.pop('target', 'cpu')
try:
imp = cls.target_registry[target]
except KeyError:
raise ValueError("Unsupported target: %s" % target)
return imp(func, kws)
class GUVectorize(object):
target_registry = TargetRegistry({'cpu': GUFuncBuilder})
def __new__(cls, func, signature, **kws):
target = kws.pop('target', 'cpu')
try:
imp = cls.target_registry[target]
except KeyError:
raise ValueError("Unsupported target: %s" % target)
return imp(func, signature, kws)
def vectorize(ftylist, **kws):
"""vectorize(ftylist[, target='cpu', [**kws]])
A decorator to create numpy ufunc object from Numba compiled code.
Args
-----
ftylist: iterable
An iterable of type signatures, which are either
        function type objects or strings describing the
        function type.
target: str
        A string for code generation target. Defaults to "cpu".
Returns
--------
A NumPy universal function
Example
-------
@vectorize(['float32(float32, float32)',
'float64(float64, float64)'])
def sum(a, b):
return a + b
"""
if isinstance(ftylist, str):
# Common user mistake
ftylist = [ftylist]
def wrap(func):
vec = Vectorize(func, **kws)
for fty in ftylist:
vec.add(fty)
return vec.build_ufunc()
return wrap
def guvectorize(ftylist, signature, **kws):
"""guvectorize(ftylist, signature, [, target='cpu', [**kws]])
    A decorator to create a NumPy generalized-ufunc object from Numba compiled
code.
Args
-----
ftylist: iterable
An iterable of type signatures, which are either
        function type objects or strings describing the
        function type.
signature: str
        A NumPy generalized-ufunc signature.
e.g. "(m, n), (n, p)->(m, p)"
target: str
A string for code generation target. Defaults to "cpu".
Returns
--------
    A NumPy generalized universal function
Example
-------
@guvectorize(['void(int32[:,:], int32[:,:], int32[:,:])',
'void(float32[:,:], float32[:,:], float32[:,:])'],
'(x, y),(x, y)->(x, y)')
        def add_2d_array(a, b, c):
for i in range(c.shape[0]):
for j in range(c.shape[1]):
c[i, j] = a[i, j] + b[i, j]
"""
if isinstance(ftylist, str):
# Common user mistake
ftylist = [ftylist]
def wrap(func):
guvec = GUVectorize(func, signature, **kws)
for fty in ftylist:
guvec.add(fty)
return guvec.build_ufunc()
return wrap
########NEW FILE########
__FILENAME__ = gufunc
from . import ufuncbuilder
from numba import llvm_types
from llvm_cbuilder import CFuncRef, CStruct, CDefinition
from llvm_cbuilder import shortnames as C
from numba.codegen.llvmcontext import LLVMContextManager
from numba.vectorize import _internal
from numba import decorators
import numpy as np
class _GeneralizedUFuncFromFunc(ufuncbuilder.CommonVectorizeFromFunc):
def datalist(self, lfunclist, ptrlist):
"""
Return a list of data pointers to the kernels.
"""
return [None] * len(lfunclist)
def __call__(self, lfunclist, tyslist, signature, engine,
vectorizer, **kws):
        '''create a generalized ufunc from a llvm.core.Function
lfunclist : a single or iterable of llvm.core.Function instance
engine : a llvm.ee.ExecutionEngine instance
return a function object which can be called from python.
'''
assert 'cuda_dispatcher' not in kws, "Temporary check for mismatch API"
kws['signature'] = signature
try:
iter(lfunclist)
except TypeError:
lfunclist = [lfunclist]
self.tyslist = tyslist
ptrlist = self._prepare_pointers(lfunclist, tyslist, engine, **kws)
inct = len(tyslist[0]) - 1
outct = 1
datlist = self.datalist(lfunclist, ptrlist)
        # Be careful: fromfunc does not provide full error checking yet.
        # If typenum is out of bounds, we get nasty memory corruption.
        # For instance, -1 for typenum will cause a segfault.
        # If elements of the type-list (2nd arg) are tuples instead,
        # there will also be memory corruption. (Seems like code rewrite.)
# Hold on to the vectorizer while the ufunc lives
tyslist = self.get_dtype_nums(tyslist)
gufunc = _internal.fromfuncsig(ptrlist, tyslist, inct, outct, datlist,
signature, vectorizer)
return gufunc
def build(self, lfunc, dtypes, signature):
def_guf = GUFuncEntry(dtypes, signature, CFuncRef(lfunc))
guf = def_guf(lfunc.module)
return guf
class GUFuncASTVectorize(object):
"""
Vectorizer for generalized ufuncs.
"""
def __init__(self, func, sig):
self.pyfunc = func
self.translates = []
self.signature = sig
self.gufunc_from_func = _GeneralizedUFuncFromFunc()
self.args_restypes = getattr(self, 'args_restypes', [])
self.signatures = []
self.llvm_context = LLVMContextManager()
def _get_lfunc_list(self):
return [t.lfunc for t in self.translates]
def build_ufunc(self):
assert self.translates, "No translation"
lfunclist = self._get_lfunc_list()
tyslist = self._get_tys_list()
engine = self._get_ee()
return self.gufunc_from_func(
lfunclist, tyslist, self.signature, engine,
vectorizer=self)
def get_argtypes(self, numba_func):
return numba_func.signature.args
def _get_ee(self):
return self.llvm_context.execution_engine
def add(self, restype=None, argtypes=None):
dec = decorators.jit(restype, argtypes, backend='ast')
numba_func = dec(self.pyfunc)
self.args_restypes.append(list(numba_func.signature.args) +
[numba_func.signature.return_type])
self.signatures.append((restype, argtypes, {}))
self.translates.append(numba_func)
def _get_tys_list(self):
types_lists = []
for numba_func in self.translates:
dtype_nums = []
types_lists.append(dtype_nums)
for arg_type in self.get_argtypes(numba_func):
if arg_type.is_array:
arg_type = arg_type.dtype
dtype_nums.append(arg_type.get_dtype())
return types_lists
GUFuncVectorize = GUFuncASTVectorize
_intp_ptr = C.pointer(C.intp)
class PyObjectHead(CStruct):
_fields_ = [
('ob_refcnt', C.intp),
        # NOTE: not an integer, just needs to match the definition in numba
('ob_type', C.void_p),
]
if llvm_types._trace_refs_:
# Account for _PyObject_HEAD_EXTRA
_fields_ = [
('ob_next', _intp_ptr),
('ob_prev', _intp_ptr),
] + _fields_
class PyArray(CStruct):
_fields_ = PyObjectHead._fields_ + [
('data', C.void_p),
('nd', C.int),
('dimensions', _intp_ptr),
('strides', _intp_ptr),
('base', C.void_p),
('descr', C.void_p),
('flags', C.int),
('weakreflist', C.void_p),
# ('maskna_dtype', C.void_p),
# ('maskna_data', C.void_p),
# ('maskna_strides', _intp_ptr),
]
def fakeit(self, dtype, data, dimensions, steps):
assert len(dimensions) == len(steps)
constant = self.parent.constant
self.ob_refcnt.assign(constant(C.intp, 1))
type_p = constant(C.py_ssize_t, id(np.ndarray))
self.ob_type.assign(type_p.cast(C.void_p))
self.base.assign(self.parent.constant_null(C.void_p))
dtype_p = constant(C.py_ssize_t, id(dtype))
self.descr.assign(dtype_p.cast(C.void_p))
self.flags.assign(constant(C.int, _internal.NPY_WRITEABLE))
self.data.assign(data)
self.nd.assign(constant(C.int, len(dimensions)))
ary_dims = self.parent.array(C.intp, len(dimensions) * 2)
ary_steps = ary_dims[len(dimensions):]
for i, dim in enumerate(dimensions):
ary_dims[i] = dim
self.dimensions.assign(ary_dims)
# ary_steps = self.parent.array(C.intp, len(steps))
for i, step in enumerate(steps):
ary_steps[i] = step
self.strides.assign(ary_steps)
def _parse_signature(sig):
inargs, outarg = sig.split('->')
for inarg in filter(bool, inargs.split(')')):
dimnames = inarg[1+inarg.find('('):].split(',')
yield dimnames
else:
dimnames = outarg.strip('()').split(',')
yield dimnames
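# For example (spaces already stripped, as specialize() does),
# _parse_signature('(m,n),(n,p)->(m,p)') yields ['m', 'n'], ['n', 'p'] and
# finally ['m', 'p'] for the output.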
class GUFuncEntry(CDefinition):
'''a generalized ufunc that wraps a numba jit'ed function
NOTE: Currently, this only works for array return type.
    And, the return type must be the last argument of the numba jit'ed function.
'''
_argtys_ = [
('args', C.pointer(C.char_p)),
('dimensions', C.pointer(C.intp)),
('steps', C.pointer(C.intp)),
('data', C.void_p),
]
def _outer_loop(self, dargs, dimensions, pyarys, steps, data):
# implement outer loop
innerfunc = self.depends(self.FuncDef)
with self.for_range(dimensions[0]) as (loop, idx):
args = []
for i, (arg, arg_type) in enumerate(zip(pyarys, innerfunc.handle.args)):
if C.pointer(PyArray.llvm_type()) != arg_type.type: # scalar
val = arg.data[0:].cast(C.pointer(arg_type.type)).load()
args.append(val)
else:
casted = arg.reference().cast(arg_type.type)
args.append(casted)
innerfunc(*args)
for i, ary in enumerate(pyarys):
ary.data.assign(ary.data[steps[i]:])
def body(self, args, dimensions, steps, data):
diminfo = list(_parse_signature(self.Signature))
n_pyarys = len(diminfo)
assert n_pyarys == len(self.dtypes)
# extract unique dimension names
dims = []
for grp in diminfo:
for it in grp:
if it not in dims:
if it:
dims.append(it)
# build pyarrays for argument to inner function
pyarys = [self.var(PyArray) for _ in range(n_pyarys)]
# populate pyarrays
step_offset = len(pyarys)
for i, (dtype, ary) in enumerate(zip(self.dtypes, pyarys)):
ary_ndim = len([x for x in diminfo[i] if x])
ary_dims = []
for k in diminfo[i]:
if k:
ary_dims.append(dimensions[1 + dims.index(k)])
else:
ary_dims.append(self.constant(C.intp, 0))
ary_steps = []
if not ary_ndim:
ary_steps.append(self.constant(C.intp, 0))
for j in range(ary_ndim):
ary_steps.append(steps[step_offset])
step_offset += 1
ary.fakeit(dtype, args[i], ary_dims, ary_steps)
self._outer_loop(args, dimensions, pyarys, steps, data)
self.ret()
@classmethod
def specialize(cls, dtypes, signature, func_def):
'''specialize to a workload
'''
signature = signature.replace(' ', '') # remove all spaces
cls.dtypes = dtypes
cls._name_ = 'gufunc_%s_%s'% (signature, func_def)
cls.FuncDef = func_def
cls.Signature = signature
########NEW FILE########
__FILENAME__ = sigparse
from __future__ import absolute_import, print_function, division
import tokenize
import string
from numba import utils
def parse_signature(sig):
'''Parse generalized ufunc signature.
    NOTE: ',' (COMMA) is a delimiter, not a separator.
          This means a trailing comma is legal.
'''
def stripws(s):
return ''.join(c for c in s if c not in string.whitespace)
def tokenizer(src):
def readline():
yield src
gen = readline()
return tokenize.generate_tokens(lambda: utils.iter_next(gen))
def parse(src):
tokgen = tokenizer(src)
while True:
tok = utils.iter_next(tokgen)
if tok[1] == '(':
symbols = []
while True:
tok = utils.iter_next(tokgen)
if tok[1] == ')':
break
elif tok[0] == tokenize.NAME:
symbols.append(tok[1])
elif tok[1] == ',':
continue
else:
raise ValueError('bad token in signature "%s"' % tok[1])
yield tuple(symbols)
tok = utils.iter_next(tokgen)
if tok[1] == ',':
continue
elif tokenize.ISEOF(tok[0]):
break
elif tokenize.ISEOF(tok[0]):
break
else:
raise ValueError('bad token in signature "%s"' % tok[1])
ins, outs = stripws(sig).split('->')
inputs = list(parse(ins))
outputs = list(parse(outs))
# check that all output symbols are defined in the inputs
isym = set()
osym = set()
for grp in inputs:
isym |= set(grp)
for grp in outputs:
osym |= set(grp)
diff = osym.difference(isym)
if diff:
raise NameError('undefined output symbols: %s' % ','.join(diff))
return inputs, outputs
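# For example, parse_signature('(m,n),(n,p)->(m,p)') returns
# ([('m', 'n'), ('n', 'p')], [('m', 'p')]); an output symbol that never
# appears among the inputs raises NameError.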
########NEW FILE########
__FILENAME__ = ufuncbuilder
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import warnings
import numpy as np
from numba.decorators import jit
from numba.targets.registry import target_registry
from numba.targets.descriptors import TargetDescriptor
from numba.targets.options import TargetOptions
from numba import utils, compiler, types, sigutils
from . import _internal
from .sigparse import parse_signature
from .wrappers import build_ufunc_wrapper, build_gufunc_wrapper
from numba.targets import registry
class UFuncTargetOptions(TargetOptions):
OPTIONS = {
"nopython" : bool,
}
class UFuncTarget(registry.CPUTarget):
options = UFuncTargetOptions
class UFuncDispatcher(object):
targetdescr = UFuncTarget()
def __init__(self, py_func, locals={}, targetoptions={}):
self.py_func = py_func
self.overloads = utils.UniqueDict()
self.targetoptions = targetoptions
self.locals = locals
def compile(self, sig, locals={}, **targetoptions):
locs = self.locals.copy()
locs.update(locals)
topt = self.targetoptions.copy()
topt.update(targetoptions)
if topt.get('nopython', True) == False:
raise TypeError("nopython option must be False")
topt['nopython'] = True
flags = compiler.Flags()
flags.set("no_compile")
self.targetdescr.options.parse_as_flags(flags, topt)
typingctx = self.targetdescr.typing_context
targetctx = self.targetdescr.target_context
args, return_type = sigutils.normalize_signature(sig)
cres = compiler.compile_extra(typingctx, targetctx, self.py_func,
args=args, return_type=return_type,
flags=flags, locals=locals)
self.overloads[cres.signature] = cres
return cres
target_registry['npyufunc'] = UFuncDispatcher
class UFuncBuilder(object):
def __init__(self, py_func, targetoptions={}):
self.py_func = py_func
self.nb_func = jit(target='npyufunc', **targetoptions)(py_func)
def add(self, sig=None, argtypes=None, restype=None):
# Handle argtypes
if argtypes is not None:
warnings.warn("Keyword argument argtypes is deprecated",
DeprecationWarning)
assert sig is None
if restype is None:
sig = tuple(argtypes)
else:
sig = restype(*argtypes)
# Actual work
self.nb_func.compile(sig)
def build_ufunc(self):
dtypelist = []
ptrlist = []
if not self.nb_func:
raise TypeError("No definition")
for sig, cres in self.nb_func.overloads.items():
dtypenums, ptr = self.build(cres)
dtypelist.append(dtypenums)
ptrlist.append(utils.longint(ptr))
datlist = [None] * len(ptrlist)
inct = len(cres.signature.args)
outct = 1
        # Be careful: fromfunc does not provide full error checking yet.
        # If typenum is out of bounds, we get nasty memory corruption.
        # For instance, -1 for typenum will cause a segfault.
        # If elements of the type-list (2nd arg) are tuples instead,
        # there will also be memory corruption. (Seems like code rewrite.)
ufunc = _internal.fromfunc(ptrlist, dtypelist, inct, outct, datlist)
return ufunc
def build(self, cres):
        # Build wrapper for the ufunc entry point
ctx = cres.target_context
signature = cres.signature
wrapper = build_ufunc_wrapper(ctx, cres.llvm_func, signature)
ctx.engine.add_module(wrapper.module)
ptr = ctx.engine.get_pointer_to_function(wrapper)
# Get dtypes
dtypenums = [np.dtype(a.name).num for a in signature.args]
dtypenums.append(np.dtype(signature.return_type.name).num)
return dtypenums, ptr
class GUFuncBuilder(object):
# TODO handle scalar
def __init__(self, py_func, signature, targetoptions={}):
self.py_func = py_func
self.nb_func = jit(target='npyufunc')(py_func)
self.signature = signature
self.sin, self.sout = parse_signature(signature)
self.targetoptions = targetoptions
def add(self, sig=None, argtypes=None, restype=None):
# Handle argtypes
if argtypes is not None:
warnings.warn("Keyword argument argtypes is deprecated",
DeprecationWarning)
assert sig is None
if restype is None:
sig = tuple(argtypes)
else:
sig = restype(*argtypes)
# Actual work begins
cres = self.nb_func.compile(sig, **self.targetoptions)
if cres.signature.return_type != types.void:
raise TypeError("gufunc kernel must have void return type")
def build_ufunc(self):
dtypelist = []
ptrlist = []
if not self.nb_func:
raise TypeError("No definition")
for sig, cres in self.nb_func.overloads.items():
dtypenums, ptr = self.build(cres)
dtypelist.append(dtypenums)
ptrlist.append(utils.longint(ptr))
datlist = [None] * len(ptrlist)
inct = len(self.sin)
outct = len(self.sout)
ufunc = _internal.fromfuncsig(ptrlist, dtypelist, inct, outct, datlist,
self.signature)
return ufunc
def build(self, cres):
        # Build wrapper for the ufunc entry point
ctx = cres.target_context
signature = cres.signature
wrapper = build_gufunc_wrapper(ctx, cres.llvm_func, signature,
self.sin, self.sout)
ctx.engine.add_module(wrapper.module)
ptr = ctx.engine.get_pointer_to_function(wrapper)
# Get dtypes
dtypenums = []
for a in signature.args:
if isinstance(a, types.Array):
ty = a.dtype
else:
ty = a
dtypenums.append(np.dtype(ty.name).num)
return dtypenums, ptr
########NEW FILE########
__FILENAME__ = wrappers
from __future__ import print_function, division, absolute_import
from llvm.core import Type, Builder
from numba import types, cgutils
def build_ufunc_wrapper(context, func, signature):
"""
Wrap the scalar function with a loop that iterates over the arguments
"""
module = func.module
byte_t = Type.int(8)
byte_ptr_t = Type.pointer(byte_t)
byte_ptr_ptr_t = Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
intp_ptr_t = Type.pointer(intp_t)
fnty = Type.function(Type.void(), [byte_ptr_ptr_t, intp_ptr_t,
intp_ptr_t, byte_ptr_t])
wrapper = module.add_function(fnty, "__ufunc__." + func.name)
arg_args, arg_dims, arg_steps, arg_data = wrapper.args
arg_args.name = "args"
arg_dims.name = "dims"
arg_steps.name = "steps"
arg_data.name = "data"
builder = Builder.new(wrapper.append_basic_block("entry"))
loopcount = builder.load(arg_dims, name="loopcount")
actual_args = context.get_arguments(func)
# Prepare inputs
arrays = []
for i, typ in enumerate(signature.args):
arrays.append(UArrayArg(context, builder, arg_args, arg_steps, i,
context.get_argument_type(typ)))
# Prepare output
out = UArrayArg(context, builder, arg_args, arg_steps, len(actual_args),
context.get_value_type(signature.return_type))
# Loop
with cgutils.for_range(builder, loopcount, intp=intp_t) as ind:
# Load
elems = [ary.load(ind) for ary in arrays]
# Compute
status, retval = context.call_function(builder, func,
signature.return_type,
signature.args, elems)
        # Ignore error status and store the result
# Store
if out.byref:
retval = builder.load(retval)
out.store(retval, ind)
builder.ret_void()
return wrapper
class UArrayArg(object):
def __init__(self, context, builder, args, steps, i, argtype):
# Get data
p = builder.gep(args, [context.get_constant(types.intp, i)])
if cgutils.is_struct_ptr(argtype):
self.byref = True
self.data = builder.bitcast(builder.load(p), argtype)
else:
self.byref = False
self.data = builder.bitcast(builder.load(p), Type.pointer(argtype))
# Get step
p = builder.gep(steps, [context.get_constant(types.intp, i)])
self.step = builder.load(p)
self.builder = builder
def load(self, ind):
intp_t = ind.type
addr = self.builder.ptrtoint(self.data, intp_t)
addr_off = self.builder.add(addr, self.builder.mul(self.step, ind))
ptr = self.builder.inttoptr(addr_off, self.data.type)
if self.byref:
return ptr
else:
return self.builder.load(ptr)
def store(self, value, ind):
addr = self.builder.ptrtoint(self.data, ind.type)
addr_off = self.builder.add(addr, self.builder.mul(self.step, ind))
ptr = self.builder.inttoptr(addr_off, self.data.type)
assert ptr.type.pointee == value.type
self.builder.store(value, ptr)
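# Descriptive note: element i of a ufunc argument lives at data + i * step
# bytes, so load()/store() recompute that byte address each iteration instead
# of advancing a typed pointer.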
def build_gufunc_wrapper(context, func, signature, sin, sout):
module = func.module
byte_t = Type.int(8)
byte_ptr_t = Type.pointer(byte_t)
byte_ptr_ptr_t = Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
intp_ptr_t = Type.pointer(intp_t)
fnty = Type.function(Type.void(), [byte_ptr_ptr_t, intp_ptr_t,
intp_ptr_t, byte_ptr_t])
wrapper = module.add_function(fnty, "__gufunc__." + func.name)
arg_args, arg_dims, arg_steps, arg_data = wrapper.args
arg_args.name = "args"
arg_dims.name = "dims"
arg_steps.name = "steps"
arg_data.name = "data"
builder = Builder.new(wrapper.append_basic_block("entry"))
loopcount = builder.load(arg_dims, name="loopcount")
# Unpack shapes
unique_syms = set()
for grp in (sin, sout):
for syms in grp:
unique_syms |= set(syms)
sym_map = {}
for grp in (sin, sout):
        for syms in grp:
for s in syms:
if s not in sym_map:
sym_map[s] = len(sym_map)
sym_dim = {}
for s, i in sym_map.items():
sym_dim[s] = builder.load(builder.gep(arg_dims,
[context.get_constant(types.intp,
i + 1)]))
# Prepare inputs
arrays = []
step_offset = len(sin) + len(sout)
for i, (typ, sym) in enumerate(zip(signature.args, sin + sout)):
ary = GUArrayArg(context, builder, arg_args, arg_dims, arg_steps, i,
step_offset, typ, sym, sym_dim)
step_offset += ary.ndim
arrays.append(ary)
# Loop
with cgutils.for_range(builder, loopcount, intp=intp_t) as ind:
args = [a.array_value for a in arrays]
status, retval = context.call_function(builder, func,
signature.return_type,
signature.args, args)
# ignore status
# ignore retval
for a in arrays:
a.next(ind)
builder.ret_void()
wrapper.verify()
return wrapper
class GUArrayArg(object):
def __init__(self, context, builder, args, dims, steps, i, step_offset,
typ, syms, sym_dim):
if isinstance(typ, types.Array):
self.dtype = typ.dtype
else:
self.dtype = typ
self.syms = syms
self.ndim = len(syms)
core_step_ptr = builder.gep(steps, [context.get_constant(types.intp,
i)],
name="core.step.ptr")
self.core_step = builder.load(core_step_ptr)
self.strides = []
for j in range(self.ndim):
step = builder.gep(steps, [context.get_constant(types.intp,
step_offset + j)],
name="step.ptr")
self.strides.append(builder.load(step))
self.shape = []
for s in syms:
self.shape.append(sym_dim[s])
data = builder.load(builder.gep(args,
[context.get_constant(types.intp,
i)],
name="data.ptr"),
name="data")
self.data = data
arytyp = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A")
arycls = context.make_array(arytyp)
self.array = arycls(context, builder)
self.array.data = builder.bitcast(self.data, self.array.data.type)
self.array.shape = cgutils.pack_array(builder, self.shape)
self.array.strides = cgutils.pack_array(builder, self.strides)
self.array_value = self.array._getpointer()
self.builder = builder
def next(self, i):
intp_t = i.type
array_data = self.array.data
addr = self.builder.ptrtoint(array_data, intp_t)
addr_new = self.builder.add(addr, self.core_step)
self.array.data = self.builder.inttoptr(addr_new, array_data.type)
########NEW FILE########
__FILENAME__ = numpy_support
from __future__ import print_function, division, absolute_import
import numpy
from numba import types, config
version = tuple(map(int, numpy.__version__.split('.')[:2]))
int_divbyzero_returns_zero = config.PYVERSION <= (3, 0)
FROM_DTYPE = {
numpy.dtype('bool'): types.boolean,
numpy.dtype('int8'): types.int8,
numpy.dtype('int16'): types.int16,
numpy.dtype('int32'): types.int32,
numpy.dtype('int64'): types.int64,
numpy.dtype('uint8'): types.uint8,
numpy.dtype('uint16'): types.uint16,
numpy.dtype('uint32'): types.uint32,
numpy.dtype('uint64'): types.uint64,
numpy.dtype('float32'): types.float32,
numpy.dtype('float64'): types.float64,
numpy.dtype('complex64'): types.complex64,
numpy.dtype('complex128'): types.complex128,
}
def from_dtype(dtype):
return FROM_DTYPE[dtype]
def is_arrayscalar(val):
return numpy.dtype(type(val)) in FROM_DTYPE
def map_arrayscalar_type(val):
return from_dtype(numpy.dtype(type(val)))
def is_array(val):
return isinstance(val, numpy.ndarray)
def map_layout(val):
if val.flags['C_CONTIGUOUS']:
layout = 'C'
elif val.flags['F_CONTIGUOUS']:
layout = 'F'
else:
layout = 'A'
return layout
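# Illustrative usage sketch (not part of the original module): map a NumPy
# array to Numba type information using the helpers above.
if __name__ == '__main__':
    a = numpy.zeros((3, 4), dtype=numpy.float64)
    assert is_array(a)
    print(from_dtype(a.dtype))                      # types.float64
    print(map_layout(a))                            # 'C'
    print(map_arrayscalar_type(numpy.float32(1)))   # types.float32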
########NEW FILE########
__FILENAME__ = compiler
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import logging
import os
import sys
import functools
import llvm.core as lc
import llvm.ee as le
import llvm.passes as lp
from numba.utils import IS_PY3
from . import llvm_types as lt
from .decorators import registry as export_registry
from numba.compiler import compile_extra, Flags
from numba.targets.registry import CPUTarget
logger = logging.getLogger(__name__)
__all__ = ['which', 'find_linker', 'find_args', 'find_shared_ending', 'Compiler']
NULL = lc.Constant.null(lt._void_star)
ZERO = lc.Constant.int(lt._int32, 0)
ONE = lc.Constant.int(lt._int32, 1)
METH_VARARGS_AND_KEYWORDS = lc.Constant.int(lt._int32, 1|2)
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, fname)
if is_exe(exe_file):
return exe_file
return None
_configs = {
'win': ("link.exe", ("/dll",), '.dll'),
'dar': ("libtool", ("-dynamic", "-undefined", "dynamic_lookup"), '.so'),
'default': ("ld", ("-shared",), ".so")
}
def get_configs(arg):
return _configs.get(sys.platform[:3], _configs['default'])[arg]
find_linker = functools.partial(get_configs, 0)
find_args = functools.partial(get_configs, 1)
find_shared_ending = functools.partial(get_configs, 2)
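# For example (illustrative, not from the original source): on Linux these
# resolve to find_linker() == "ld", find_args() == ("-shared",) and
# find_shared_ending() == ".so"; on Windows ("win32") they resolve to
# "link.exe", ("/dll",) and ".dll".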
def get_header():
import numpy
import textwrap
return textwrap.dedent("""\
#include <stdint.h>
#ifndef HAVE_LONGDOUBLE
#define HAVE_LONGDOUBLE %d
#endif
typedef struct {
float real;
float imag;
} complex64;
typedef struct {
double real;
double imag;
} complex128;
#if HAVE_LONGDOUBLE
typedef struct {
long double real;
long double imag;
} complex256;
#endif
typedef float float32;
typedef double float64;
#if HAVE_LONGDOUBLE
typedef long double float128;
#endif
""" % hasattr(numpy, 'complex256'))
class _Compiler(object):
"""A base class to compile Python modules to a single shared library or
extension module.
:param inputs: input file(s).
:type inputs: iterable
:param module_name: the name of the exported module.
"""
#: Structure used to describe a method of an extension type.
#: struct PyMethodDef {
#: const char *ml_name; /* The name of the built-in function/method */
#: PyCFunction ml_meth; /* The C function that implements it */
#: int ml_flags; /* Combination of METH_xxx flags, which mostly
#: describe the args expected by the C func */
#: const char *ml_doc; /* The __doc__ attribute, or NULL */
#: };
method_def_ty = lc.Type.struct((lt._int8_star,
lt._void_star,
lt._int32,
lt._int8_star))
method_def_ptr = lc.Type.pointer(method_def_ty)
def __init__(self, inputs, module_name='numba_exported'):
self.inputs = inputs
self.module_name = module_name
self.export_python_wrap = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
del self.exported_signatures[:]
def _emit_python_wrapper(self, llvm_module):
"""Emit generated Python wrapper and extension module code.
"""
raise NotImplementedError
def _cull_exports(self):
"""Read all the exported functions/modules in the translator
environment, and join them into a single LLVM module.
Resets the export environment afterwards.
"""
self.exported_signatures = export_registry
# Create new module containing everything
llvm_module = lc.Module.new(self.module_name)
# Compile all exported functions
typing_ctx = CPUTarget.typing_context
# TODO Use non JIT-ing target
target_ctx = CPUTarget.target_context
modules = []
flags = Flags()
if not self.export_python_wrap:
flags.set("no_compile")
for entry in self.exported_signatures:
cres = compile_extra(typing_ctx, target_ctx, entry.function,
entry.signature.args,
entry.signature.return_type, flags,
locals={})
if self.export_python_wrap:
module = cres.llvm_func.module
cres.llvm_func.linkage = lc.LINKAGE_INTERNAL
wrappername = "wrapper." + cres.llvm_func.name
wrapper = module.get_function_named(wrappername)
wrapper.name = entry.symbol
else:
cres.llvm_func.name = entry.symbol
modules.append(cres.llvm_module)
# Link all exported functions
for mod in modules:
llvm_module.link_in(mod, preserve=self.export_python_wrap)
# Optimize
tm = le.TargetMachine.new(opt=3)
pms = lp.build_pass_managers(tm=tm, opt=3, loop_vectorize=True,
fpm=False)
pms.pm.run(llvm_module)
if self.export_python_wrap:
self._emit_python_wrapper(llvm_module)
print(llvm_module)
return llvm_module
def _process_inputs(self, wrap=False, **kws):
for ifile in self.inputs:
with open(ifile) as fin:
exec(compile(fin.read(), ifile, 'exec'))
self.export_python_wrap = wrap
def write_llvm_bitcode(self, output, **kws):
self._process_inputs(**kws)
lmod = self._cull_exports()
with open(output, 'wb') as fout:
lmod.to_bitcode(fout)
def write_native_object(self, output, **kws):
self._process_inputs(**kws)
lmod = self._cull_exports()
tm = le.TargetMachine.new(opt=3, reloc=le.RELOC_PIC, features='-avx')
with open(output, 'wb') as fout:
objfile = tm.emit_object(lmod)
fout.write(objfile)
def emit_type(self, tyobj):
ret_val = str(tyobj)
if 'int' in ret_val:
if ret_val.endswith(('8', '16', '32', '64')):
ret_val += "_t"
return ret_val
def emit_header(self, output):
fname, ext = os.path.splitext(output)
with open(fname + '.h', 'w') as fout:
fout.write(get_header())
fout.write("\n/* Prototypes */\n")
for export_entry in export_registry:
name = export_entry.symbol
restype = self.emit_type(export_entry.signature.return_type)
args = ", ".join(self.emit_type(argtype)
for argtype in export_entry.signature.args)
fout.write("extern %s %s(%s);\n" % (restype, name, args))
def _emit_method_array(self, llvm_module):
"""Collect exported methods and emit a PyMethodDef array.
:returns: a pointer to the PyMethodDef array.
"""
method_defs = []
for entry in self.exported_signatures:
name = entry.symbol
lfunc = llvm_module.get_function_named(name)
method_name_init = lc.Constant.stringz(name)
method_name = llvm_module.add_global_variable(
method_name_init.type, '.method_name')
method_name.initializer = method_name_init
method_name.linkage = lc.LINKAGE_EXTERNAL
method_def_const = lc.Constant.struct((lc.Constant.gep(method_name, [ZERO, ZERO]),
lc.Constant.bitcast(lfunc, lt._void_star),
METH_VARARGS_AND_KEYWORDS,
NULL))
method_defs.append(method_def_const)
sentinel = lc.Constant.struct([NULL, NULL, ZERO, NULL])
method_defs.append(sentinel)
method_array_init = lc.Constant.array(self.method_def_ty, method_defs)
method_array = llvm_module.add_global_variable(method_array_init.type, '.module_methods')
method_array.initializer = method_array_init
method_array.linkage = lc.LINKAGE_INTERNAL
method_array_ptr = lc.Constant.gep(method_array, [ZERO, ZERO])
return method_array_ptr
class CompilerPy2(_Compiler):
@property
def module_create_definition(self):
"""Return the signature and name of the function to initialize the module.
"""
signature = lc.Type.function(lt._pyobject_head_struct_p,
(lt._int8_star,
self.method_def_ptr,
lt._int8_star,
lt._pyobject_head_struct_p,
lt._int32))
name = "Py_InitModule4"
if lt._trace_refs_:
name += "TraceRefs"
if lt._plat_bits == 64:
name += "_64"
return signature, name
@property
def module_init_definition(self):
"""Return the signature and name of the function to initialize the extension.
"""
return lc.Type.function(lc.Type.void(), ()), "init" + self.module_name
def _emit_python_wrapper(self, llvm_module):
# Define the module initialization function.
mod_init_fn = llvm_module.add_function(*self.module_init_definition)
entry = mod_init_fn.append_basic_block('Entry')
builder = lc.Builder.new(entry)
# Python C API module creation function.
create_module_fn = llvm_module.add_function(*self.module_create_definition)
create_module_fn.linkage = lc.LINKAGE_EXTERNAL
# Define a constant string for the module name.
mod_name_init = lc.Constant.stringz(self.module_name)
mod_name_const = llvm_module.add_global_variable(mod_name_init.type, '.module_name')
mod_name_const.initializer = mod_name_init
mod_name_const.linkage = lc.LINKAGE_INTERNAL
mod = builder.call(create_module_fn,
(lc.Constant.gep(mod_name_const, [ZERO, ZERO]),
self._emit_method_array(llvm_module),
NULL,
lc.Constant.null(lt._pyobject_head_struct_p),
lc.Constant.int(lt._int32, sys.api_version)))
builder.ret_void()
class CompilerPy3(_Compiler):
_ptr_fun = lambda ret, *args: lc.Type.pointer(lc.Type.function(ret, args))
#: typedef int (*visitproc)(PyObject *, void *);
visitproc_ty = _ptr_fun(lt._int8,
lt._pyobject_head_struct_p)
#: typedef int (*inquiry)(PyObject *);
inquiry_ty = _ptr_fun(lt._int8,
lt._pyobject_head_struct_p)
#: typedef int (*traverseproc)(PyObject *, visitproc, void *);
traverseproc_ty = _ptr_fun(lt._int8,
lt._pyobject_head_struct_p,
visitproc_ty,
lt._void_star)
# typedef void (*freefunc)(void *)
freefunc_ty = _ptr_fun(lt._int8,
lt._void_star)
# PyObject* (*m_init)(void);
m_init_ty = _ptr_fun(lt._int8)
_char_star = lt._int8_star
#: typedef struct PyModuleDef_Base {
#: PyObject_HEAD
#: PyObject* (*m_init)(void);
#: Py_ssize_t m_index;
#: PyObject* m_copy;
#: } PyModuleDef_Base;
module_def_base_ty = lc.Type.struct((lt._pyobject_head_struct_p,
lt._void_star,
m_init_ty,
lt._llvm_py_ssize_t,
lt._pyobject_head_struct_p))
#: This struct holds all information that is needed to create a module object.
#: typedef struct PyModuleDef{
#: PyModuleDef_Base m_base;
#: const char* m_name;
#: const char* m_doc;
#: Py_ssize_t m_size;
#: PyMethodDef *m_methods;
#: inquiry m_reload;
#: traverseproc m_traverse;
#: inquiry m_clear;
#: freefunc m_free;
#: }PyModuleDef;
module_def_ty = lc.Type.struct((module_def_base_ty,
_char_star,
_char_star,
lt._llvm_py_ssize_t,
_Compiler.method_def_ptr,
inquiry_ty,
traverseproc_ty,
inquiry_ty,
freefunc_ty))
@property
def module_create_definition(self):
"""Return the signature and name of the function to initialize the module
"""
signature = lc.Type.function(lt._pyobject_head_struct_p,
(lc.Type.pointer(self.module_def_ty),
lt._int32))
name = "PyModule_Create2"
return signature, name
@property
def module_init_definition(self):
"""Return the name and signature of the module
"""
signature = lc.Type.function(lt._pyobject_head_struct_p, ())
return signature, "PyInit_" + self.module_name
def _emit_python_wrapper(self, llvm_module):
# Figure out the Python C API module creation function, and
# get a LLVM function for it.
create_module_fn = llvm_module.add_function(*self.module_create_definition)
create_module_fn.linkage = lc.LINKAGE_EXTERNAL
# Define a constant string for the module name.
mod_name_init = lc.Constant.stringz(self.module_name)
mod_name_const = llvm_module.add_global_variable(mod_name_init.type, '.module_name')
mod_name_const.initializer = mod_name_init
mod_name_const.linkage = lc.LINKAGE_INTERNAL
mod_def_base_init = lc.Constant.struct(
(lc.Constant.null(lt._pyobject_head_struct_p), # PyObject_HEAD
lc.Constant.null(lt._void_star), # PyObject_HEAD
lc.Constant.null(self.m_init_ty), # m_init
lc.Constant.null(lt._llvm_py_ssize_t), # m_index
lc.Constant.null(lt._pyobject_head_struct_p), # m_copy
)
)
mod_def_base = llvm_module.add_global_variable(mod_def_base_init.type, '.module_def_base')
mod_def_base.initializer = mod_def_base_init
mod_def_base.linkage = lc.LINKAGE_INTERNAL
mod_def_init = lc.Constant.struct(
(mod_def_base_init, # m_base
lc.Constant.gep(mod_name_const, [ZERO, ZERO]), # m_name
lc.Constant.null(self._char_star), # m_doc
lc.Constant.int(lt._llvm_py_ssize_t, -1), # m_size
self._emit_method_array(llvm_module), # m_methods
lc.Constant.null(self.inquiry_ty), # m_reload
lc.Constant.null(self.traverseproc_ty), # m_traverse
lc.Constant.null(self.inquiry_ty), # m_clear
lc.Constant.null(self.freefunc_ty) # m_free
)
)
# Define a constant string for the module name.
mod_def = llvm_module.add_global_variable(mod_def_init.type, '.module_def')
mod_def.initializer = mod_def_init
mod_def.linkage = lc.LINKAGE_INTERNAL
# Define the module initialization function.
mod_init_fn = llvm_module.add_function(*self.module_init_definition)
entry = mod_init_fn.append_basic_block('Entry')
builder = lc.Builder.new(entry)
mod = builder.call(create_module_fn,
(mod_def,
lc.Constant.int(lt._int32, sys.api_version)))
# Test if module has been created correctly
cond_true = mod_init_fn.append_basic_block("cond_true")
cond_false = mod_init_fn.append_basic_block("cond_false")
builder.cbranch(builder.icmp(lc.IPRED_EQ, mod, NULL), cond_true, cond_false)
builder.position_at_end(cond_true)
builder.ret(NULL)
builder.position_at_end(cond_false)
builder.ret(mod)
Compiler = CompilerPy3 if IS_PY3 else CompilerPy2
########NEW FILE########
__FILENAME__ = decorators
from __future__ import print_function, absolute_import
import re
import inspect
from numba import sigutils
# It is okay for the registry to be a global because pycc is used as a
# standalone command-line tool.
registry = []
def export(prototype):
sym, sig = parse_prototype(prototype)
    def wrapped(func):
        module = inspect.getmodule(func).__name__
        signature = sigutils.parse_signature(sig)
        entry = ExportEntry(symbol=sym, signature=signature, function=func,
                            module=module)
        registry.append(entry)
        # Return the function unchanged so the decorated name stays usable
        return func
    return wrapped
def exportmany(prototypes):
    def wrapped(func):
        for proto in prototypes:
            export(proto)(func)
        return func
    return wrapped
# --------------------------------- Internal ---------------------------------
re_symbol = re.compile(r'[_a-z][_a-z0-9]*', re.I)
class ExportEntry(object):
"""
A simple record for exporting symbols.
"""
def __init__(self, symbol, signature, function, module):
self.symbol = symbol
self.signature = signature
self.function = function
self.module = module
def __repr__(self):
return "ExportEntry('%s', '%s')" % (self.symbol, self.signature)
def parse_prototype(text):
"""Separate the symbol and function-type in a a string with
"symbol function-type" (e.g. "mult float(float, float)")
Returns
---------
(symbol_string, functype_string)
"""
m = re_symbol.match(text)
if not m:
raise ValueError("Invalid function name for export prototype")
s = m.start(0)
e = m.end(0)
symbol = text[s:e]
functype = text[e + 1:]
return symbol, functype
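# Illustrative usage sketch (not part of the original module): the prototype
# string is split into the exported symbol name and the signature text, which
# @export then parses with sigutils and records in the global registry.  The
# signature syntax assumes the numba.types namespace accepts calls such as
# float64(float64, float64).
if __name__ == '__main__':
    print(parse_prototype("mult float64(float64, float64)"))
    # -> ('mult', 'float64(float64, float64)')

    @export("mult float64(float64, float64)")
    def mult(a, b):
        return a * b

    print(registry)   # one ExportEntry('mult', ...) recorded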
########NEW FILE########
__FILENAME__ = llvm_types
import sys
import ctypes
import struct as struct_
from llvm.core import Type
_trace_refs_ = hasattr(sys, 'getobjects')
_plat_bits = struct_.calcsize('@P') * 8
_int8 = Type.int(8)
_int32 = Type.int(32)
_void_star = Type.pointer(_int8)
_int8_star = _void_star
_pyobject_head_struct_p = _void_star
_sizeof_py_ssize_t = ctypes.sizeof(getattr(ctypes, 'c_size_t'))
_llvm_py_ssize_t = Type.int(_sizeof_py_ssize_t * 8)
########NEW FILE########
__FILENAME__ = pythonapi
from __future__ import print_function, division, absolute_import
from llvm.core import Type, Constant
import llvm.core as lc
import llvm.ee as le
from llvm import LLVMException
from numba.config import PYVERSION
import numba.ctypes_support as ctypes
from numba import types, utils, cgutils, _numpyadapt, _helperlib, assume
_PyNone = ctypes.c_ssize_t(id(None))
class NativeError(RuntimeError):
pass
@utils.runonce
def fix_python_api():
"""
Execute once to install special symbols into the LLVM symbol table
"""
le.dylib_add_symbol("Py_None", ctypes.addressof(_PyNone))
le.dylib_add_symbol("NumbaArrayAdaptor", _numpyadapt.get_ndarray_adaptor())
le.dylib_add_symbol("NumbaComplexAdaptor",
_helperlib.get_complex_adaptor())
le.dylib_add_symbol("NumbaNativeError", id(NativeError))
le.dylib_add_symbol("PyExc_NameError", id(NameError))
class PythonAPI(object):
def __init__(self, context, builder):
"""
        Note: May be called multiple times when lowering a function
"""
fix_python_api()
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulonglong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.longlong = self.ulonglong
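        # Signed and unsigned long long share one LLVM integer type; only the
        # conversion helpers applied to the value differ by signedness.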
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
# ------ Python API -----
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def dict_getitem_string(self, dic, name):
"""Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
return self.builder.call(fn, (exctype, msg))
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args, kws):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def long_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyLong_FromLong")
return self.builder.call(fn, [ival])
def long_from_ssize_t(self, ival):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyLong_FromSsize_t")
return self.builder.call(fn, [ival])
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
return self.builder.call(fn, [numobj])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_from_ulonglong(self, numobj):
fnty = Type.function(self.pyobj, [self.ulonglong])
fn = self._get_function(fnty, name="PyLong_FromUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_from_longlong(self, numobj):
fnty = Type.function(self.pyobj, [self.ulonglong])
fn = self._get_function(fnty, name="PyLong_FromLongLong")
return self.builder.call(fn, [numobj])
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def number_add(self, lhs, rhs):
fn = self._get_number_operator("Add")
return self.builder.call(fn, [lhs, rhs])
def number_subtract(self, lhs, rhs):
fn = self._get_number_operator("Subtract")
return self.builder.call(fn, [lhs, rhs])
def number_multiply(self, lhs, rhs):
fn = self._get_number_operator("Multiply")
return self.builder.call(fn, [lhs, rhs])
def number_divide(self, lhs, rhs):
assert PYVERSION < (3, 0)
fn = self._get_number_operator("Divide")
return self.builder.call(fn, [lhs, rhs])
def number_truedivide(self, lhs, rhs):
fn = self._get_number_operator("TrueDivide")
return self.builder.call(fn, [lhs, rhs])
def number_floordivide(self, lhs, rhs):
fn = self._get_number_operator("FloorDivide")
return self.builder.call(fn, [lhs, rhs])
def number_remainder(self, lhs, rhs):
fn = self._get_number_operator("Remainder")
return self.builder.call(fn, [lhs, rhs])
def number_lshift(self, lhs, rhs):
fn = self._get_number_operator("Lshift")
return self.builder.call(fn, [lhs, rhs])
def number_rshift(self, lhs, rhs):
fn = self._get_number_operator("Rshift")
return self.builder.call(fn, [lhs, rhs])
def number_and(self, lhs, rhs):
fn = self._get_number_operator("And")
return self.builder.call(fn, [lhs, rhs])
def number_or(self, lhs, rhs):
fn = self._get_number_operator("Or")
return self.builder.call(fn, [lhs, rhs])
def number_xor(self, lhs, rhs):
fn = self._get_number_operator("Xor")
return self.builder.call(fn, [lhs, rhs])
def number_power(self, lhs, rhs):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, "PyNumber_Power")
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to Python source Include/object.h for macros definition
of the opid.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
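        # The list order mirrors the CPython opid macros Py_LT..Py_GE (0..5),
        # so the index of the operator string is the opid to pass in.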
opid = ops.index(opstr)
assert 0 <= opid < len(ops)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getitem(self, obj, key):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
if PYVERSION >= (3, 0):
fname = "PyUnicode_AsUTF8"
else:
fname = "PyString_AsString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_from_string_and_size(self, string):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
if PYVERSION >= (3, 0):
fname = "PyUnicode_FromStringAndSize"
else:
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.builder.call(fn, [cstr, sz])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_setitem(self, seq, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [seq, idx, val])
def dict_new(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def make_none(self):
obj = self._get_object("Py_None")
self.incref(obj)
return obj
def borrow_none(self):
obj = self._get_object("Py_None")
return obj
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_WriteStdout")
return self.builder.call(fn, (fmt,) + args)
# ------ utils -----
def _get_object(self, name):
try:
gv = self.module.get_global_variable_named(name)
except LLVMException:
gv = self.module.add_global_variable(self.pyobj, name)
return self.builder.load(gv)
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s\n")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
not_null = cgutils.is_not_null(self.builder, seq)
with cgutils.if_likely(self.builder, not_null):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
not_null = cgutils.is_not_null(self.builder, dictobj)
with cgutils.if_likely(self.builder, not_null):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
def to_native_arg(self, obj, typ):
return self.to_native_value(obj, typ)
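    # "Unboxing": convert a PyObject* into the native LLVM representation of
    # ``typ``.  The reverse direction is handled by from_native_value() below.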
def to_native_value(self, obj, typ):
if isinstance(typ, types.Object) or typ == types.pyobject:
return obj
elif typ == types.boolean:
istrue = self.object_istrue(obj)
zero = Constant.null(istrue.type)
return self.builder.icmp(lc.ICMP_NE, istrue, zero)
elif typ in types.unsigned_domain:
longobj = self.number_long(obj)
ullval = self.long_as_ulonglong(longobj)
self.decref(longobj)
return self.builder.trunc(ullval,
self.context.get_argument_type(typ))
elif typ in types.signed_domain:
longobj = self.number_long(obj)
llval = self.long_as_longlong(longobj)
self.decref(longobj)
return self.builder.trunc(llval,
self.context.get_argument_type(typ))
elif typ == types.float32:
fobj = self.number_float(obj)
fval = self.float_as_double(fobj)
self.decref(fobj)
return self.builder.fptrunc(fval,
self.context.get_argument_type(typ))
elif typ == types.float64:
fobj = self.number_float(obj)
fval = self.float_as_double(fobj)
self.decref(fobj)
return fval
elif typ in (types.complex128, types.complex64):
cplxcls = self.context.make_complex(types.complex128)
cplx = cplxcls(self.context, self.builder)
pcplx = cplx._getpointer()
ok = self.complex_adaptor(obj, pcplx)
failed = cgutils.is_false(self.builder, ok)
with cgutils.if_unlikely(self.builder, failed):
self.builder.ret(self.get_null_object())
if typ == types.complex64:
c64cls = self.context.make_complex(typ)
c64 = c64cls(self.context, self.builder)
freal = self.context.cast(self.builder, cplx.real,
types.float64, types.float32)
fimag = self.context.cast(self.builder, cplx.imag,
types.float64, types.float32)
c64.real = freal
c64.imag = fimag
return c64._getvalue()
else:
return cplx._getvalue()
elif isinstance(typ, types.Array):
return self.to_native_array(typ, obj)
raise NotImplementedError(typ)
def from_native_return(self, val, typ):
return self.from_native_value(val, typ)
def from_native_value(self, val, typ):
if typ == types.pyobject:
return val
elif typ == types.boolean:
longval = self.builder.zext(val, self.long)
return self.bool_from_long(longval)
elif typ in types.unsigned_domain:
ullval = self.builder.zext(val, self.ulonglong)
return self.long_from_ulonglong(ullval)
elif typ in types.signed_domain:
ival = self.builder.sext(val, self.longlong)
return self.long_from_longlong(ival)
elif typ == types.float32:
dbval = self.builder.fpext(val, self.double)
return self.float_from_double(dbval)
elif typ == types.float64:
return self.float_from_double(val)
elif typ == types.complex128:
cmplxcls = self.context.make_complex(typ)
cval = cmplxcls(self.context, self.builder, value=val)
return self.complex_from_doubles(cval.real, cval.imag)
elif typ == types.complex64:
cmplxcls = self.context.make_complex(typ)
cval = cmplxcls(self.context, self.builder, value=val)
freal = self.context.cast(self.builder, cval.real,
types.float32, types.float64)
fimag = self.context.cast(self.builder, cval.imag,
types.float32, types.float64)
return self.complex_from_doubles(freal, fimag)
elif typ == types.none:
ret = self.make_none()
return ret
elif isinstance(typ, types.Optional):
return self.from_native_return(val, typ.type)
elif isinstance(typ, types.Array):
return self.from_native_array(typ, val)
elif isinstance(typ, types.UniTuple):
return self.from_unituple(typ, val)
raise NotImplementedError(typ)
def to_native_array(self, typ, ary):
# TODO check matching dtype.
        # Currently, a mismatching dtype still appears to work but can cause
        # memory corruption.
voidptr = Type.pointer(Type.int(8))
nativearycls = self.context.make_array(typ)
nativeary = nativearycls(self.context, self.builder)
aryptr = nativeary._getpointer()
ptr = self.builder.bitcast(aryptr, voidptr)
errcode = self.numba_array_adaptor(ary, ptr)
failed = cgutils.is_not_null(self.builder, errcode)
with cgutils.if_unlikely(self.builder, failed):
# TODO
self.builder.unreachable()
return self.builder.load(aryptr)
def from_native_array(self, typ, ary):
assert assume.return_argument_array_only
nativearycls = self.context.make_array(typ)
nativeary = nativearycls(self.context, self.builder, value=ary)
parent = nativeary.parent
self.incref(parent)
return parent
def from_unituple(self, typ, val):
tuple_val = self.tuple_new(typ.count)
for i in range(typ.count):
item = self.builder.extract_value(val, i)
obj = self.from_native_value(item, typ.dtype)
self.tuple_setitem(tuple_val, i, obj)
return tuple_val
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
def numba_array_adaptor(self, ary, ptr):
voidptr = Type.pointer(Type.int(8))
fnty = Type.function(Type.int(), [self.pyobj, voidptr])
fn = self._get_function(fnty, name="NumbaArrayAdaptor")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="NumbaComplexAdaptor")
return self.builder.call(fn, [cobj, cmplx])
def get_module_dict_symbol(self):
md_pymod = cgutils.MetadataKeyStore(self.module, "python.module")
pymodname = ".pymodule.dict." + md_pymod.get()
try:
gv = self.module.get_global_variable_named(name=pymodname)
except LLVMException:
gv = self.module.add_global_variable(self.pyobj.pointee,
name=pymodname)
return gv
def get_module_dict(self):
return self.get_module_dict_symbol()
# return self.builder.load(gv)
def raise_native_error(self, msg):
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string(self.native_error_type, cstr)
@property
def native_error_type(self):
name = "NumbaNativeError"
try:
return self.module.get_global_variable_named(name)
except LLVMException:
return self.module.add_global_variable(self.pyobj.pointee,
name=name)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string(self.name_error_type, cstr)
@property
def name_error_type(self):
name = "PyExc_NameError"
try:
return self.module.get_global_variable_named(name)
except LLVMException:
return self.module.add_global_variable(self.pyobj.pointee,
name=name)
########NEW FILE########
__FILENAME__ = service
"""
Implement background services for the application.
This is implemented as a cooperative concurrent task.
"""
from __future__ import absolute_import, print_function, division
import functools
from numba.utils import iter_next
class Service(object):
def __init__(self, name="unnamed", arg=None):
self.name = name
self.enabled = True
self.arg = arg
self._task = self.process(self.arg)
iter_next(self._task)
def service(self):
"""
        Run one step of the service task.
        Servicing is skipped if it is disabled through the "enabled"
        attribute. While the task is executing, the service is disabled to
        avoid recursion.
"""
if self.enabled:
enable = self.enabled
try:
# Prevent recursion
self.enabled = False
iter_next(self._task)
finally:
self.enabled = enable
def process(self, arg):
"""
        Override this to implement the service task.
This must be a generator.
Use `yield` to return control.
"""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.service()
def after(self, fn):
"""
A decorator for a function. Service is triggered on return.
"""
@functools.wraps(fn)
def wrap(*args, **kws):
with self:
return fn(*args, **kws)
return wrap
# -----------------------------------------------------------------------------
# The rest are for testing
class HelloService(Service):
def process(self, arg):
count = 0
yield
while True:
print("Hello", count)
count += 1
yield
def test():
serv = HelloService("my.hello")
print("1")
serv.service()
print("2")
serv.service()
with serv:
print("3")
@serv.after
def nested():
print("4")
nested()
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = threadlocal
"""
Implements:
- Threadlocal stack
"""
from __future__ import print_function, absolute_import, division
import threading
class TLStack(object):
def __init__(self):
self.local = threading.local()
@property
def stack(self):
try:
# Retrieve thread local stack
return self.local.stack
except AttributeError:
            # Initialize the stack for this thread on first use
            self.local.stack = []
            return self.local.stack
def push(self, item):
self.stack.append(item)
def pop(self):
return self.stack.pop()
@property
def top(self):
return self.stack[-1]
@property
def is_empty(self):
return not self.stack
def __bool__(self):
return not self.is_empty
def __nonzero__(self):
return self.__bool__()
def clear(self):
self.__init__()
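# Minimal usage sketch (not part of the original module): each thread that
# touches the TLStack gets its own independent stack.
if __name__ == '__main__':
    tls = TLStack()
    tls.push('a')
    tls.push('b')
    assert tls.top == 'b'
    assert tls.pop() == 'b'
    assert bool(tls) and not tls.is_empty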
########NEW FILE########
__FILENAME__ = sigutils
from __future__ import print_function, division, absolute_import
from numba import types
def is_signature(sig):
return isinstance(sig, (str, tuple, types.Prototype))
def normalize_signature(sig):
if isinstance(sig, str):
return normalize_signature(parse_signature(sig))
elif isinstance(sig, tuple):
return sig, None
elif isinstance(sig, types.Prototype):
return sig.args, sig.return_type
else:
raise TypeError(type(sig))
def parse_signature(signature_str):
    # Just eval signature_str with the numba.types namespace available
return eval(signature_str, {}, types.__dict__)
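# Illustrative sketch (not part of the original module): a signature string is
# evaluated against the numba.types namespace, so "int64(int64, int64)" is
# assumed to yield the same prototype object as calling
# types.int64(types.int64, types.int64) directly.
if __name__ == '__main__':
    sig = parse_signature("int64(int64, int64)")
    args, return_type = normalize_signature(sig)
    print(args, return_type)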
########NEW FILE########
__FILENAME__ = special
from __future__ import print_function, division, absolute_import
__all__ = [ 'typeof' ]
def typeof(val):
"""
Get the type of a variable or value.
    When used outside of Numba code, this infers the type of the given object.
"""
from .dispatcher import typeof_pyval
return typeof_pyval(val)
########NEW FILE########
__FILENAME__ = base
from __future__ import print_function
from collections import namedtuple, defaultdict
import llvm.core as lc
from llvm.core import Type, Constant
import numpy
from numba import types, utils, cgutils, typing
from numba.pythonapi import PythonAPI
from numba.targets.imputils import (user_function, python_attr_impl,
builtin_registry)
from numba.targets import builtins
LTYPEMAP = {
types.pyobject: Type.pointer(Type.int(8)),
types.boolean: Type.int(8),
types.uint8: Type.int(8),
types.uint16: Type.int(16),
types.uint32: Type.int(32),
types.uint64: Type.int(64),
types.int8: Type.int(8),
types.int16: Type.int(16),
types.int32: Type.int(32),
types.int64: Type.int(64),
types.float32: Type.float(),
types.float64: Type.double(),
}
STRUCT_TYPES = {
types.complex64: builtins.Complex64,
types.complex128: builtins.Complex128,
types.range_state32_type: builtins.RangeState32,
types.range_iter32_type: builtins.RangeIter32,
types.range_state64_type: builtins.RangeState64,
types.range_iter64_type: builtins.RangeIter64,
types.slice3_type: builtins.Slice,
}
Status = namedtuple("Status", ("code", "ok", "err", "exc", "none"))
RETCODE_OK = Constant.int_signextend(Type.int(), 0)
RETCODE_NONE = Constant.int_signextend(Type.int(), -2)
RETCODE_EXC = Constant.int_signextend(Type.int(), -1)
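# Return codes used by the native calling convention (see
# BaseContext.get_function_type below): 0 is success, -1 signals a Python
# exception, -2 signals a native "return None".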
class Overloads(object):
def __init__(self):
self.versions = []
def find(self, sig):
for ver in self.versions:
if ver.signature == sig:
return ver
# As generic type
if (len(ver.signature.args) == len(sig.args) or
(ver.signature.args and
ver.signature.args[-1] == types.VarArg)):
match = True
for formal, actual in zip(ver.signature.args, sig.args):
if formal == types.VarArg:
# vararg argument matches everything
break
match = self._match(formal, actual)
if not match:
break
if match:
return ver
raise NotImplementedError(self, sig)
@staticmethod
def _match(formal, actual):
if formal == actual:
# formal argument matches actual arguments
return True
elif types.Any == formal:
# formal argument is any
return True
elif (isinstance(formal, types.Kind) and
isinstance(actual, formal.of)):
# formal argument is a kind and the actual argument
# is of that kind
return True
def append(self, impl):
self.versions.append(impl)
class BaseContext(object):
"""
Notes on Structure
------------------
    Most objects are lowered as plain-old-data structures in the generated
    LLVM IR. They are passed around by reference (a pointer to the structure).
    Only POD structures can live across function boundaries, by copying the
    data.
"""
# Use default mangler (no specific requirement)
mangler = None
# Force powi implementation as math.pow call
implement_powi_as_math_call = False
implement_pow_as_math_call = False
def __init__(self, typing_context):
self.address_size = tuple.__itemsize__ * 8
self.typing_context = typing_context
self.defns = defaultdict(Overloads)
self.attrs = utils.UniqueDict()
self.users = utils.UniqueDict()
self.insert_func_defn(builtin_registry.functions)
self.insert_attr_defn(builtin_registry.attributes)
# Initialize
self.init()
def init(self):
"""
For subclasses to add initializer
"""
pass
def localized(self):
"""
Returns a localized context that contains extra environment information
"""
return ContextProxy(self)
def insert_func_defn(self, defns):
for defn in defns:
self.defns[defn.key].append(defn)
def insert_attr_defn(self, defns):
for attr in defns:
self.attrs[attr.key] = attr
def insert_user_function(self, func, fndesc, libs=()):
imp = user_function(func, fndesc, libs)
self.defns[func].append(imp)
baseclses = (typing.templates.ConcreteTemplate,)
glbls = dict(key=func, cases=[imp.signature])
name = "CallTemplate(%s)" % fndesc.mangled_name
self.users[func] = type(name, baseclses, glbls)
def insert_class(self, cls, attrs):
clsty = types.Object(cls)
for name, vtype in utils.dict_iteritems(attrs):
imp = python_attr_impl(clsty, name, vtype)
self.attrs[imp.key] = imp
def get_user_function(self, func):
return self.users[func]
def get_function_type(self, fndesc):
"""
Calling Convention
------------------
Returns: -2 for return none in native function;
-1 for failure with python exception set;
0 for success;
>0 for user error code.
Return value is passed by reference as the first argument.
It MUST NOT be used if the function is in nopython mode.
        Actual arguments start at the 2nd argument position.
        The caller is responsible for allocating space for the return value.
"""
argtypes = [self.get_argument_type(aty)
for aty in fndesc.argtypes]
resptr = self.get_return_type(fndesc.restype)
fnty = Type.function(Type.int(), [resptr] + argtypes)
return fnty
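    # For example (illustrative): under the convention documented above, a
    # function typed float64(int64, float64) is lowered as
    # ``i32 (double*, i64, double)`` -- a 32-bit status code, a pointer slot
    # for the return value, then the actual arguments.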
def declare_function(self, module, fndesc):
fnty = self.get_function_type(fndesc)
fn = module.get_or_insert_function(fnty, name=fndesc.mangled_name)
assert fn.is_declaration
for ak, av in zip(fndesc.args, self.get_arguments(fn)):
av.name = "arg.%s" % ak
        fn.args[0].name = ".ret"
return fn
def insert_const_string(self, mod, string):
stringtype = Type.pointer(Type.int(8))
text = Constant.stringz(string)
name = ".const.%s" % string
for gv in mod.global_variables:
if gv.name == name and gv.type.pointee == text.type:
break
else:
gv = mod.add_global_variable(text.type, name=name)
gv.global_constant = True
gv.initializer = text
gv.linkage = lc.LINKAGE_INTERNAL
return Constant.bitcast(gv, stringtype)
def get_arguments(self, func):
return func.args[1:]
def get_argument_type(self, ty):
if ty is types.boolean:
return self.get_data_type(ty)
elif self.is_struct_type(ty):
return Type.pointer(self.get_data_type(ty))
else:
return self.get_value_type(ty)
def get_return_type(self, ty):
if self.is_struct_type(ty):
return self.get_argument_type(ty)
else:
argty = self.get_argument_type(ty)
return Type.pointer(argty)
def get_data_type(self, ty):
"""
        Get the LLVM data representation of the type.
        Object-like (opaque) types are represented as an i8 pointer.
"""
if (isinstance(ty, types.Dummy) or
isinstance(ty, types.Module) or
isinstance(ty, types.Function) or
isinstance(ty, types.Dispatcher) or
isinstance(ty, types.Object) or
isinstance(ty, types.Macro)):
return Type.pointer(Type.int(8))
elif isinstance(ty, types.CPointer):
dty = self.get_data_type(ty.dtype)
return Type.pointer(dty)
elif isinstance(ty, types.Optional):
return self.get_data_type(ty.type)
elif isinstance(ty, types.Array):
return self.get_struct_type(self.make_array(ty))
elif isinstance(ty, types.UniTuple):
dty = self.get_value_type(ty.dtype)
return Type.array(dty, ty.count)
elif isinstance(ty, types.Tuple):
dtys = [self.get_value_type(t) for t in ty]
return Type.struct(dtys)
elif isinstance(ty, types.UniTupleIter):
stty = self.get_struct_type(self.make_unituple_iter(ty))
return stty
elif ty in STRUCT_TYPES:
return self.get_struct_type(STRUCT_TYPES[ty])
else:
return LTYPEMAP[ty]
def get_value_type(self, ty):
if ty == types.boolean:
return Type.int(1)
dataty = self.get_data_type(ty)
return dataty
def pack_value(self, builder, ty, value, ptr):
"""Pack data for array storage
"""
if ty == types.boolean:
value = cgutils.as_bool_byte(builder, value)
assert value.type == ptr.type.pointee
builder.store(value, ptr)
def unpack_value(self, builder, ty, ptr):
"""Unpack data from array storage
"""
assert cgutils.is_pointer(ptr.type)
value = builder.load(ptr)
if ty == types.boolean:
return builder.trunc(value, Type.int(1))
else:
return value
def is_struct_type(self, ty):
return cgutils.is_struct(self.get_data_type(ty))
def get_constant_struct(self, builder, ty, val):
assert self.is_struct_type(ty)
module = cgutils.get_module(builder)
if ty in types.complex_domain:
if ty == types.complex64:
innertype = types.float32
elif ty == types.complex128:
innertype = types.float64
else:
raise Exception("unreachable")
real = self.get_constant(innertype, val.real)
imag = self.get_constant(innertype, val.imag)
const = Constant.struct([real, imag])
gv = module.add_global_variable(const.type, name=".const")
gv.linkage = lc.LINKAGE_INTERNAL
gv.initializer = const
gv.global_constant = True
return builder.load(gv)
else:
raise NotImplementedError(ty)
def get_constant(self, ty, val):
assert not self.is_struct_type(ty)
lty = self.get_value_type(ty)
if ty == types.none:
assert val is None
return self.get_dummy_value()
elif ty == types.boolean:
return Constant.int(Type.int(1), int(val))
elif ty in types.signed_domain:
return Constant.int_signextend(lty, val)
elif ty in types.real_domain:
return Constant.real(lty, val)
elif isinstance(ty, types.UniTuple):
consts = [self.get_constant(ty.dtype, v) for v in val]
return Constant.array(consts[0].type, consts)
raise NotImplementedError(ty)
def get_constant_undef(self, ty):
lty = self.get_value_type(ty)
return Constant.undef(lty)
def get_constant_null(self, ty):
lty = self.get_value_type(ty)
return Constant.null(lty)
def get_function(self, fn, sig):
if isinstance(fn, types.Function):
key = fn.template.key
overloads = self.defns[key]
elif isinstance(fn, types.Dispatcher):
key = fn.overloaded.get_overload(sig.args)
overloads = self.defns[key]
else:
key = fn
overloads = self.defns[key]
try:
return _wrap_impl(overloads.find(sig), self, sig)
except NotImplementedError:
raise Exception("No definition for lowering %s%s" % (key, sig))
def get_attribute(self, val, typ, attr):
key = typ, attr
try:
return self.attrs[key]
except KeyError:
if isinstance(typ, types.Module):
return
elif typ.is_parametric:
key = type(typ), attr
return self.attrs[key]
else:
raise
def get_argument_value(self, builder, ty, val):
"""
Argument representation to local value representation
"""
if ty == types.boolean:
return builder.trunc(val, self.get_value_type(ty))
elif self.is_struct_type(ty):
return builder.load(val)
return val
def get_return_value(self, builder, ty, val):
"""
Local value representation to return type representation
"""
if ty is types.boolean:
r = self.get_return_type(ty).pointee
return builder.zext(val, r)
else:
return val
def get_value_as_argument(self, builder, ty, val):
"""Prepare local value representation as argument type representation
"""
argty = self.get_argument_type(ty)
if argty == val.type:
return val
elif self.is_struct_type(ty):
# Arguments are passed by pointer
assert argty.pointee == val.type
tmp = cgutils.alloca_once(builder, val.type)
builder.store(val, tmp)
return tmp
elif ty == types.boolean:
return builder.zext(val, argty)
raise NotImplementedError("value %s -> arg %s" % (val.type, argty))
def return_value(self, builder, retval):
fn = cgutils.get_function(builder)
retptr = fn.args[0]
assert retval.type == retptr.type.pointee, \
(str(retval.type), str(retptr.type.pointee))
builder.store(retval, retptr)
builder.ret(RETCODE_OK)
def return_native_none(self, builder):
builder.ret(RETCODE_NONE)
def return_errcode(self, builder, code):
assert code > 0
builder.ret(Constant.int(Type.int(), code))
def return_errcode_propagate(self, builder, code):
builder.ret(code)
def return_exc(self, builder):
builder.ret(RETCODE_EXC)
def cast(self, builder, val, fromty, toty):
if fromty == toty or toty == types.Any or isinstance(toty, types.Kind):
return val
elif ((fromty in types.unsigned_domain and
toty in types.signed_domain) or
(fromty in types.integer_domain and
toty in types.unsigned_domain)):
lfrom = self.get_value_type(fromty)
lto = self.get_value_type(toty)
if lfrom.width <= lto.width:
return builder.zext(val, lto)
elif lfrom.width > lto.width:
return builder.trunc(val, lto)
elif fromty in types.signed_domain and toty in types.signed_domain:
lfrom = self.get_value_type(fromty)
lto = self.get_value_type(toty)
if lfrom.width <= lto.width:
return builder.sext(val, lto)
elif lfrom.width > lto.width:
return builder.trunc(val, lto)
elif fromty in types.real_domain and toty in types.real_domain:
lty = self.get_value_type(toty)
if fromty == types.float32 and toty == types.float64:
return builder.fpext(val, lty)
elif fromty == types.float64 and toty == types.float32:
return builder.fptrunc(val, lty)
elif fromty in types.real_domain and toty in types.complex_domain:
if fromty == types.float32:
if toty == types.complex128:
real = self.cast(builder, val, fromty, types.float64)
else:
real = val
elif fromty == types.float64:
if toty == types.complex64:
real = self.cast(builder, val, fromty, types.float32)
else:
real = val
if toty == types.complex128:
imag = self.get_constant(types.float64, 0)
elif toty == types.complex64:
imag = self.get_constant(types.float32, 0)
else:
raise Exception("unreachable")
cmplx = self.make_complex(toty)(self, builder)
cmplx.real = real
cmplx.imag = imag
return cmplx._getvalue()
elif fromty in types.integer_domain and toty in types.real_domain:
lty = self.get_value_type(toty)
if fromty in types.signed_domain:
return builder.sitofp(val, lty)
else:
return builder.uitofp(val, lty)
elif toty in types.integer_domain and fromty in types.real_domain:
lty = self.get_value_type(toty)
if toty in types.signed_domain:
return builder.fptosi(val, lty)
else:
return builder.fptoui(val, lty)
elif fromty in types.integer_domain and toty in types.complex_domain:
cmplxcls, flty = builtins.get_complex_info(toty)
cmpl = cmplxcls(self, builder)
cmpl.real = self.cast(builder, val, fromty, flty)
cmpl.imag = self.get_constant(flty, 0)
return cmpl._getvalue()
elif fromty in types.complex_domain and toty in types.complex_domain:
srccls, srcty = builtins.get_complex_info(fromty)
dstcls, dstty = builtins.get_complex_info(toty)
src = srccls(self, builder, value=val)
dst = dstcls(self, builder)
dst.real = self.cast(builder, src.real, srcty, dstty)
dst.imag = self.cast(builder, src.imag, srcty, dstty)
return dst._getvalue()
elif (isinstance(toty, types.UniTuple) and
isinstance(fromty, types.UniTuple) and
len(fromty) == len(toty)):
olditems = cgutils.unpack_tuple(builder, val, len(fromty))
items = [self.cast(builder, i, fromty.dtype, toty.dtype)
for i in olditems]
tup = self.get_constant_undef(toty)
for idx, val in enumerate(items):
tup = builder.insert_value(tup, val, idx)
return tup
elif toty == types.boolean:
return self.is_true(builder, fromty, val)
elif fromty == types.boolean:
# first promote to int32
asint = builder.zext(val, Type.int())
# then promote to number
return self.cast(builder, asint, types.int32, toty)
raise NotImplementedError("cast", val, fromty, toty)
def is_true(self, builder, typ, val):
if typ in types.integer_domain:
return builder.icmp(lc.ICMP_NE, val, Constant.null(val.type))
elif typ in types.real_domain:
return builder.fcmp(lc.FCMP_ONE, val, Constant.real(val.type, 0))
elif typ in types.complex_domain:
cmplx = self.make_complex(typ)(self, builder, val)
fty = types.float32 if typ == types.complex64 else types.float64
real_istrue = self.is_true(builder, fty, cmplx.real)
imag_istrue = self.is_true(builder, fty, cmplx.imag)
return builder.or_(real_istrue, imag_istrue)
raise NotImplementedError("is_true", val, typ)
def call_function(self, builder, callee, resty, argtys, args):
retty = callee.args[0].type.pointee
retval = cgutils.alloca_once(builder, retty)
args = [self.get_value_as_argument(builder, ty, arg)
for ty, arg in zip(argtys, args)]
realargs = [retval] + list(args)
code = builder.call(callee, realargs)
status = self.get_return_status(builder, code)
return status, builder.load(retval)
def get_return_status(self, builder, code):
norm = builder.icmp(lc.ICMP_EQ, code, RETCODE_OK)
none = builder.icmp(lc.ICMP_EQ, code, RETCODE_NONE)
exc = builder.icmp(lc.ICMP_EQ, code, RETCODE_EXC)
ok = builder.or_(norm, none)
err = builder.not_(ok)
status = Status(code=code, ok=ok, err=err, exc=exc, none=none)
return status
def call_function_pointer(self, builder, funcptr, signature, args):
retty = self.get_value_type(signature.return_type)
fnty = Type.function(retty, [a.type for a in args])
fnptrty = Type.pointer(fnty)
addr = self.get_constant(types.intp, funcptr)
ptr = builder.inttoptr(addr, fnptrty)
return builder.call(ptr, args)
def call_class_method(self, builder, func, signature, args):
api = self.get_python_api(builder)
tys = signature.args
retty = signature.return_type
pyargs = [api.from_native_value(av, at) for av, at in zip(args, tys)]
res = api.call_function_objargs(func, pyargs)
# clean up
api.decref(func)
for obj in pyargs:
api.decref(obj)
with cgutils.ifthen(builder, cgutils.is_null(builder, res)):
self.return_exc(builder)
if retty == types.none:
api.decref(res)
return self.get_dummy_value()
else:
nativeresult = api.to_native_value(res, retty)
api.decref(res)
return nativeresult
def print_string(self, builder, text):
mod = builder.basic_block.function.module
cstring = Type.pointer(Type.int(8))
fnty = Type.function(Type.int(), [cstring])
puts = mod.get_or_insert_function(fnty, "puts")
return builder.call(puts, [text])
def debug_print(self, builder, text):
mod = cgutils.get_module(builder)
cstr = self.insert_const_string(mod, str(text))
self.print_string(builder, cstr)
def get_struct_type(self, struct):
fields = [self.get_data_type(v) for _, v in struct._fields]
return Type.struct(fields)
def get_dummy_value(self):
return Constant.null(self.get_dummy_type())
def get_dummy_type(self):
return Type.pointer(Type.int(8))
def optimize(self, module):
pass
def get_executable(self, func, fndesc):
raise NotImplementedError
def get_python_api(self, builder):
return PythonAPI(self, builder)
def make_array(self, typ):
return builtins.make_array(typ)
def make_complex(self, typ):
cls, _ = builtins.get_complex_info(typ)
return cls
def make_unituple_iter(self, typ):
return builtins.make_unituple_iter(typ)
def make_constant_array(self, builder, typ, ary):
assert typ.layout == 'C' # assumed in typeinfer.py
ary = numpy.ascontiguousarray(ary)
flat = ary.flatten()
# Handle data
if self.is_struct_type(typ.dtype):
# FIXME
raise TypeError("Do not support structure dtype as constant "
"array, yet.")
values = [self.get_constant(typ.dtype, flat[i])
for i in range(flat.size)]
lldtype = values[0].type
consts = Constant.array(lldtype, values)
module = cgutils.get_module(builder)
data = module.add_global_variable(consts.type, name=".const.array"
".data")
data.linkage = lc.LINKAGE_INTERNAL
data.global_constant = True
data.initializer = consts
# Handle shape
llintp = self.get_value_type(types.intp)
shapevals = [self.get_constant(types.intp, s) for s in ary.shape]
cshape = Constant.array(llintp, shapevals)
# Handle strides
stridevals = [self.get_constant(types.intp, s) for s in ary.strides]
cstrides = Constant.array(llintp, stridevals)
# Create array structure
cary = self.make_array(typ)(self, builder)
cary.data = builder.bitcast(data, cary.data.type)
cary.shape = cshape
cary.strides = cstrides
return cary._getvalue()
class _wrap_impl(object):
def __init__(self, imp, context, sig):
self._imp = imp
self._context = context
self._sig = sig
def __call__(self, builder, args):
return self._imp(self._context, builder, self._sig, args)
def __getattr__(self, item):
return getattr(self._imp, item)
def __repr__(self):
return "<wrapped %s>" % self._imp
class ContextProxy(object):
"""
    Provides a localized environment for the context of the compilation unit.
"""
def __init__(self, base):
self.__base = base
self.metadata = utils.UniqueDict()
self.linking = set()
def add_libs(self, libs):
self.linking |= set(libs)
def __getattr__(self, name):
if not name.startswith('_'):
return getattr(self.__base, name)
else:
return super(ContextProxy, self).__getattr__(name)
########NEW FILE########
__FILENAME__ = builtins
from llvm.core import Type, Constant
import llvm.core as lc
import math
from functools import reduce
from numba import types, typing, cgutils, utils
from numba.targets.imputils import (builtin, builtin_attr, implement,
impl_attribute)
#-------------------------------------------------------------------------------
def make_array(ty):
dtype = ty.dtype
nd = ty.ndim
class ArrayTemplate(cgutils.Structure):
_fields = [('data', types.CPointer(dtype)),
('shape', types.UniTuple(types.intp, nd)),
('strides', types.UniTuple(types.intp, nd)),
('parent', types.pyobject), ]
return ArrayTemplate
#-------------------------------------------------------------------------------
def int_add_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
return builder.add(a, b)
def int_sub_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
return builder.sub(a, b)
def int_mul_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
return builder.mul(a, b)
def int_udiv_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
cgutils.guard_zero(context, builder, b)
return builder.udiv(a, b)
def int_divmod(context, builder, x, y):
"""
Reference Objects/intobject.c
xdivy = x / y;
xmody = (long)(x - (unsigned long)xdivy * y);
/* If the signs of x and y differ, and the remainder is non-0,
* C89 doesn't define whether xdivy is now the floor or the
* ceiling of the infinitely precise quotient. We want the floor,
* and we have it iff the remainder's sign matches y's.
*/
if (xmody && ((y ^ xmody) < 0) /* i.e. and signs differ */) {
xmody += y;
--xdivy;
assert(xmody && ((y ^ xmody) >= 0));
}
*p_xdivy = xdivy;
*p_xmody = xmody;
"""
assert x.type == y.type
xdivy = builder.sdiv(x, y)
xmody = builder.srem(x, y) # Intel has divmod instruction
ZERO = Constant.null(y.type)
ONE = Constant.int(y.type, 1)
y_xor_xmody_ltz = builder.icmp(lc.ICMP_SLT, builder.xor(y, xmody), ZERO)
xmody_istrue = builder.icmp(lc.ICMP_NE, xmody, ZERO)
cond = builder.and_(xmody_istrue, y_xor_xmody_ltz)
bb1 = builder.basic_block
with cgutils.ifthen(builder, cond):
xmody_plus_y = builder.add(xmody, y)
xdivy_minus_1 = builder.sub(xdivy, ONE)
bb2 = builder.basic_block
resdiv = builder.phi(y.type)
resdiv.add_incoming(xdivy, bb1)
resdiv.add_incoming(xdivy_minus_1, bb2)
resmod = builder.phi(x.type)
resmod.add_incoming(xmody, bb1)
resmod.add_incoming(xmody_plus_y, bb2)
return resdiv, resmod
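# Illustrative reference (not part of the lowering): the Python floor-division
# semantics that int_divmod reproduces, shown with ordinary Python ints:
#   >>> divmod(-7, 2)
#   (-4, 1)      # remainder takes the sign of the divisor
#   >>> divmod(7, -2)
#   (-4, -1)
# whereas C-style truncating division would give (-3, -1) and (-3, 1).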
def int_sdiv_impl(context, builder, sig, args):
[va, vb] = args
[ta, tb] = sig.args
a = context.cast(builder, va, ta, sig.return_type)
b = context.cast(builder, vb, tb, sig.return_type)
cgutils.guard_zero(context, builder, b)
div, _ = int_divmod(context, builder, a, b)
return div
def int_struediv_impl(context, builder, sig, args):
x, y = args
fx = builder.sitofp(x, Type.double())
fy = builder.sitofp(y, Type.double())
cgutils.guard_zero(context, builder, y)
return builder.fdiv(fx, fy)
def int_utruediv_impl(context, builder, sig, args):
x, y = args
fx = builder.uitofp(x, Type.double())
fy = builder.uitofp(y, Type.double())
cgutils.guard_zero(context, builder, y)
return builder.fdiv(fx, fy)
def int_sfloordiv_impl(context, builder, sig, args):
x, y = args
cgutils.guard_zero(context, builder, y)
return builder.sdiv(x, y)
def int_ufloordiv_impl(context, builder, sig, args):
x, y = args
cgutils.guard_zero(context, builder, y)
return builder.udiv(x, y)
def int_srem_impl(context, builder, sig, args):
x, y = args
cgutils.guard_zero(context, builder, y)
_, rem = int_divmod(context, builder, x, y)
return rem
def int_urem_impl(context, builder, sig, args):
x, y = args
return builder.urem(x, y)
def int_spower_impl(context, builder, sig, args):
module = cgutils.get_module(builder)
x, y = args
if y.type.width > 32:
y = builder.trunc(y, Type.int(32))
elif y.type.width < 32:
y = builder.sext(y, Type.int(32))
if context.implement_powi_as_math_call:
undersig = typing.signature(sig.return_type, sig.args[0], types.int32)
impl = context.get_function(math.pow, undersig)
return impl(builder, (x, y))
else:
powerfn = lc.Function.intrinsic(module, lc.INTR_POWI, [x.type])
return builder.call(powerfn, (x, y))
def int_upower_impl(context, builder, sig, args):
module = cgutils.get_module(builder)
x, y = args
if y.type.width > 32:
y = builder.trunc(y, Type.int(32))
elif y.type.width < 32:
y = builder.zext(y, Type.int(32))
if context.implement_powi_as_math_call:
undersig = typing.signature(sig.return_type, sig.args[0], types.int32)
impl = context.get_function(math.pow, undersig)
return impl(builder, (x, y))
else:
powerfn = lc.Function.intrinsic(module, lc.INTR_POWI, [x.type])
return builder.call(powerfn, (x, y))
def int_power_func_body(context, builder, x, y):
pcounter = builder.alloca(y.type)
presult = builder.alloca(x.type)
result = Constant.int(x.type, 1)
counter = y
builder.store(counter, pcounter)
builder.store(result, presult)
bbcond = cgutils.append_basic_block(builder, ".cond")
bbbody = cgutils.append_basic_block(builder, ".body")
bbexit = cgutils.append_basic_block(builder, ".exit")
del counter
del result
builder.branch(bbcond)
with cgutils.goto_block(builder, bbcond):
counter = builder.load(pcounter)
ONE = Constant.int(counter.type, 1)
ZERO = Constant.null(counter.type)
builder.store(builder.sub(counter, ONE), pcounter)
pred = builder.icmp(lc.ICMP_SGT, counter, ZERO)
builder.cbranch(pred, bbbody, bbexit)
with cgutils.goto_block(builder, bbbody):
result = builder.load(presult)
builder.store(builder.mul(result, x), presult)
builder.branch(bbcond)
builder.position_at_end(bbexit)
return builder.load(presult)
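# Rough pure-Python equivalent of the loop emitted above (a sketch only; the
# names are illustrative and this is not the actual lowering):
#   def _int_power(x, y):          # assumes y >= 0
#       result = 1
#       counter = y
#       while counter > 0:
#           result *= x
#           counter -= 1
#       return result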
def int_slt_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_SLT, *args)
def int_sle_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_SLE, *args)
def int_sgt_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_SGT, *args)
def int_sge_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_SGE, *args)
def int_ult_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_ULT, *args)
def int_ule_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_ULE, *args)
def int_ugt_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_UGT, *args)
def int_uge_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_UGE, *args)
def int_eq_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_EQ, *args)
def int_ne_impl(context, builder, sig, args):
return builder.icmp(lc.ICMP_NE, *args)
def int_abs_impl(context, builder, sig, args):
[x] = args
ZERO = Constant.null(x.type)
ltz = builder.icmp(lc.ICMP_SLT, x, ZERO)
negated = builder.neg(x)
return builder.select(ltz, negated, x)
def uint_abs_impl(context, builder, sig, args):
[x] = args
return x
def int_print_impl(context, builder, sig, args):
[x] = args
py = context.get_python_api(builder)
szval = context.cast(builder, x, sig.args[0], types.intp)
intobj = py.long_from_ssize_t(szval)
py.print_object(intobj)
py.decref(intobj)
return context.get_dummy_value()
def int_shl_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
return builder.shl(val, amt)
def int_lshr_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
return builder.lshr(val, amt)
def int_ashr_impl(context, builder, sig, args):
[valty, amtty] = sig.args
[val, amt] = args
val = context.cast(builder, val, valty, sig.return_type)
amt = context.cast(builder, amt, amtty, sig.return_type)
return builder.ashr(val, amt)
def int_and_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
return builder.and_(cav, cbc)
def int_or_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
return builder.or_(cav, cbc)
def int_xor_impl(context, builder, sig, args):
[at, bt] = sig.args
[av, bv] = args
cav = context.cast(builder, av, at, sig.return_type)
cbc = context.cast(builder, bv, bt, sig.return_type)
return builder.xor(cav, cbc)
def int_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
val = context.cast(builder, val, typ, sig.return_type)
if sig.return_type in types.real_domain:
return builder.fsub(context.get_constant(sig.return_type, 0), val)
else:
return builder.neg(val)
def int_invert_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
val = context.cast(builder, val, typ, sig.return_type)
return builder.xor(val, Constant.all_ones(val.type))
def int_sign_impl(context, builder, sig, args):
[x] = args
POS = Constant.int(x.type, 1)
NEG = Constant.int(x.type, -1)
ZERO = Constant.int(x.type, 0)
cmp_zero = builder.icmp(lc.ICMP_EQ, x, ZERO)
cmp_pos = builder.icmp(lc.ICMP_SGT, x, ZERO)
presult = builder.alloca(x.type)
bb_zero = cgutils.append_basic_block(builder, ".zero")
bb_postest = cgutils.append_basic_block(builder, ".postest")
bb_pos = cgutils.append_basic_block(builder, ".pos")
bb_neg = cgutils.append_basic_block(builder, ".neg")
bb_exit = cgutils.append_basic_block(builder, ".exit")
builder.cbranch(cmp_zero, bb_zero, bb_postest)
with cgutils.goto_block(builder, bb_zero):
builder.store(ZERO, presult)
builder.branch(bb_exit)
with cgutils.goto_block(builder, bb_postest):
builder.cbranch(cmp_pos, bb_pos, bb_neg)
with cgutils.goto_block(builder, bb_pos):
builder.store(POS, presult)
builder.branch(bb_exit)
with cgutils.goto_block(builder, bb_neg):
builder.store(NEG, presult)
builder.branch(bb_exit)
builder.position_at_end(bb_exit)
return builder.load(presult)
builtin(implement('==', types.boolean, types.boolean)(int_eq_impl))
builtin(implement('!=', types.boolean, types.boolean)(int_ne_impl))
builtin(implement('<', types.boolean, types.boolean)(int_ult_impl))
builtin(implement('<=', types.boolean, types.boolean)(int_ule_impl))
builtin(implement('>', types.boolean, types.boolean)(int_ugt_impl))
builtin(implement('>=', types.boolean, types.boolean)(int_uge_impl))
builtin(implement('~', types.boolean)(int_invert_impl))
for ty in types.integer_domain:
builtin(implement('+', ty, ty)(int_add_impl))
builtin(implement('-', ty, ty)(int_sub_impl))
builtin(implement('*', ty, ty)(int_mul_impl))
builtin(implement('==', ty, ty)(int_eq_impl))
builtin(implement('!=', ty, ty)(int_ne_impl))
builtin(implement(types.print_type, ty)(int_print_impl))
builtin(implement('<<', ty, types.uint32)(int_shl_impl))
builtin(implement('&', ty, ty)(int_and_impl))
builtin(implement('|', ty, ty)(int_or_impl))
builtin(implement('^', ty, ty)(int_xor_impl))
builtin(implement('-', ty)(int_negate_impl))
builtin(implement(types.neg_type, ty)(int_negate_impl))
builtin(implement('~', ty)(int_invert_impl))
builtin(implement(types.sign_type, ty)(int_sign_impl))
for ty in types.unsigned_domain:
builtin(implement('/?', ty, ty)(int_udiv_impl))
builtin(implement('//', ty, ty)(int_ufloordiv_impl))
builtin(implement('/', ty, ty)(int_utruediv_impl))
builtin(implement('%', ty, ty)(int_urem_impl))
builtin(implement('<', ty, ty)(int_ult_impl))
builtin(implement('<=', ty, ty)(int_ule_impl))
builtin(implement('>', ty, ty)(int_ugt_impl))
builtin(implement('>=', ty, ty)(int_uge_impl))
builtin(implement('**', types.float64, ty)(int_upower_impl))
# logical shift for unsigned
builtin(implement('>>', ty, types.uint32)(int_lshr_impl))
builtin(implement(types.abs_type, ty)(uint_abs_impl))
for ty in types.signed_domain:
builtin(implement('/?', ty, ty)(int_sdiv_impl))
builtin(implement('//', ty, ty)(int_sfloordiv_impl))
builtin(implement('/', ty, ty)(int_struediv_impl))
builtin(implement('%', ty, ty)(int_srem_impl))
builtin(implement('<', ty, ty)(int_slt_impl))
builtin(implement('<=', ty, ty)(int_sle_impl))
builtin(implement('>', ty, ty)(int_sgt_impl))
builtin(implement('>=', ty, ty)(int_sge_impl))
builtin(implement(types.abs_type, ty)(int_abs_impl))
builtin(implement('**', types.float64, ty)(int_spower_impl))
# arithmetic shift for signed
builtin(implement('>>', ty, types.uint32)(int_ashr_impl))
def real_add_impl(context, builder, sig, args):
return builder.fadd(*args)
def real_sub_impl(context, builder, sig, args):
return builder.fsub(*args)
def real_mul_impl(context, builder, sig, args):
return builder.fmul(*args)
def real_div_impl(context, builder, sig, args):
cgutils.guard_zero(context, builder, args[1])
return builder.fdiv(*args)
def real_divmod(context, builder, x, y):
assert x.type == y.type
floatty = x.type
module = cgutils.get_module(builder)
fname = ".numba.python.rem.%s" % x.type
fnty = Type.function(floatty, (floatty, floatty, Type.pointer(floatty)))
fn = module.get_or_insert_function(fnty, fname)
if fn.is_declaration:
fn.linkage = lc.LINKAGE_LINKONCE_ODR
fnbuilder = lc.Builder.new(fn.append_basic_block('entry'))
fx, fy, pmod = fn.args
div, mod = real_divmod_func_body(context, fnbuilder, fx, fy)
fnbuilder.store(mod, pmod)
fnbuilder.ret(div)
pmod = cgutils.alloca_once(builder, floatty)
quotient = builder.call(fn, (x, y, pmod))
return quotient, builder.load(pmod)
def real_divmod_func_body(context, builder, vx, wx):
# Reference Objects/floatobject.c
#
# float_divmod(PyObject *v, PyObject *w)
# {
# double vx, wx;
# double div, mod, floordiv;
# CONVERT_TO_DOUBLE(v, vx);
# CONVERT_TO_DOUBLE(w, wx);
# mod = fmod(vx, wx);
# /* fmod is typically exact, so vx-mod is *mathematically* an
# exact multiple of wx. But this is fp arithmetic, and fp
# vx - mod is an approximation; the result is that div may
# not be an exact integral value after the division, although
# it will always be very close to one.
# */
# div = (vx - mod) / wx;
# if (mod) {
# /* ensure the remainder has the same sign as the denominator */
# if ((wx < 0) != (mod < 0)) {
# mod += wx;
# div -= 1.0;
# }
# }
# else {
# /* the remainder is zero, and in the presence of signed zeroes
# fmod returns different results across platforms; ensure
# it has the same sign as the denominator; we'd like to do
# "mod = wx * 0.0", but that may get optimized away */
# mod *= mod; /* hide "mod = +0" from optimizer */
# if (wx < 0.0)
# mod = -mod;
# }
# /* snap quotient to nearest integral value */
# if (div) {
# floordiv = floor(div);
# if (div - floordiv > 0.5)
# floordiv += 1.0;
# }
# else {
# /* div is zero - get the same sign as the true quotient */
# div *= div; /* hide "div = +0" from optimizers */
# floordiv = div * vx / wx; /* zero w/ sign of vx/wx */
# }
# return Py_BuildValue("(dd)", floordiv, mod);
# }
pmod = builder.alloca(vx.type)
pdiv = builder.alloca(vx.type)
pfloordiv = builder.alloca(vx.type)
mod = builder.frem(vx, wx)
div = builder.fdiv(builder.fsub(vx, mod), wx)
builder.store(mod, pmod)
builder.store(div, pdiv)
ZERO = Constant.real(vx.type, 0)
ONE = Constant.real(vx.type, 1)
mod_istrue = builder.fcmp(lc.FCMP_ONE, mod, ZERO)
wx_ltz = builder.fcmp(lc.FCMP_OLT, wx, ZERO)
mod_ltz = builder.fcmp(lc.FCMP_OLT, mod, ZERO)
with cgutils.ifthen(builder, mod_istrue):
wx_ltz_ne_mod_ltz = builder.icmp(lc.ICMP_NE, wx_ltz, mod_ltz)
with cgutils.ifthen(builder, wx_ltz_ne_mod_ltz):
mod = builder.fadd(mod, wx)
div = builder.fsub(div, ONE)
builder.store(mod, pmod)
builder.store(div, pdiv)
del mod
del div
with cgutils.ifnot(builder, mod_istrue):
mod = builder.load(pmod)
mod = builder.fmul(mod, mod)
builder.store(mod, pmod)
del mod
with cgutils.ifthen(builder, wx_ltz):
mod = builder.load(pmod)
mod = builder.fsub(ZERO, mod)
builder.store(mod, pmod)
del mod
div = builder.load(pdiv)
div_istrue = builder.fcmp(lc.FCMP_ONE, div, ZERO)
with cgutils.ifthen(builder, div_istrue):
module = cgutils.get_module(builder)
floorfn = lc.Function.intrinsic(module, lc.INTR_FLOOR, [wx.type])
floordiv = builder.call(floorfn, [div])
floordivdiff = builder.fsub(div, floordiv)
floordivincr = builder.fadd(floordiv, ONE)
HALF = Constant.real(wx.type, 0.5)
pred = builder.fcmp(lc.FCMP_OGT, floordivdiff, HALF)
floordiv = builder.select(pred, floordivincr, floordiv)
builder.store(floordiv, pfloordiv)
with cgutils.ifnot(builder, div_istrue):
div = builder.fmul(div, div)
builder.store(div, pdiv)
floordiv = builder.fdiv(builder.fmul(div, vx), wx)
builder.store(floordiv, pfloordiv)
return builder.load(pfloordiv), builder.load(pmod)
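# Illustrative reference (Python semantics, not the IR): the behaviour the
# helper above reproduces for floats:
#   >>> divmod(-7.0, 2.0)
#   (-4.0, 1.0)     # remainder matches the sign of the denominator
#   >>> divmod(7.5, -2.0)
#   (-4.0, -0.5)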
def real_mod_impl(context, builder, sig, args):
x, y = args
cgutils.guard_zero(context, builder, y)
_, rem = real_divmod(context, builder, x, y)
return rem
def real_power_impl(context, builder, sig, args):
x, y = args
module = cgutils.get_module(builder)
if context.implement_powi_as_math_call:
imp = context.get_function(math.pow, sig)
return imp(builder, args)
else:
fn = lc.Function.intrinsic(module, lc.INTR_POW, [y.type])
return builder.call(fn, (x, y))
def real_lt_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_OLT, *args)
def real_le_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_OLE, *args)
def real_gt_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_OGT, *args)
def real_ge_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_OGE, *args)
def real_eq_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_OEQ, *args)
def real_ne_impl(context, builder, sig, args):
return builder.fcmp(lc.FCMP_UNE, *args)
def real_abs_impl(context, builder, sig, args):
[ty] = sig.args
sig = typing.signature(ty, ty)
impl = context.get_function(math.fabs, sig)
return impl(builder, args)
def real_print_impl(context, builder, sig, args):
[x] = args
py = context.get_python_api(builder)
szval = context.cast(builder, x, sig.args[0], types.float64)
intobj = py.float_from_double(szval)
py.print_object(intobj)
py.decref(intobj)
return context.get_dummy_value()
def real_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
val = context.cast(builder, val, typ, sig.return_type)
if sig.return_type in types.real_domain:
return builder.fsub(context.get_constant(sig.return_type, 0), val)
else:
return builder.neg(val)
def real_sign_impl(context, builder, sig, args):
[x] = args
POS = Constant.real(x.type, 1)
NEG = Constant.real(x.type, -1)
ZERO = Constant.real(x.type, 0)
cmp_zero = builder.fcmp(lc.FCMP_OEQ, x, ZERO)
cmp_pos = builder.fcmp(lc.FCMP_OGT, x, ZERO)
presult = builder.alloca(x.type)
bb_zero = cgutils.append_basic_block(builder, ".zero")
bb_postest = cgutils.append_basic_block(builder, ".postest")
bb_pos = cgutils.append_basic_block(builder, ".pos")
bb_neg = cgutils.append_basic_block(builder, ".neg")
bb_exit = cgutils.append_basic_block(builder, ".exit")
builder.cbranch(cmp_zero, bb_zero, bb_postest)
with cgutils.goto_block(builder, bb_zero):
builder.store(ZERO, presult)
builder.branch(bb_exit)
with cgutils.goto_block(builder, bb_postest):
builder.cbranch(cmp_pos, bb_pos, bb_neg)
with cgutils.goto_block(builder, bb_pos):
builder.store(POS, presult)
builder.branch(bb_exit)
with cgutils.goto_block(builder, bb_neg):
builder.store(NEG, presult)
builder.branch(bb_exit)
builder.position_at_end(bb_exit)
return builder.load(presult)
for ty in types.real_domain:
builtin(implement('+', ty, ty)(real_add_impl))
builtin(implement('-', ty, ty)(real_sub_impl))
builtin(implement('*', ty, ty)(real_mul_impl))
builtin(implement('/?', ty, ty)(real_div_impl))
builtin(implement('/', ty, ty)(real_div_impl))
builtin(implement('%', ty, ty)(real_mod_impl))
builtin(implement('**', ty, ty)(real_power_impl))
builtin(implement('==', ty, ty)(real_eq_impl))
builtin(implement('!=', ty, ty)(real_ne_impl))
builtin(implement('<', ty, ty)(real_lt_impl))
builtin(implement('<=', ty, ty)(real_le_impl))
builtin(implement('>', ty, ty)(real_gt_impl))
builtin(implement('>=', ty, ty)(real_ge_impl))
builtin(implement(types.abs_type, ty)(real_abs_impl))
builtin(implement(types.print_type, ty)(real_print_impl))
builtin(implement('-', ty)(real_negate_impl))
builtin(implement(types.neg_type, ty)(real_negate_impl))
builtin(implement(types.sign_type, ty)(real_sign_impl))
class Complex64(cgutils.Structure):
_fields = [('real', types.float32),
('imag', types.float32)]
class Complex128(cgutils.Structure):
_fields = [('real', types.float64),
('imag', types.float64)]
def get_complex_info(ty):
if ty == types.complex64:
cmplxcls = Complex64
flty = types.float32
elif ty == types.complex128:
cmplxcls = Complex128
flty = types.float64
else:
raise TypeError(ty)
return cmplxcls, flty
@builtin_attr
@impl_attribute(types.complex64, "real", types.float32)
def complex64_real_impl(context, builder, typ, value):
cplx = Complex64(context, builder, value=value)
return cplx.real
@builtin_attr
@impl_attribute(types.complex128, "real", types.float64)
def complex128_real_impl(context, builder, typ, value):
cplx = Complex128(context, builder, value=value)
return cplx.real
@builtin_attr
@impl_attribute(types.complex64, "imag", types.float32)
def complex64_imag_impl(context, builder, typ, value):
cplx = Complex64(context, builder, value=value)
return cplx.imag
@builtin_attr
@impl_attribute(types.complex128, "imag", types.float64)
def complex128_imag_impl(context, builder, typ, value):
cplx = Complex128(context, builder, value=value)
return cplx.imag
@builtin
@implement("**", types.complex128, types.complex128)
def complex128_power_impl(context, builder, sig, args):
[ca, cb] = args
a = Complex128(context, builder, value=ca)
b = Complex128(context, builder, value=cb)
c = Complex128(context, builder)
module = cgutils.get_module(builder)
pa = a._getpointer()
pb = b._getpointer()
pc = c._getpointer()
    # Optimize for the square case because cpow loses a lot of precision
TWO = context.get_constant(types.float64, 2)
ZERO = context.get_constant(types.float64, 0)
b_real_is_two = builder.fcmp(lc.FCMP_OEQ, b.real, TWO)
b_imag_is_zero = builder.fcmp(lc.FCMP_OEQ, b.imag, ZERO)
b_is_two = builder.and_(b_real_is_two, b_imag_is_zero)
with cgutils.ifelse(builder, b_is_two) as (then, otherwise):
with then:
# Lower as multiplication
res = complex_mult_impl(context, builder, sig, (ca, ca))
cres = Complex128(context, builder, value=res)
c.real = cres.real
c.imag = cres.imag
with otherwise:
# Lower with call to external function
fnty = Type.function(Type.void(), [pa.type] * 3)
cpow = module.get_or_insert_function(fnty, name="numba.math.cpow")
builder.call(cpow, (pa, pb, pc))
return builder.load(pc)
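# Illustrative example of the square fast path above (values only, assuming
# complex128 operands): (3+4j) ** 2 is lowered as (3+4j) * (3+4j) == -7+24j
# instead of being routed through the external cpow call.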
def complex_add_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fadd(a, c)
z.imag = builder.fadd(b, d)
return z._getvalue()
def complex_sub_impl(context, builder, sig, args):
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
z.real = builder.fsub(a, c)
z.imag = builder.fsub(b, d)
return z._getvalue()
def complex_mult_impl(context, builder, sig, args):
"""
(a+bi)(c+di)=(ac-bd)+i(ad+bc)
"""
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
ac = builder.fmul(a, c)
bd = builder.fmul(b, d)
ad = builder.fmul(a, d)
bc = builder.fmul(b, c)
z.real = builder.fsub(ac, bd)
z.imag = builder.fadd(ad, bc)
return z._getvalue()
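# Worked example of the formula above (illustrative only):
#   (1+2j) * (3+4j) = (1*3 - 2*4) + (1*4 + 2*3)j = -5 + 10j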
def complex_div_impl(context, builder, sig, args):
"""
z = c^2 + d^2
    (a+bi)/(c+di) = (ac + bd)/z + ((bc - ad)/z)i
"""
[cx, cy] = args
complexClass = context.make_complex(sig.args[0])
x = complexClass(context, builder, value=cx)
y = complexClass(context, builder, value=cy)
z = complexClass(context, builder)
a = x.real
b = x.imag
c = y.real
d = y.imag
ac = builder.fmul(a, c)
bd = builder.fmul(b, d)
ad = builder.fmul(a, d)
bc = builder.fmul(b, c)
cc = builder.fmul(c, c)
dd = builder.fmul(d, d)
zz = builder.fadd(cc, dd)
ac_bd = builder.fadd(ac, bd)
bc_ad = builder.fsub(bc, ad)
cgutils.guard_zero(context, builder, zz)
z.real = builder.fdiv(ac_bd, zz)
z.imag = builder.fdiv(bc_ad, zz)
return z._getvalue()
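# Worked example of the formula above (illustrative only):
#   (1+2j) / (3+4j): z = 3**2 + 4**2 = 25
#   real = (1*3 + 2*4) / 25 = 0.44
#   imag = (2*3 - 1*4) / 25 = 0.08
#   so (1+2j) / (3+4j) == 0.44 + 0.08j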
def complex_negate_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
cmplxcls = context.make_complex(typ)
cmplx = cmplxcls(context, builder, val)
real = cmplx.real
imag = cmplx.imag
zero = Constant.real(real.type, 0)
res = cmplxcls(context, builder)
res.real = builder.fsub(zero, real)
res.imag = builder.fsub(zero, imag)
return res._getvalue()
for ty, cls in zip([types.complex64, types.complex128],
[Complex64, Complex128]):
builtin(implement("+", ty, ty)(complex_add_impl))
builtin(implement("-", ty, ty)(complex_sub_impl))
builtin(implement("*", ty, ty)(complex_mult_impl))
builtin(implement("/?", ty, ty)(complex_div_impl))
builtin(implement("/", ty, ty)(complex_div_impl))
builtin(implement("-", ty)(complex_negate_impl))
    # Complex modulo was removed in Python 3
#------------------------------------------------------------------------------
def number_not_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
istrue = context.cast(builder, val, typ, sig.return_type)
return builder.not_(istrue)
def number_as_bool_impl(context, builder, sig, args):
[typ] = sig.args
[val] = args
istrue = context.cast(builder, val, typ, sig.return_type)
return istrue
for ty in types.number_domain:
builtin(implement('not', ty)(number_not_impl))
builtin(implement(bool, ty)(number_as_bool_impl))
builtin(implement('not', types.boolean)(number_not_impl))
#------------------------------------------------------------------------------
class Slice(cgutils.Structure):
_fields = [('start', types.intp),
('stop', types.intp),
('step', types.intp), ]
@builtin
@implement(types.slice_type, types.intp, types.intp, types.intp)
def slice3_impl(context, builder, sig, args):
start, stop, step = args
slice3 = Slice(context, builder)
slice3.start = start
slice3.stop = stop
slice3.step = step
return slice3._getvalue()
@builtin
@implement(types.slice_type, types.intp, types.intp)
def slice2_impl(context, builder, sig, args):
start, stop = args
slice3 = Slice(context, builder)
slice3.start = start
slice3.stop = stop
slice3.step = context.get_constant(types.intp, 1)
return slice3._getvalue()
@builtin
@implement(types.slice_type, types.intp, types.none)
def slice1_start_impl(context, builder, sig, args):
start, stop = args
slice3 = Slice(context, builder)
slice3.start = start
maxint = (1 << (context.address_size - 1)) - 1
slice3.stop = context.get_constant(types.intp, maxint)
slice3.step = context.get_constant(types.intp, 1)
return slice3._getvalue()
@builtin
@implement(types.slice_type, types.none, types.intp)
def slice1_stop_impl(context, builder, sig, args):
start, stop = args
slice3 = Slice(context, builder)
slice3.start = context.get_constant(types.intp, 0)
slice3.stop = stop
slice3.step = context.get_constant(types.intp, 1)
return slice3._getvalue()
@builtin
@implement(types.slice_type)
def slice0_empty_impl(context, builder, sig, args):
assert not args
slice3 = Slice(context, builder)
slice3.start = context.get_constant(types.intp, 0)
maxint = (1 << (context.address_size - 1)) - 1
slice3.stop = context.get_constant(types.intp, maxint)
slice3.step = context.get_constant(types.intp, 1)
return slice3._getvalue()
@builtin
@implement(types.slice_type, types.none, types.none)
def slice0_none_none_impl(context, builder, sig, args):
assert len(args) == 2
newsig = typing.signature(types.slice_type)
return slice0_empty_impl(context, builder, newsig, ())
class RangeState32(cgutils.Structure):
_fields = [('start', types.int32),
('stop', types.int32),
('step', types.int32)]
class RangeIter32(cgutils.Structure):
_fields = [('iter', types.CPointer(types.int32)),
('stop', types.int32),
('step', types.int32),
('count', types.CPointer(types.int32))]
class RangeState64(cgutils.Structure):
_fields = [('start', types.int64),
('stop', types.int64),
('step', types.int64)]
class RangeIter64(cgutils.Structure):
_fields = [('iter', types.CPointer(types.int64)),
('stop', types.int64),
('step', types.int64),
('count', types.CPointer(types.int64))]
def make_unituple_iter(tupiter):
class UniTupleIter(cgutils.Structure):
_fields = [('index', types.CPointer(types.intp)),
('tuple', tupiter.unituple,)]
return UniTupleIter
@builtin
@implement(types.range_type, types.int32)
def range1_32_impl(context, builder, sig, args):
[stop] = args
state = RangeState32(context, builder)
state.start = context.get_constant(types.int32, 0)
state.stop = stop
state.step = context.get_constant(types.int32, 1)
return state._getvalue()
@builtin
@implement(types.range_type, types.int32, types.int32)
def range2_32_impl(context, builder, sig, args):
start, stop = args
state = RangeState32(context, builder)
state.start = start
state.stop = stop
state.step = context.get_constant(types.int32, 1)
return state._getvalue()
@builtin
@implement(types.range_type, types.int32, types.int32, types.int32)
def range3_32_impl(context, builder, sig, args):
[start, stop, step] = args
state = RangeState32(context, builder)
state.start = start
state.stop = stop
state.step = step
return state._getvalue()
def getiter_range_generic(context, builder, iterobj, start, stop, step):
diff = builder.sub(stop, start)
intty = start.type
zero = Constant.int(intty, 0)
one = Constant.int(intty, 1)
pos_diff = builder.icmp(lc.ICMP_SGT, diff, zero)
pos_step = builder.icmp(lc.ICMP_SGT, step, zero)
sign_differs = builder.xor(pos_diff, pos_step)
zero_step = builder.icmp(lc.ICMP_EQ, step, zero)
with cgutils.if_unlikely(builder, zero_step):
# step shouldn't be zero
context.return_errcode(builder, 1)
with cgutils.ifelse(builder, sign_differs) as (then, orelse):
with then:
builder.store(zero, iterobj.count)
with orelse:
rem = builder.srem(diff, step)
uneven = builder.icmp(lc.ICMP_SGT, rem, zero)
newcount = builder.add(builder.sdiv(diff, step),
builder.select(uneven, one, zero))
builder.store(newcount, iterobj.count)
return iterobj._getvalue()
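# Trip-count sketch for a positive step (illustrative only):
#   count = (stop - start) // step, plus one if the division leaves a remainder
#   e.g. start=0, stop=10, step=3  ->  10 // 3 + 1 == 4 iterations (0, 3, 6, 9)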
@builtin
@implement('getiter', types.range_state32_type)
def getiter_range32_impl(context, builder, sig, args):
(value,) = args
state = RangeState32(context, builder, value)
iterobj = RangeIter32(context, builder)
start = state.start
stop = state.stop
step = state.step
startptr = cgutils.alloca_once(builder, start.type)
builder.store(start, startptr)
countptr = cgutils.alloca_once(builder, start.type)
iterobj.iter = startptr
iterobj.stop = stop
iterobj.step = step
iterobj.count = countptr
return getiter_range_generic(context, builder, iterobj, start, stop, step)
@builtin
@implement('iternext', types.range_iter32_type)
def iternext_range32_impl(context, builder, sig, args):
(value,) = args
iterobj = RangeIter32(context, builder, value)
res = builder.load(iterobj.iter)
one = context.get_constant(types.int32, 1)
countptr = iterobj.count
builder.store(builder.sub(builder.load(countptr), one), countptr)
builder.store(builder.add(res, iterobj.step), iterobj.iter)
return res
@builtin
@implement('itervalid', types.range_iter32_type)
def itervalid_range32_impl(context, builder, sig, args):
(value,) = args
iterobj = RangeIter32(context, builder, value)
zero = context.get_constant(types.int32, 0)
gt = builder.icmp(lc.ICMP_SGE, builder.load(iterobj.count), zero)
return gt
@builtin
@implement(types.range_type, types.int64)
def range1_64_impl(context, builder, sig, args):
(stop,) = args
state = RangeState64(context, builder)
state.start = context.get_constant(types.int64, 0)
state.stop = stop
state.step = context.get_constant(types.int64, 1)
return state._getvalue()
@builtin
@implement(types.range_type, types.int64, types.int64)
def range2_64_impl(context, builder, sig, args):
start, stop = args
state = RangeState64(context, builder)
state.start = start
state.stop = stop
state.step = context.get_constant(types.int64, 1)
return state._getvalue()
@builtin
@implement(types.range_type, types.int64, types.int64, types.int64)
def range3_64_impl(context, builder, sig, args):
[start, stop, step] = args
state = RangeState64(context, builder)
state.start = start
state.stop = stop
state.step = step
return state._getvalue()
@builtin
@implement('getiter', types.range_state64_type)
def getiter_range64_impl(context, builder, sig, args):
(value,) = args
state = RangeState64(context, builder, value)
iterobj = RangeIter64(context, builder)
start = state.start
stop = state.stop
step = state.step
startptr = cgutils.alloca_once(builder, start.type)
builder.store(start, startptr)
countptr = cgutils.alloca_once(builder, start.type)
iterobj.iter = startptr
iterobj.stop = stop
iterobj.step = step
iterobj.count = countptr
return getiter_range_generic(context, builder, iterobj, start, stop, step)
@builtin
@implement('iternext', types.range_iter64_type)
def iternext_range64_impl(context, builder, sig, args):
(value,) = args
iterobj = RangeIter64(context, builder, value)
res = builder.load(iterobj.iter)
one = context.get_constant(types.int64, 1)
builder.store(builder.sub(builder.load(iterobj.count), one), iterobj.count)
builder.store(builder.add(res, iterobj.step), iterobj.iter)
return res
@builtin
@implement('itervalid', types.range_iter64_type)
def itervalid_range64_impl(context, builder, sig, args):
(value,) = args
iterobj = RangeIter64(context, builder, value)
zero = context.get_constant(types.int64, 0)
gt = builder.icmp(lc.ICMP_SGE, builder.load(iterobj.count), zero)
return gt
@builtin
@implement('getiter', types.Kind(types.UniTuple))
def getiter_unituple(context, builder, sig, args):
[tupty] = sig.args
[tup] = args
tupitercls = context.make_unituple_iter(types.UniTupleIter(tupty))
iterval = tupitercls(context, builder)
index0 = context.get_constant(types.intp, 0)
indexptr = cgutils.alloca_once(builder, index0.type)
builder.store(index0, indexptr)
iterval.index = indexptr
iterval.tuple = tup
return iterval._getvalue()
@builtin
@implement('iternextsafe', types.Kind(types.UniTupleIter))
def iternextsafe_unituple(context, builder, sig, args):
[tupiterty] = sig.args
[tupiter] = args
tupitercls = context.make_unituple_iter(tupiterty)
iterval = tupitercls(context, builder, value=tupiter)
tup = iterval.tuple
idxptr = iterval.index
idx = builder.load(idxptr)
    # TODO: missing out-of-bounds check
getitem_sig = typing.signature(sig.return_type, tupiterty.unituple,
types.intp)
res = getitem_unituple(context, builder, getitem_sig, [tup, idx])
nidx = builder.add(idx, context.get_constant(types.intp, 1))
builder.store(nidx, iterval.index)
return res
@builtin
@implement('getitem', types.Kind(types.UniTuple), types.intp)
def getitem_unituple(context, builder, sig, args):
tupty, _ = sig.args
tup, idx = args
bbelse = cgutils.append_basic_block(builder, "switch.else")
bbend = cgutils.append_basic_block(builder, "switch.end")
switch = builder.switch(idx, bbelse, n=tupty.count)
with cgutils.goto_block(builder, bbelse):
# TODO: propagate exception to
context.return_errcode(builder, 1)
lrtty = context.get_value_type(tupty.dtype)
with cgutils.goto_block(builder, bbend):
phinode = builder.phi(lrtty)
for i in range(tupty.count):
ki = context.get_constant(types.intp, i)
bbi = cgutils.append_basic_block(builder, "switch.%d" % i)
switch.add_case(ki, bbi)
with cgutils.goto_block(builder, bbi):
value = builder.extract_value(tup, i)
builder.branch(bbend)
phinode.add_incoming(value, bbi)
builder.position_at_end(bbend)
return phinode
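# Conceptual sketch of the switch emitted above (pure Python, illustrative
# only; the real lowering signals an error code instead of raising):
#   def _getitem_unituple(tup, idx):
#       for i in range(len(tup)):
#           if idx == i:
#               return tup[i]
#       raise IndexError(idx)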
@builtin
@implement('getitem', types.Kind(types.Array), types.intp)
def getitem_array1d_intp(context, builder, sig, args):
aryty, _ = sig.args
if aryty.ndim != 1:
# TODO
raise NotImplementedError("1D indexing into %dD array" % aryty.ndim)
ary, idx = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
ptr = cgutils.get_item_pointer(builder, aryty, ary, [idx], wraparound=True)
return context.unpack_value(builder, aryty.dtype, ptr)
@builtin
@implement('getitem', types.Kind(types.Array), types.slice3_type)
def getitem_array1d_slice(context, builder, sig, args):
aryty, _ = sig.args
if aryty.ndim != 1:
# TODO
raise NotImplementedError("1D indexing into %dD array" % aryty.ndim)
ary, idx = args
arystty = make_array(aryty)
ary = arystty(context, builder, value=ary)
shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
slicestruct = Slice(context, builder, value=idx)
cgutils.normalize_slice(builder, slicestruct, shapes[0])
dataptr = cgutils.get_item_pointer(builder, aryty, ary,
[slicestruct.start],
wraparound=True)
retstty = make_array(sig.return_type)
retary = retstty(context, builder)
shape = cgutils.get_range_from_slice(builder, slicestruct)
retary.shape = cgutils.pack_array(builder, [shape])
stride = cgutils.get_strides_from_slice(builder, aryty.ndim, ary.strides,
slicestruct, 0)
retary.strides = cgutils.pack_array(builder, [stride])
retary.data = dataptr
return retary._getvalue()
@builtin
@implement('getitem', types.Kind(types.Array),
types.Kind(types.UniTuple))
def getitem_array_unituple(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ndim = aryty.ndim
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
if idxty.dtype == types.slice3_type:
# Slicing
raw_slices = cgutils.unpack_tuple(builder, idx, aryty.ndim)
slices = [Slice(context, builder, value=sl) for sl in raw_slices]
for sl, sh in zip(slices,
cgutils.unpack_tuple(builder, ary.shape, ndim)):
cgutils.normalize_slice(builder, sl, sh)
indices = [sl.start for sl in slices]
dataptr = cgutils.get_item_pointer(builder, aryty, ary, indices,
wraparound=True)
# Build array
retstty = make_array(sig.return_type)
retary = retstty(context, builder)
retary.data = dataptr
shapes = [cgutils.get_range_from_slice(builder, sl)
for sl in slices]
retary.shape = cgutils.pack_array(builder, shapes)
strides = [cgutils.get_strides_from_slice(builder, ndim, ary.strides,
sl, i)
for i, sl in enumerate(slices)]
retary.strides = cgutils.pack_array(builder, strides)
return retary._getvalue()
else:
# Indexing
assert idxty.dtype == types.intp
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(idxty, indices)]
        # TODO: wraparound flag
ptr = cgutils.get_item_pointer(builder, aryty, ary, indices,
wraparound=True)
return context.unpack_value(builder, aryty.dtype, ptr)
@builtin
@implement('getitem', types.Kind(types.Array),
types.Kind(types.Tuple))
def getitem_array_tuple(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
ndim = aryty.ndim
if isinstance(sig.return_type, types.Array):
# Slicing
raw_indices = cgutils.unpack_tuple(builder, idx, aryty.ndim)
start = []
shapes = []
strides = []
oshapes = cgutils.unpack_tuple(builder, ary.shape, ndim)
for ax, (indexval, idxty) in enumerate(zip(raw_indices, idxty)):
if idxty == types.slice3_type:
slice = Slice(context, builder, value=indexval)
cgutils.normalize_slice(builder, slice, oshapes[ax])
start.append(slice.start)
shapes.append(cgutils.get_range_from_slice(builder, slice))
strides.append(cgutils.get_strides_from_slice(builder, ndim,
ary.strides,
slice, ax))
else:
ind = context.cast(builder, indexval, idxty, types.intp)
start.append(ind)
dataptr = cgutils.get_item_pointer(builder, aryty, ary, start,
wraparound=True)
# Build array
retstty = make_array(sig.return_type)
retary = retstty(context, builder)
retary.data = dataptr
retary.shape = cgutils.pack_array(builder, shapes)
retary.strides = cgutils.pack_array(builder, strides)
return retary._getvalue()
else:
# Indexing
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(idxty, indices)]
        # TODO: wraparound flag
ptr = cgutils.get_item_pointer(builder, aryty, ary, indices,
wraparound=True)
return context.unpack_value(builder, aryty.dtype, ptr)
@builtin
@implement('setitem', types.Kind(types.Array), types.intp,
types.Any)
def setitem_array1d(context, builder, sig, args):
aryty, _, valty = sig.args
ary, idx, val = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
ptr = cgutils.get_item_pointer(builder, aryty, ary, [idx],
wraparound=True)
val = context.cast(builder, val, valty, aryty.dtype)
context.pack_value(builder, aryty.dtype, val, ptr)
@builtin
@implement('setitem', types.Kind(types.Array),
types.Kind(types.UniTuple), types.Any)
def setitem_array_unituple(context, builder, sig, args):
aryty, idxty, valty = sig.args
ary, idx, val = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
    # TODO: support layouts other than C
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(idxty, indices)]
ptr = cgutils.get_item_pointer(builder, aryty, ary, indices,
wraparound=True)
context.pack_value(builder, aryty.dtype, val, ptr)
@builtin
@implement('setitem', types.Kind(types.Array),
types.Kind(types.Tuple), types.Any)
def setitem_array_tuple(context, builder, sig, args):
aryty, idxty, valty = sig.args
ary, idx, val = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
    # TODO: support layouts other than C
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
indices = [context.cast(builder, i, t, types.intp)
for t, i in zip(idxty, indices)]
ptr = cgutils.get_item_pointer(builder, aryty, ary, indices,
wraparound=True)
context.pack_value(builder, aryty.dtype, val, ptr)
@builtin
@implement(types.len_type, types.Kind(types.Array))
def array_len(context, builder, sig, args):
(aryty,) = sig.args
(ary,) = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
shapeary = ary.shape
return builder.extract_value(shapeary, 0)
#-------------------------------------------------------------------------------
@builtin_attr
@impl_attribute(types.Array, "shape", types.Kind(types.UniTuple))
def array_shape(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
return array.shape
@builtin_attr
@impl_attribute(types.Array, "strides", types.Kind(types.UniTuple))
def array_strides(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
return array.strides
@builtin_attr
@impl_attribute(types.Array, "ndim", types.intp)
def array_ndim(context, builder, typ, value):
return context.get_constant(types.intp, typ.ndim)
@builtin_attr
@impl_attribute(types.Array, "size", types.intp)
def array_size(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
dims = cgutils.unpack_tuple(builder, array.shape, typ.ndim)
return reduce(builder.mul, dims[1:], dims[0])
#-------------------------------------------------------------------------------
def caster(restype):
@implement(restype, types.Any)
def _cast(context, builder, sig, args):
[val] = args
[valty] = sig.args
return context.cast(builder, val, valty, restype)
return _cast
builtin(caster(types.int8))
builtin(caster(types.int16))
builtin(caster(types.int32))
builtin(caster(types.int64))
builtin(caster(types.uint8))
builtin(caster(types.uint16))
builtin(caster(types.uint32))
builtin(caster(types.uint64))
builtin(caster(types.float32))
builtin(caster(types.float64))
builtin(caster(types.complex64))
builtin(caster(types.complex128))
#-------------------------------------------------------------------------------
@builtin
@implement(max, types.VarArg)
def max_impl(context, builder, sig, args):
argtys = sig.args
for a in argtys:
if a not in types.number_domain:
raise AssertionError("only implemented for numeric types")
def domax(a, b):
at, av = a
bt, bv = b
ty = context.typing_context.unify_types(at, bt)
cav = context.cast(builder, av, at, ty)
cbv = context.cast(builder, bv, bt, ty)
cmpsig = typing.signature(types.boolean, ty, ty)
ge = context.get_function(">=", cmpsig)
pred = ge(builder, (cav, cbv))
res = builder.select(pred, cav, cbv)
return ty, res
typvals = zip(argtys, args)
resty, resval = reduce(domax, typvals)
return resval
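# Pairwise-reduction sketch (illustrative): max(a, b, c) is lowered roughly as
#   t = a if a >= b else b
#   r = t if t >= c else c
# with the operands cast to the unified numeric type before each comparison.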
@builtin
@implement(min, types.VarArg)
def min_impl(context, builder, sig, args):
argtys = sig.args
for a in argtys:
if a not in types.number_domain:
raise AssertionError("only implemented for numeric types")
def domax(a, b):
at, av = a
bt, bv = b
ty = context.typing_context.unify_types(at, bt)
cav = context.cast(builder, av, at, ty)
cbv = context.cast(builder, bv, bt, ty)
cmpsig = typing.signature(types.boolean, ty, ty)
le = context.get_function("<=", cmpsig)
pred = le(builder, (cav, cbv))
res = builder.select(pred, cav, cbv)
return ty, res
typvals = zip(argtys, args)
resty, resval = reduce(domax, typvals)
return resval
@builtin
@implement(round, types.float32)
def round_impl_f32(context, builder, sig, args):
module = cgutils.get_module(builder)
fnty = Type.function(Type.float(), [Type.float()])
if utils.IS_PY3:
fn = module.get_or_insert_function(fnty, name="numba.roundf")
else:
fn = module.get_or_insert_function(fnty, name="roundf")
assert fn.is_declaration
return builder.call(fn, args)
@builtin
@implement(round, types.float64)
def round_impl_f64(context, builder, sig, args):
module = cgutils.get_module(builder)
fnty = Type.function(Type.double(), [Type.double()])
if utils.IS_PY3:
fn = module.get_or_insert_function(fnty, name="numba.round")
else:
fn = module.get_or_insert_function(fnty, name="round")
assert fn.is_declaration
return builder.call(fn, args)
#-------------------------------------------------------------------------------
@builtin
@implement(int, types.Any)
def int_impl(context, builder, sig, args):
[ty] = sig.args
[val] = args
return context.cast(builder, val, ty, sig.return_type)
@builtin
@implement(float, types.Any)
def float_impl(context, builder, sig, args):
[ty] = sig.args
[val] = args
return context.cast(builder, val, ty, sig.return_type)
@builtin
@implement(complex, types.VarArg)
def complex_impl(context, builder, sig, args):
if len(sig.args) == 1:
[realty] = sig.args
[real] = args
real = context.cast(builder, real, realty, types.float64)
imag = context.get_constant(types.float64, 0)
elif len(sig.args) == 2:
[realty, imagty] = sig.args
[real, imag] = args
real = context.cast(builder, real, realty, types.float64)
imag = context.cast(builder, imag, imagty, types.float64)
cmplx = Complex128(context, builder)
cmplx.real = real
cmplx.imag = imag
return cmplx._getvalue()
# -----------------------------------------------------------------------------
@builtin_attr
@impl_attribute(types.Module(math), "pi", types.float64)
def math_pi_impl(context, builder, typ, value):
return context.get_constant(types.float64, math.pi)
@builtin_attr
@impl_attribute(types.Module(math), "e", types.float64)
def math_e_impl(context, builder, typ, value):
return context.get_constant(types.float64, math.e)
########NEW FILE########
__FILENAME__ = cpu
from __future__ import print_function, absolute_import
import sys
import llvm.core as lc
import llvm.passes as lp
import llvm.ee as le
from llvm.workaround import avx_support
from numba import _dynfunc, _helperlib, config
from numba.callwrapper import PyCallWrapper
from .base import BaseContext
from numba import utils
from numba.targets import intrinsics, mathimpl, npyimpl
from .options import TargetOptions
def _windows_symbol_hacks_32bits():
# if we don't have _ftol2, bind _ftol as _ftol2
ftol2 = le.dylib_address_of_symbol("_ftol2")
if not ftol2:
ftol = le.dylib_address_of_symbol("_ftol")
assert ftol
le.dylib_add_symbol("_ftol2", ftol)
class CPUContext(BaseContext):
def init(self):
self.execmodule = lc.Module.new("numba.exec")
eb = le.EngineBuilder.new(self.execmodule).opt(3)
if not avx_support.detect_avx_support():
eb.mattrs("-avx")
self.tm = tm = eb.select_target()
self.engine = eb.create(tm)
self.pm = self.build_pass_manager()
self.native_funcs = utils.UniqueDict()
self.cmath_provider = {}
self.is32bit = (tuple.__itemsize__ == 4)
# map math functions
self.map_math_functions()
self.map_numpy_math_functions()
# Add target specific implementations
self.insert_func_defn(mathimpl.registry.functions)
self.insert_func_defn(npyimpl.registry.functions)
def build_pass_manager(self):
if config.OPT == 3:
            # This uses the same passes as clang -O3
pms = lp.build_pass_managers(tm=self.tm, opt=3,
loop_vectorize=True,
fpm=False)
return pms.pm
else:
            # This uses a minimal set of passes for fast compilation.
# TODO: make it generate vector code
tm = self.tm
pm = lp.PassManager.new()
pm.add(tm.target_data.clone())
pm.add(lp.TargetLibraryInfo.new(tm.triple))
            # Re-enable to provide target information for vectorization
# tm.add_analysis_passes(pm)
passes = '''
basicaa
scev-aa
mem2reg
sroa
adce
dse
sccp
instcombine
simplifycfg
loops
indvars
loop-simplify
licm
simplifycfg
instcombine
loop-vectorize
instcombine
simplifycfg
globalopt
globaldce
'''.split()
for p in passes:
pm.add(lp.Pass.new(p))
return pm
def map_math_functions(self):
le.dylib_add_symbol("numba.math.cpow", _helperlib.get_cpow())
le.dylib_add_symbol("numba.math.sdiv", _helperlib.get_sdiv())
le.dylib_add_symbol("numba.math.srem", _helperlib.get_srem())
le.dylib_add_symbol("numba.math.udiv", _helperlib.get_udiv())
le.dylib_add_symbol("numba.math.urem", _helperlib.get_urem())
if sys.platform.startswith('win32') and not le.dylib_address_of_symbol('__ftol2'):
le.dylib_add_symbol("__ftol2", _helperlib.get_fptoui())
elif sys.platform.startswith('linux') and not le.dylib_address_of_symbol('__fixunsdfdi'):
le.dylib_add_symbol("__fixunsdfdi", _helperlib.get_fptoui())
# Necessary for Python3
le.dylib_add_symbol("numba.round", _helperlib.get_round_even())
le.dylib_add_symbol("numba.roundf", _helperlib.get_roundf_even())
# windows symbol hacks
if sys.platform.startswith('win32') and self.is32bit:
_windows_symbol_hacks_32bits()
# List available C-math
for fname in intrinsics.INTR_MATH:
if le.dylib_address_of_symbol(fname):
                # The symbol already exists
self.cmath_provider[fname] = 'builtin'
else:
                # The symbol does not exist; bind the implementation from C code
imp = getattr(_helperlib, "get_%s" % fname)
le.dylib_add_symbol(fname, imp())
self.cmath_provider[fname] = 'indirect'
def map_numpy_math_functions(self):
# add the symbols for numpy math to the execution environment.
import numba._npymath_exports as npymath
for sym in npymath.symbols:
le.dylib_add_symbol(*sym)
def dynamic_map_function(self, func):
name, ptr = self.native_funcs[func]
le.dylib_add_symbol(name, ptr)
def optimize(self, module):
self.pm.run(module)
def get_executable(self, func, fndesc):
"""
Returns
-------
(cfunc, fnptr)
- cfunc
callable function (Can be None)
- fnptr
callable function address
"""
if self.is32bit:
dmf = intrinsics.DivmodFixer()
dmf.run(func.module)
im = intrinsics.IntrinsicMapping(self)
im.run(func.module)
if not fndesc.native:
self.optimize_pythonapi(func)
cfunc, fnptr = self.prepare_for_call(func, fndesc)
return cfunc, fnptr
def prepare_for_call(self, func, fndesc):
wrapper, api = PyCallWrapper(self, func.module, func, fndesc).build()
self.optimize(func.module)
if config.DUMP_OPTIMIZED:
print(("OPTIMIZED DUMP %s" %
fndesc.qualified_name).center(80,'-'))
print(func.module)
print('=' * 80)
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" %
fndesc.qualified_name).center(80, '-'))
print(self.tm.emit_assembly(func.module))
print('=' * 80)
# Map module.__dict__
le.dylib_add_symbol(".pymodule.dict." + fndesc.pymod.__name__,
id(fndesc.pymod.__dict__))
# Code gen
self.engine.add_module(func.module)
baseptr = self.engine.get_pointer_to_function(func)
fnptr = self.engine.get_pointer_to_function(wrapper)
cfunc = _dynfunc.make_function(fndesc.pymod, fndesc.name, fndesc.doc,
fnptr)
if fndesc.native:
self.native_funcs[cfunc] = fndesc.mangled_name, baseptr
return cfunc, fnptr
def optimize_pythonapi(self, func):
        # Simplify the function using a low-optimization function pass manager
pms = lp.build_pass_managers(tm=self.tm, opt=1,
mod=func.module)
fpm = pms.fpm
fpm.initialize()
fpm.run(func)
fpm.finalize()
# remove extra refct api calls
remove_refct_calls(func)
# ----------------------------------------------------------------------------
# TargetOptions
class CPUTargetOptions(TargetOptions):
OPTIONS = {
"nopython": bool,
"forceobj": bool,
}
# ----------------------------------------------------------------------------
# Internal
def remove_refct_calls(func):
"""
    Remove redundant incref/decref calls on a per-block basis
"""
for bb in func.basic_blocks:
remove_null_refct_call(bb)
remove_refct_pairs(bb)
def remove_null_refct_call(bb):
"""
    Remove refcount API calls on a NULL pointer
"""
for inst in bb.instructions:
if isinstance(inst, lc.CallOrInvokeInstruction):
fname = inst.called_function.name
if fname == "Py_IncRef" or fname == "Py_DecRef":
arg = inst.operands[0]
if isinstance(arg, lc.ConstantPointerNull):
inst.erase_from_parent()
def remove_refct_pairs(bb):
"""
Remove incref decref pairs on the same variable
"""
didsomething = True
while didsomething:
didsomething = False
increfs = {}
decrefs = {}
# Mark
for inst in bb.instructions:
if isinstance(inst, lc.CallOrInvokeInstruction):
fname = inst.called_function.name
if fname == "Py_IncRef":
arg = inst.operands[0]
increfs[arg] = inst
elif fname == "Py_DecRef":
arg = inst.operands[0]
decrefs[arg] = inst
# Sweep
for val in increfs.keys():
if val in decrefs:
increfs[val].erase_from_parent()
decrefs[val].erase_from_parent()
didsomething = True
########NEW FILE########
__FILENAME__ = descriptors
"""
Target Descriptors
"""
from __future__ import print_function, division, absolute_import
class TargetDescriptor(object):
pass
########NEW FILE########
__FILENAME__ = imputils
from __future__ import print_function, absolute_import, division
import functools
from numba.typing import signature
from numba import cgutils, types
def implement(func, *argtys):
def wrapper(impl):
@functools.wraps(impl)
def res(context, builder, sig, args):
ret = impl(context, builder, sig, args)
return ret
res.signature = signature(types.Any, *argtys)
res.key = func
return res
return wrapper
def impl_attribute(ty, attr, rtype):
def wrapper(impl):
@functools.wraps(impl)
def res(context, builder, typ, value):
ret = impl(context, builder, typ, value)
return ret
res.return_type = rtype
res.key = (ty, attr)
return res
return wrapper
def user_function(func, fndesc, libs):
def imp(context, builder, sig, args):
func = context.declare_function(cgutils.get_module(builder), fndesc)
status, retval = context.call_function(builder, func, fndesc.restype,
fndesc.argtypes, args)
with cgutils.if_unlikely(builder, status.err):
context.return_errcode_propagate(builder, status.code)
return retval
imp.signature = signature(fndesc.restype, *fndesc.argtypes)
imp.key = func
imp.libs = tuple(libs)
return imp
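# Note: user_function() above wraps a compiled numba function so that other
# compiled code can call it: it declares the callee's symbol in the current
# module, forwards the arguments, and propagates any error status reported
# by the callee.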
def python_attr_impl(cls, attr, atyp):
@impl_attribute(cls, attr, atyp)
def imp(context, builder, typ, value):
api = context.get_python_api(builder)
aval = api.object_getattr_string(value, attr)
with cgutils.ifthen(builder, cgutils.is_null(builder, aval)):
context.return_exc(builder)
if isinstance(atyp, types.Method):
return aval
else:
nativevalue = api.to_native_value(aval, atyp)
api.decref(aval)
return nativevalue
return imp
class Registry(object):
def __init__(self):
self.functions = []
self.attributes = []
def register(self, item):
self.functions.append(item)
return item
def register_attr(self, item):
self.attributes.append(item)
return item
builtin_registry = Registry()
builtin = builtin_registry.register
builtin_attr = builtin_registry.register_attr
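# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# How an implementation is typically registered through the decorators above.
# The key "my_intrinsic" and the body are made up for illustration; a real
# implementation would emit LLVM IR through `builder`.
_demo_registry = Registry()

@_demo_registry.register
@implement("my_intrinsic", types.int64)
def _demo_impl(context, builder, sig, args):
    # Simply pass the single int64 argument through unchanged.
    [val] = args
    return val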
########NEW FILE########
__FILENAME__ = intrinsics
"""
LLVM pass that converts intrinsic into other math calls
"""
from __future__ import print_function, absolute_import
import llvm.core as lc
class DivmodFixer(object):
"""
Fix 64-bit div/mod on 32-bit machines
"""
NAMES = 'sdiv', 'udiv', 'srem', 'urem'
I64 = lc.Type.int(64)
def run(self, module):
for func in module.functions:
self.run_on_func(func)
def run_on_func(self, func):
to_replace = []
for bb in func.basic_blocks:
for instr in bb.instructions:
opname = instr.opcode_name
if opname in self.NAMES and instr.type == self.I64:
to_replace.append((instr, "numba.math.%s" % opname))
if to_replace:
builder = lc.Builder.new(func.entry_basic_block)
for inst, name in to_replace:
builder.position_before(inst)
alt = self.declare(func.module, name)
replacement = builder.call(alt, inst.operands)
# fix replace_all_uses_with to not use ._ptr
inst.replace_all_uses_with(replacement._ptr)
inst.erase_from_parent()
def declare(self, module, fname):
fnty = lc.Type.function(self.I64, (self.I64, self.I64))
fn = module.get_or_insert_function(fnty, name=fname)
assert fn.is_declaration, ("%s is expected to be an intrinsic but "
"it is defined" % fname)
return fn
class IntrinsicMapping(object):
def __init__(self, context, mapping=None, availintr=None):
"""
Args
----
mapping:
Optional. Intrinsic name to alternative implementation.
Default to global MAPPING
availintr:
Optional. Available intrinsic set.
Default to global AVAILINTR
"""
self.context = context
self.mapping = mapping or MAPPING
self.availintr = availintr or AVAILINTR
def run(self, module):
self.apply_mapping(module)
self.translate_intrinsic_to_cmath(module)
def apply_mapping(self, module):
modified = []
for fn in module.functions:
if fn.is_declaration and fn.name in self.mapping:
imp = self.mapping[fn.name]
imp(self.context, fn)
modified.append(fn)
# Rename all modified functions
for fn in modified:
fn.name = "numba." + fn.name
if __debug__:
module.verify()
def translate_intrinsic_to_cmath(self, module):
for fn in self._iter_unavail(module):
# Rename unavailable intrinsic to libc calls
fn.name = INTR_TO_CMATH[fn.name]
if __debug__:
module.verify()
def _iter_unavail(self, module):
for fn in module.functions:
if fn.is_declaration and fn.name.startswith('llvm.'):
if fn.name not in self.availintr:
yield fn
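# powi_as_pow() below lowers llvm.powi (floating-point base, integer exponent)
# into llvm.pow by converting the exponent to the base's floating-point type.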
def powi_as_pow(context, fn):
builder = lc.Builder.new(fn.append_basic_block(""))
x, y = fn.args
fy = builder.sitofp(y, x.type)
pow = lc.Function.intrinsic(fn.module, lc.INTR_POW, [x.type])
builder.ret(builder.call(pow, (x, fy)))
MAPPING = {
"llvm.powi.f32": powi_as_pow,
"llvm.powi.f64": powi_as_pow,
}
AVAILINTR = ()
INTR_TO_CMATH = {
"llvm.pow.f32": "powf",
"llvm.pow.f64": "pow",
"llvm.sin.f32": "sinf",
"llvm.sin.f64": "sin",
"llvm.cos.f32": "cosf",
"llvm.cos.f64": "cos",
"llvm.sqrt.f32": "sqrtf",
"llvm.sqrt.f64": "sqrt",
"llvm.exp.f32": "expf",
"llvm.exp.f64": "exp",
"llvm.log.f32": "logf",
"llvm.log.f64": "log",
"llvm.log10.f32": "log10f",
"llvm.log10.f64": "log10",
"llvm.fabs.f32": "fabsf",
"llvm.fabs.f64": "fabs",
"llvm.floor.f32": "floorf",
"llvm.floor.f64": "floor",
"llvm.ceil.f32": "ceilf",
"llvm.ceil.f64": "ceil",
"llvm.trunc.f32": "truncf",
"llvm.trunc.f64": "trunc",
}
OTHER_CMATHS = '''
tan
tanf
sinh
sinhf
cosh
coshf
tanh
tanhf
asin
asinf
acos
acosf
atan
atanf
atan2
atan2f
asinh
asinhf
acosh
acoshf
atanh
atanhf
expm1
expm1f
log1p
log1pf
log10
log10f
fmod
fmodf
round
roundf
'''.split()
INTR_MATH = frozenset(INTR_TO_CMATH.values()) | frozenset(OTHER_CMATHS)
########NEW FILE########
__FILENAME__ = mathimpl
"""
Provide math calls that uses intrinsics or libc math functions.
"""
from __future__ import print_function, absolute_import, division
import math
import llvm.core as lc
from llvm.core import Type
from numba.targets.imputils import implement, Registry
from numba import types, cgutils, utils
from numba.typing import signature
registry = Registry()
register = registry.register
def unary_math_int_impl(fn, f64impl):
@register
@implement(fn, types.int64)
def s64impl(context, builder, sig, args):
[val] = args
fpval = builder.sitofp(val, Type.double())
sig = signature(types.float64, types.float64)
return f64impl(context, builder, sig, [fpval])
@register
@implement(fn, types.uint64)
def u64impl(context, builder, sig, args):
[val] = args
fpval = builder.uitofp(val, Type.double())
sig = signature(types.float64, types.float64)
return f64impl(context, builder, sig, [fpval])
def unary_math_intr(fn, intrcode):
@register
@implement(fn, types.float32)
def f32impl(context, builder, sig, args):
[val] = args
mod = cgutils.get_module(builder)
lty = context.get_value_type(types.float32)
intr = lc.Function.intrinsic(mod, intrcode, [lty])
return builder.call(intr, args)
@register
@implement(fn, types.float64)
def f64impl(context, builder, sig, args):
[val] = args
mod = cgutils.get_module(builder)
lty = context.get_value_type(types.float64)
intr = lc.Function.intrinsic(mod, intrcode, [lty])
return builder.call(intr, args)
unary_math_int_impl(fn, f64impl)
def unary_math_extern(fn, f32extern, f64extern):
@register
@implement(fn, types.float32)
def f32impl(context, builder, sig, args):
[val] = args
mod = cgutils.get_module(builder)
fnty = Type.function(Type.float(), [Type.float()])
fn = mod.get_or_insert_function(fnty, name=f32extern)
return builder.call(fn, (val,))
@register
@implement(fn, types.float64)
def f64impl(context, builder, sig, args):
[val] = args
mod = cgutils.get_module(builder)
fnty = Type.function(Type.double(), [Type.double()])
fn = mod.get_or_insert_function(fnty, name=f64extern)
return builder.call(fn, (val,))
unary_math_int_impl(fn, f64impl)
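# The calls below register float32/float64 implementations for each math.*
# function: unary_math_intr() maps a function onto an LLVM intrinsic, while
# unary_math_extern() calls out to the named libm-style symbols (bound into
# the process by the CPU context); unary_math_int_impl() adds int64/uint64
# overloads that promote their argument to float64 first.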
unary_math_intr(math.fabs, lc.INTR_FABS)
#unary_math_intr(math.sqrt, lc.INTR_SQRT)
unary_math_intr(math.exp, lc.INTR_EXP)
unary_math_intr(math.log, lc.INTR_LOG)
unary_math_intr(math.log10, lc.INTR_LOG10)
unary_math_intr(math.sin, lc.INTR_SIN)
unary_math_intr(math.cos, lc.INTR_COS)
#unary_math_intr(math.floor, lc.INTR_FLOOR)
#unary_math_intr(math.ceil, lc.INTR_CEIL)
#unary_math_intr(math.trunc, lc.INTR_TRUNC)
unary_math_extern(math.log1p, "log1pf", "log1p")
if utils.PYVERSION > (2, 6):
unary_math_extern(math.expm1, "expm1f", "expm1")
unary_math_extern(math.tan, "tanf", "tan")
unary_math_extern(math.asin, "asinf", "asin")
unary_math_extern(math.acos, "acosf", "acos")
unary_math_extern(math.atan, "atanf", "atan")
unary_math_extern(math.asinh, "asinhf", "asinh")
unary_math_extern(math.acosh, "acoshf", "acosh")
unary_math_extern(math.atanh, "atanhf", "atanh")
unary_math_extern(math.sinh, "sinhf", "sinh")
unary_math_extern(math.cosh, "coshf", "cosh")
unary_math_extern(math.tanh, "tanhf", "tanh")
unary_math_extern(math.ceil, "ceilf", "ceil")
unary_math_extern(math.floor, "floorf", "floor")
unary_math_extern(math.sqrt, "sqrtf", "sqrt")
unary_math_extern(math.trunc, "truncf", "trunc")
@register
@implement(math.isnan, types.float32)
def isnan_f32_impl(context, builder, sig, args):
[val] = args
return builder.not_(builder.fcmp(lc.FCMP_OEQ, val, val))
@register
@implement(math.isnan, types.float64)
def isnan_f64_impl(context, builder, sig, args):
[val] = args
return builder.not_(builder.fcmp(lc.FCMP_OEQ, val, val))
@register
@implement(math.isnan, types.int64)
def isnan_s64_impl(context, builder, sig, args):
return cgutils.false_bit
@register
@implement(math.isnan, types.uint64)
def isnan_u64_impl(context, builder, sig, args):
return cgutils.false_bit
POS_INF_F32 = lc.Constant.real(Type.float(), float("+inf"))
NEG_INF_F32 = lc.Constant.real(Type.float(), float("-inf"))
POS_INF_F64 = lc.Constant.real(Type.double(), float("+inf"))
NEG_INF_F64 = lc.Constant.real(Type.double(), float("-inf"))
@register
@implement(math.isinf, types.float32)
def isinf_f32_impl(context, builder, sig, args):
[val] = args
isposinf = builder.fcmp(lc.FCMP_OEQ, val, POS_INF_F32)
isneginf = builder.fcmp(lc.FCMP_OEQ, val, NEG_INF_F32)
return builder.or_(isposinf, isneginf)
@register
@implement(math.isinf, types.float64)
def isinf_f64_impl(context, builder, sig, args):
[val] = args
isposinf = builder.fcmp(lc.FCMP_OEQ, val, POS_INF_F64)
isneginf = builder.fcmp(lc.FCMP_OEQ, val, NEG_INF_F64)
return builder.or_(isposinf, isneginf)
@register
@implement(math.isinf, types.int64)
def isinf_s64_impl(context, builder, sig, args):
return cgutils.false_bit
@register
@implement(math.isinf, types.uint64)
def isinf_u64_impl(context, builder, sig, args):
return cgutils.false_bit
# -----------------------------------------------------------------------------
@register
@implement(math.atan2, types.int64, types.int64)
def atan2_s64_impl(context, builder, sig, args):
[y, x] = args
y = builder.sitofp(y, Type.double())
x = builder.sitofp(x, Type.double())
fsig = signature(types.float64, types.float64, types.float64)
return atan2_f64_impl(context, builder, fsig, (y, x))
@register
@implement(math.atan2, types.uint64, types.uint64)
def atan2_u64_impl(context, builder, sig, args):
[y, x] = args
y = builder.uitofp(y, Type.double())
x = builder.uitofp(x, Type.double())
fsig = signature(types.float64, types.float64, types.float64)
return atan2_f64_impl(context, builder, fsig, (y, x))
@register
@implement(math.atan2, types.float32, types.float32)
def atan2_f32_impl(context, builder, sig, args):
assert len(args) == 2
mod = cgutils.get_module(builder)
fnty = Type.function(Type.float(), [Type.float(), Type.float()])
fn = mod.get_or_insert_function(fnty, name="atan2f")
return builder.call(fn, args)
@register
@implement(math.atan2, types.float64, types.float64)
def atan2_f64_impl(context, builder, sig, args):
assert len(args) == 2
mod = cgutils.get_module(builder)
fnty = Type.function(Type.double(), [Type.double(), Type.double()])
fn = mod.get_or_insert_function(fnty, name="atan2")
return builder.call(fn, args)
# -----------------------------------------------------------------------------
@register
@implement(math.hypot, types.int64, types.int64)
def hypot_s64_impl(context, builder, sig, args):
[x, y] = args
y = builder.sitofp(y, Type.double())
x = builder.sitofp(x, Type.double())
fsig = signature(types.float64, types.float64, types.float64)
return hypot_f64_impl(context, builder, fsig, (x, y))
@register
@implement(math.hypot, types.uint64, types.uint64)
def hypot_u64_impl(context, builder, sig, args):
[x, y] = args
y = builder.sitofp(y, Type.double())
x = builder.sitofp(x, Type.double())
fsig = signature(types.float64, types.float64, types.float64)
return hypot_f64_impl(context, builder, fsig, (x, y))
@register
@implement(math.hypot, types.float32, types.float32)
def hypot_f32_impl(context, builder, sig, args):
[x, y] = args
xx = builder.fmul(x, x)
yy = builder.fmul(y, y)
sqrtsig = signature(sig.return_type, sig.args[0])
sqrtimp = context.get_function(math.sqrt, sqrtsig)
xxyy = builder.fadd(xx, yy)
return sqrtimp(builder, [xxyy])
@register
@implement(math.hypot, types.float64, types.float64)
def hypot_f64_impl(context, builder, sig, args):
[x, y] = args
xx = builder.fmul(x, x)
yy = builder.fmul(y, y)
sqrtsig = signature(sig.return_type, sig.args[0])
sqrtimp = context.get_function(math.sqrt, sqrtsig)
xxyy = builder.fadd(xx, yy)
return sqrtimp(builder, [xxyy])
# -----------------------------------------------------------------------------
@register
@implement(math.radians, types.float64)
def radians_f64_impl(context, builder, sig, args):
[x] = args
rate = builder.fdiv(x, context.get_constant(types.float64, 360))
pi = context.get_constant(types.float64, math.pi)
two = context.get_constant(types.float64, 2)
twopi = builder.fmul(pi, two)
return builder.fmul(rate, twopi)
@register
@implement(math.radians, types.float32)
def radians_f32_impl(context, builder, sig, args):
[x] = args
rate = builder.fdiv(x, context.get_constant(types.float32, 360))
pi = context.get_constant(types.float32, math.pi)
two = context.get_constant(types.float32, 2)
twopi = builder.fmul(pi, two)
return builder.fmul(rate, twopi)
unary_math_int_impl(math.radians, radians_f64_impl)
# -----------------------------------------------------------------------------
@register
@implement(math.degrees, types.float64)
def degrees_f64_impl(context, builder, sig, args):
[x] = args
full = context.get_constant(types.float64, 360)
pi = context.get_constant(types.float64, math.pi)
two = context.get_constant(types.float64, 2)
twopi = builder.fmul(pi, two)
return builder.fmul(builder.fdiv(x, twopi), full)
@register
@implement(math.degrees, types.float32)
def degrees_f32_impl(context, builder, sig, args):
[x] = args
full = context.get_constant(types.float32, 360)
pi = context.get_constant(types.float32, math.pi)
two = context.get_constant(types.float32, 2)
twopi = builder.fmul(pi, two)
return builder.fdiv(builder.fmul(x, full), twopi)
unary_math_int_impl(math.degrees, degrees_f64_impl)
########NEW FILE########
__FILENAME__ = npyimpl
from __future__ import print_function, division, absolute_import
import numpy
import math
import sys
from llvm.core import Constant, Type, ICMP_UGT
from numba import typing, types, cgutils
from numba.targets.imputils import implement, Registry
from numba import numpy_support
import itertools
registry = Registry()
register = registry.register
class npy:
"""This will be used as an index of the npy_* functions"""
pass
def unary_npy_math_extern(fn):
setattr(npy, fn, fn)
fn_sym = eval("npy."+fn)
@register
@implement(fn_sym, types.int64)
def s64impl(context, builder, sig, args):
[val] = args
fpval = builder.sitofp(val, Type.double())
sig = typing.signature(types.float64, types.float64)
return f64impl(context, builder, sig, [fpval])
@register
@implement(fn_sym, types.uint64)
def u64impl(context, builder, sig, args):
[val] = args
fpval = builder.uitofp(val, Type.double())
sig = typing.signature(types.float64, types.float64)
return f64impl(context, builder, sig, [fpval])
n = "numba.npymath." + fn
@register
@implement(fn_sym, types.float64)
def f64impl(context, builder, sig, args):
[val] = args
mod = cgutils.get_module(builder)
fnty = Type.function(Type.double(), [Type.double()])
fn = mod.get_or_insert_function(fnty, name=n)
return builder.call(fn, (val,))
_externs = [ "exp2", "expm1", "log", "log2", "log10", "log1p", "deg2rad", "rad2deg" ]
for x in _externs:
unary_npy_math_extern(x)
def numpy_unary_ufunc(funckey, asfloat=False, scalar_input=False):
def impl(context, builder, sig, args):
[tyinp, tyout] = sig.args
[inp, out] = args
if isinstance(tyinp, types.Array):
scalar_inp = False
scalar_tyinp = tyinp.dtype
inp_ndim = tyinp.ndim
elif tyinp in types.number_domain:
scalar_inp = True
scalar_tyinp = tyinp
inp_ndim = 1
else:
raise TypeError('unknown type for input operand')
out_ndim = tyout.ndim
if asfloat:
promote_type = types.float64
elif scalar_tyinp in types.real_domain:
promote_type = types.float64
elif scalar_tyinp in types.signed_domain:
promote_type = types.int64
else:
promote_type = types.uint64
result_type = promote_type
# Temporary hack for __ftol2 llvm bug. Don't allow storing
# float results in uint64 array on windows.
if result_type in types.real_domain and \
tyout.dtype is types.uint64 and \
sys.platform.startswith('win32'):
raise TypeError('Cannot store result in uint64 array')
sig = typing.signature(result_type, promote_type)
if not scalar_inp:
iary = context.make_array(tyinp)(context, builder, inp)
oary = context.make_array(tyout)(context, builder, out)
fnwork = context.get_function(funckey, sig)
intpty = context.get_value_type(types.intp)
if not scalar_inp:
inp_shape = cgutils.unpack_tuple(builder, iary.shape, inp_ndim)
inp_strides = cgutils.unpack_tuple(builder, iary.strides, inp_ndim)
inp_data = iary.data
inp_layout = tyinp.layout
out_shape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
out_strides = cgutils.unpack_tuple(builder, oary.strides, out_ndim)
out_data = oary.data
out_layout = tyout.layout
ZERO = Constant.int(Type.int(intpty.width), 0)
ONE = Constant.int(Type.int(intpty.width), 1)
inp_indices = None
if not scalar_inp:
inp_indices = []
for i in range(inp_ndim):
x = builder.alloca(Type.int(intpty.width))
builder.store(ZERO, x)
inp_indices.append(x)
loopshape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
with cgutils.loop_nest(builder, loopshape, intp=intpty) as indices:
# Increment input indices.
# Since the output dimensions are already being incremented,
# we'll use that to set the input indices. In order to
# handle broadcasting, any input dimension of size 1 won't be
# incremented.
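# Example: an input of shape (1, 3) broadcast against an output of shape
# (4, 3): the input index for dimension 0 stays at 0, while dimension 1
# simply tracks the corresponding output index.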
if not scalar_inp:
bb_inc_inp_index = [cgutils.append_basic_block(builder,
'.inc_inp_index' + str(i)) for i in range(inp_ndim)]
bb_end_inc_index = cgutils.append_basic_block(builder, '.end_inc_index')
builder.branch(bb_inc_inp_index[0])
for i in range(inp_ndim):
with cgutils.goto_block(builder, bb_inc_inp_index[i]):
# If the shape of this dimension is 1, then leave the
# index at 0 so that this dimension is broadcasted over
# the corresponding output dimension.
cond = builder.icmp(ICMP_UGT, inp_shape[i], ONE)
with cgutils.ifthen(builder, cond):
# If number of input dimensions is less than output
# dimensions, the input shape is right justified so
# that last dimension of input shape corresponds to
# last dimension of output shape. Therefore, index
# output dimension starting at offset of diff of
# input and output dimension count.
builder.store(indices[out_ndim-inp_ndim+i], inp_indices[i])
# We have to check if this is last dimension and add
# appropriate block terminator before beginning next
# loop.
if i + 1 == inp_ndim:
builder.branch(bb_end_inc_index)
else:
builder.branch(bb_inc_inp_index[i+1])
builder.position_at_end(bb_end_inc_index)
inds = [builder.load(index) for index in inp_indices]
px = cgutils.get_item_pointer2(builder,
data=inp_data,
shape=inp_shape,
strides=inp_strides,
layout=inp_layout,
inds=inds)
x = builder.load(px)
else:
x = inp
po = cgutils.get_item_pointer2(builder,
data=out_data,
shape=out_shape,
strides=out_strides,
layout=out_layout,
inds=indices)
d_x = context.cast(builder, x, scalar_tyinp, promote_type)
tempres = fnwork(builder, [d_x])
res = context.cast(builder, tempres, result_type, tyout.dtype)
builder.store(res, po)
return out
return impl
def numpy_scalar_unary_ufunc(funckey, asfloat=True):
def impl(context, builder, sig, args):
[tyinp] = sig.args
tyout = sig.return_type
[inp] = args
if asfloat:
sig = typing.signature(types.float64, types.float64)
fnwork = context.get_function(funckey, sig)
if asfloat:
inp = context.cast(builder, inp, tyinp, types.float64)
res = fnwork(builder, [inp])
if asfloat:
res = context.cast(builder, res, types.float64, tyout)
return res
return impl
def register_unary_ufunc(ufunc, operator, asfloat=False):
def unary_ufunc(context, builder, sig, args):
imp = numpy_unary_ufunc(operator, asfloat=asfloat)
return imp(context, builder, sig, args)
def unary_ufunc_scalar_input(context, builder, sig, args):
imp = numpy_unary_ufunc(operator, scalar_input=True, asfloat=asfloat)
return imp(context, builder, sig, args)
def scalar_unary_ufunc(context, builder, sig, args):
imp = numpy_scalar_unary_ufunc(operator, asfloat)
return imp(context, builder, sig, args)
register(implement(ufunc, types.Kind(types.Array),
types.Kind(types.Array))(unary_ufunc))
for ty in types.number_domain:
register(implement(ufunc, ty,
types.Kind(types.Array))(unary_ufunc_scalar_input))
for ty in types.number_domain:
register(implement(ufunc, ty)(scalar_unary_ufunc))
register_unary_ufunc(numpy.exp, math.exp, asfloat=True)
register_unary_ufunc(numpy.exp2, npy.exp2, asfloat=True)
register_unary_ufunc(numpy.expm1, npy.expm1, asfloat=True)
register_unary_ufunc(numpy.log, npy.log, asfloat=True)
register_unary_ufunc(numpy.log2, npy.log2, asfloat=True)
register_unary_ufunc(numpy.log10, npy.log10, asfloat=True)
register_unary_ufunc(numpy.log1p, npy.log1p, asfloat=True)
register_unary_ufunc(numpy.deg2rad, npy.deg2rad, asfloat=True)
register_unary_ufunc(numpy.rad2deg, npy.rad2deg, asfloat=True)
register_unary_ufunc(numpy.sin, math.sin, asfloat=True)
register_unary_ufunc(numpy.cos, math.cos, asfloat=True)
register_unary_ufunc(numpy.tan, math.tan, asfloat=True)
register_unary_ufunc(numpy.sinh, math.sinh, asfloat=True)
register_unary_ufunc(numpy.cosh, math.cosh, asfloat=True)
register_unary_ufunc(numpy.tanh, math.tanh, asfloat=True)
register_unary_ufunc(numpy.arcsin, math.asin, asfloat=True)
register_unary_ufunc(numpy.arccos, math.acos, asfloat=True)
register_unary_ufunc(numpy.arctan, math.atan, asfloat=True)
register_unary_ufunc(numpy.arcsinh, math.asinh, asfloat=True)
register_unary_ufunc(numpy.arccosh, math.acosh, asfloat=True)
register_unary_ufunc(numpy.arctanh, math.atanh, asfloat=True)
register_unary_ufunc(numpy.sqrt, math.sqrt, asfloat=True)
register_unary_ufunc(numpy.floor, math.floor, asfloat=True)
register_unary_ufunc(numpy.ceil, math.ceil, asfloat=True)
register_unary_ufunc(numpy.trunc, math.trunc, asfloat=True)
register_unary_ufunc(numpy.absolute, types.abs_type)
register_unary_ufunc(numpy.sign, types.sign_type)
register_unary_ufunc(numpy.negative, types.neg_type)
def numpy_binary_ufunc(funckey, divbyzero=False, scalar_inputs=False,
asfloat=False):
def impl(context, builder, sig, args):
[tyinp1, tyinp2, tyout] = sig.args
[inp1, inp2, out] = args
if isinstance(tyinp1, types.Array):
scalar_inp1 = False
scalar_tyinp1 = tyinp1.dtype
inp1_ndim = tyinp1.ndim
elif tyinp1 in types.number_domain:
scalar_inp1 = True
scalar_tyinp1 = tyinp1
inp1_ndim = 1
else:
raise TypeError('unknown type for first input operand')
if isinstance(tyinp2, types.Array):
scalar_inp2 = False
scalar_tyinp2 = tyinp2.dtype
inp2_ndim = tyinp2.ndim
elif tyinp2 in types.number_domain:
scalar_inp2 = True
scalar_tyinp2 = tyinp2
inp2_ndim = 1
else:
raise TypeError('unknown type for second input operand')
out_ndim = tyout.ndim
if asfloat:
promote_type = types.float64
elif scalar_tyinp1 in types.real_domain or \
scalar_tyinp2 in types.real_domain:
promote_type = types.float64
elif scalar_tyinp1 in types.signed_domain or \
scalar_tyinp2 in types.signed_domain:
promote_type = types.int64
else:
promote_type = types.uint64
result_type = promote_type
# Temporary hack for __ftol2 llvm bug. Don't allow storing
# float results in uint64 array on windows.
if result_type in types.real_domain and \
tyout.dtype is types.uint64 and \
sys.platform.startswith('win32'):
raise TypeError('Cannot store result in uint64 array')
sig = typing.signature(result_type, promote_type, promote_type)
if not scalar_inp1:
i1ary = context.make_array(tyinp1)(context, builder, inp1)
if not scalar_inp2:
i2ary = context.make_array(tyinp2)(context, builder, inp2)
oary = context.make_array(tyout)(context, builder, out)
fnwork = context.get_function(funckey, sig)
intpty = context.get_value_type(types.intp)
if not scalar_inp1:
inp1_shape = cgutils.unpack_tuple(builder, i1ary.shape, inp1_ndim)
inp1_strides = cgutils.unpack_tuple(builder, i1ary.strides, inp1_ndim)
inp1_data = i1ary.data
inp1_layout = tyinp1.layout
if not scalar_inp2:
inp2_shape = cgutils.unpack_tuple(builder, i2ary.shape, inp2_ndim)
inp2_strides = cgutils.unpack_tuple(builder, i2ary.strides, inp2_ndim)
inp2_data = i2ary.data
inp2_layout = tyinp2.layout
out_shape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
out_strides = cgutils.unpack_tuple(builder, oary.strides, out_ndim)
out_data = oary.data
out_layout = tyout.layout
ZERO = Constant.int(Type.int(intpty.width), 0)
ONE = Constant.int(Type.int(intpty.width), 1)
inp1_indices = None
if not scalar_inp1:
inp1_indices = []
for i in range(inp1_ndim):
x = builder.alloca(Type.int(intpty.width))
builder.store(ZERO, x)
inp1_indices.append(x)
inp2_indices = None
if not scalar_inp2:
inp2_indices = []
for i in range(inp2_ndim):
x = builder.alloca(Type.int(intpty.width))
builder.store(ZERO, x)
inp2_indices.append(x)
loopshape = cgutils.unpack_tuple(builder, oary.shape, out_ndim)
with cgutils.loop_nest(builder, loopshape, intp=intpty) as indices:
# Increment input indices.
# Since the output dimensions are already being incremented,
# we'll use that to set the input indices. In order to
# handle broadcasting, any input dimension of size 1 won't be
# incremented.
def build_increment_blocks(inp_indices, inp_shape, inp_ndim, inp_num):
bb_inc_inp_index = [cgutils.append_basic_block(builder,
'.inc_inp{0}_index{1}'.format(inp_num, str(i))) for i in range(inp_ndim)]
bb_end_inc_index = cgutils.append_basic_block(builder,
'.end_inc{0}_index'.format(inp_num))
builder.branch(bb_inc_inp_index[0])
for i in range(inp_ndim):
with cgutils.goto_block(builder, bb_inc_inp_index[i]):
# If the shape of this dimension is 1, then leave the
# index at 0 so that this dimension is broadcasted over
# the corresponding input and output dimensions.
cond = builder.icmp(ICMP_UGT, inp_shape[i], ONE)
with cgutils.ifthen(builder, cond):
builder.store(indices[out_ndim-inp_ndim+i], inp_indices[i])
if i + 1 == inp_ndim:
builder.branch(bb_end_inc_index)
else:
builder.branch(bb_inc_inp_index[i+1])
builder.position_at_end(bb_end_inc_index)
if not scalar_inp1:
build_increment_blocks(inp1_indices, inp1_shape, inp1_ndim, '1')
if not scalar_inp2:
build_increment_blocks(inp2_indices, inp2_shape, inp2_ndim, '2')
if scalar_inp1:
x = inp1
else:
inds = [builder.load(index) for index in inp1_indices]
px = cgutils.get_item_pointer2(builder,
data=inp1_data,
shape=inp1_shape,
strides=inp1_strides,
layout=inp1_layout,
inds=inds)
x = builder.load(px)
if scalar_inp2:
y = inp2
else:
inds = [builder.load(index) for index in inp2_indices]
py = cgutils.get_item_pointer2(builder,
data=inp2_data,
shape=inp2_shape,
strides=inp2_strides,
layout=inp2_layout,
inds=inds)
y = builder.load(py)
po = cgutils.get_item_pointer2(builder,
data=out_data,
shape=out_shape,
strides=out_strides,
layout=out_layout,
inds=indices)
if divbyzero:
# Handle division
iszero = cgutils.is_scalar_zero(builder, y)
with cgutils.ifelse(builder, iszero, expect=False) as (then,
orelse):
with then:
# Divide by zero
if (scalar_tyinp1 in types.real_domain or
scalar_tyinp2 in types.real_domain) or \
not numpy_support.int_divbyzero_returns_zero:
# If y is float and is 0 also, return Nan; else
# return Inf
outltype = context.get_data_type(result_type)
shouldretnan = cgutils.is_scalar_zero(builder, x)
nan = Constant.real(outltype, float("nan"))
inf = Constant.real(outltype, float("inf"))
tempres = builder.select(shouldretnan, nan, inf)
res = context.cast(builder, tempres, result_type,
tyout.dtype)
elif tyout.dtype in types.signed_domain and \
not numpy_support.int_divbyzero_returns_zero:
res = Constant.int(context.get_data_type(tyout.dtype),
0x1 << (y.type.width-1))
else:
res = Constant.null(context.get_data_type(tyout.dtype))
assert res.type == po.type.pointee, \
(str(res.type), str(po.type.pointee))
builder.store(res, po)
with orelse:
# Normal
d_x = context.cast(builder, x, scalar_tyinp1, promote_type)
d_y = context.cast(builder, y, scalar_tyinp2, promote_type)
tempres = fnwork(builder, [d_x, d_y])
res = context.cast(builder, tempres, result_type, tyout.dtype)
assert res.type == po.type.pointee, (res.type,
po.type.pointee)
builder.store(res, po)
else:
# Handle non-division operations
d_x = context.cast(builder, x, scalar_tyinp1, promote_type)
d_y = context.cast(builder, y, scalar_tyinp2, promote_type)
tempres = fnwork(builder, [d_x, d_y])
res = context.cast(builder, tempres, result_type, tyout.dtype)
assert res.type == po.type.pointee, (res.type,
po.type.pointee)
builder.store(res, po)
return out
return impl
def register_binary_ufunc(ufunc, operator, asfloat=False, divbyzero=False):
def binary_ufunc(context, builder, sig, args):
imp = numpy_binary_ufunc(operator, asfloat=asfloat, divbyzero=divbyzero)
return imp(context, builder, sig, args)
def binary_ufunc_scalar_inputs(context, builder, sig, args):
imp = numpy_binary_ufunc(operator, scalar_inputs=True, asfloat=asfloat,
divbyzero=divbyzero)
return imp(context, builder, sig, args)
register(implement(ufunc, types.Kind(types.Array), types.Kind(types.Array),
types.Kind(types.Array))(binary_ufunc))
for ty in types.number_domain:
register(implement(ufunc, ty, types.Kind(types.Array),
types.Kind(types.Array))(binary_ufunc_scalar_inputs))
register(implement(ufunc, types.Kind(types.Array), ty,
types.Kind(types.Array))(binary_ufunc_scalar_inputs))
for ty1, ty2 in itertools.product(types.number_domain, types.number_domain):
register(implement(ufunc, ty1, ty2,
types.Kind(types.Array))(binary_ufunc_scalar_inputs))
register_binary_ufunc(numpy.add, '+')
register_binary_ufunc(numpy.subtract, '-')
register_binary_ufunc(numpy.multiply, '*')
register_binary_ufunc(numpy.divide, '/', asfloat=True, divbyzero=True)
register_binary_ufunc(numpy.arctan2, math.atan2, asfloat=True)
########NEW FILE########
__FILENAME__ = options
"""
Target Options
"""
from __future__ import print_function, division, absolute_import
class TargetOptions(object):
OPTIONS = {}
def __init__(self):
self.values = {}
def from_dict(self, dic):
for k, v in dic.items():
try:
ctor = self.OPTIONS[k]
except KeyError:
fmt = "Does not support option: '%s'"
raise KeyError(fmt % k)
else:
self.values[k] = ctor(v)
@classmethod
def parse_as_flags(cls, flags, options):
opt = cls()
opt.from_dict(options)
opt.set_flags(flags)
return flags
def set_flags(self, flags):
"""
Provide default flags setting logic.
Subclass can override.
"""
kws = self.values.copy()
if not kws.pop('nopython', False):
flags.set("enable_pyobject")
if kws.pop("forceobj", False):
flags.set("force_pyobject")
if kws.pop('looplift', True):
flags.set("enable_looplift")
if kws:
# Unread options?
raise NameError("Unrecognized options: %s" % kws.keys())
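# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# How a hypothetical TargetOptions subclass validates a raw options dict.
def _demo_target_options():
    class DemoOptions(TargetOptions):
        OPTIONS = {"nopython": bool, "forceobj": bool}

    opt = DemoOptions()
    opt.from_dict({"nopython": 1})
    assert opt.values == {"nopython": True}   # values are coerced by the OPTIONS ctor
    try:
        opt.from_dict({"bogus": 1})
    except KeyError:
        pass                                  # unsupported option names are rejected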
########NEW FILE########
__FILENAME__ = registry
from __future__ import print_function, division, absolute_import
from numba import utils, typing
from numba.targets import cpu
from numba.targets.descriptors import TargetDescriptor
from numba import dispatcher
# -----------------------------------------------------------------------------
# Default CPU target descriptors
class CPUTarget(TargetDescriptor):
options = cpu.CPUTargetOptions
typing_context = typing.Context()
target_context = cpu.CPUContext(typing_context)
class CPUOverloaded(dispatcher.Overloaded):
targetdescr = CPUTarget()
class TargetRegistry(utils.UniqueDict):
"""
Attributes
----------
ondemand:
A dictionary of target-name -> function, where function is executed
the first time a target is used. It is used for deferred
initialization for some targets (e.g. gpu).
"""
def __init__(self, *args, **kws):
super(TargetRegistry, self).__init__(*args, **kws)
self.ondemand = utils.UniqueDict()
def __getitem__(self, item):
if item in self.ondemand:
self[item] = self.ondemand[item]()
del self.ondemand[item]
return super(TargetRegistry, self).__getitem__(item)
target_registry = TargetRegistry()
target_registry['cpu'] = CPUOverloaded
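# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# The "ondemand" mapping defers construction of a target class until its first
# lookup.  This assumes utils.UniqueDict behaves like a plain dict for keys
# that have not been inserted before.
def _demo_ondemand_lookup():
    reg = TargetRegistry()
    reg.ondemand['demo'] = lambda: 'DemoTargetClass'   # deferred factory
    assert reg['demo'] == 'DemoTargetClass'            # factory runs on first access
    assert 'demo' not in reg.ondemand                  # and is discarded afterwards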
########NEW FILE########
__FILENAME__ = testing
from __future__ import print_function, division, absolute_import
import sys
import contextlib
if sys.version_info[0] >= 3:
from io import StringIO
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def discover_tests(startdir):
import numba.unittest_support as unittest
loader = unittest.TestLoader()
suite = loader.discover(startdir)
return suite
def run_tests(suite, descriptions=True, verbosity=2, buffer=True,
failfast=True):
import numba.unittest_support as unittest
runner = unittest.TextTestRunner(descriptions=descriptions,
verbosity=verbosity,
buffer=buffer, failfast=failfast)
result = runner.run(suite)
return result
def test():
suite = discover_tests("numba.tests")
return run_tests(suite).wasSuccessful()
def _flatten_suite(test):
"""Expand suite into list of tests
"""
from numba.unittest_support import TestSuite
if isinstance(test, TestSuite):
tests = []
for x in test:
tests.extend(_flatten_suite(x))
return tests
else:
return [test]
def multitest():
"""
Run tests in multiple processes.
"""
import numba.unittest_support as unittest
import multiprocessing as mp
loader = unittest.TestLoader()
startdir = "numba.tests"
suites = loader.discover(startdir)
tests = _flatten_suite(suites)
# Distribute tests to multiple processes
pool = mp.Pool(processes=mp.cpu_count())
results = pool.imap_unordered(_multiruntest, tests)
errct = 0
for ok, out in results:
if not ok:
print()
print("=== Error ===")
print(out)
errct += 1
else:
print('.', end='')
sys.stdout.flush()
print()
if errct == 0:
print("All passed!")
return True
else:
print("Error %d/%d" % (errct, len(tests)))
return False
def _multiruntest(suite):
import numba.unittest_support as unittest
stream = StringIO()
with contextlib.closing(stream):
runner = unittest.TextTestRunner(descriptions=False, verbosity=3,
buffer=True, stream=stream)
result = runner.run(suite)
return result.wasSuccessful(), stream.getvalue()
########NEW FILE########
__FILENAME__ = compile_with_pycc
from numba import exportmany, export
def mult(a, b):
return a * b
exportmany(['multf f4(f4,f4)', 'multi i4(i4,i4)'])(mult)
# Needs to link to helperlib to deal with complex arguments
# export('multc c16(c16,c16)')(mult)
export('mult f8(f8, f8)')(mult)
########NEW FILE########
__FILENAME__ = test_arrayconst
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types
myarray = np.arange(5)
myscalar = np.int32(64)
def use_array_const(i):
return myarray[i]
def use_arrayscalar_const():
return myscalar
class TestConstantArray(unittest.TestCase):
def test_array_const(self):
pyfunc = use_array_const
cres = compile_isolated(pyfunc, (types.int32,))
cfunc = cres.entry_point
for i in [0, 1, 2]:
self.assertEqual(pyfunc(i), cfunc(i))
def test_arrayscalar_const(self):
pyfunc = use_arrayscalar_const
cres = compile_isolated(pyfunc, ())
cfunc = cres.entry_point
self.assertEqual(pyfunc(), cfunc())
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_array_attr
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types
def array_shape(a, i):
return a.shape[i]
def array_strides(a, i):
return a.strides[i]
def array_ndim(a):
return a.ndim
def array_size(a):
return a.size
class TestArrayAttr(unittest.TestCase):
def test_shape(self):
pyfunc = array_shape
cres = compile_isolated(pyfunc, (types.int32[:,:], types.int32))
cfunc = cres.entry_point
a = np.arange(10).reshape(2, 5)
for i in range(a.ndim):
self.assertEqual(pyfunc(a, i), cfunc(a, i))
def test_strides(self):
pyfunc = array_strides
cres = compile_isolated(pyfunc, (types.int32[:,:], types.int32))
cfunc = cres.entry_point
a = np.arange(10).reshape(2, 5)
for i in range(a.ndim):
self.assertEqual(pyfunc(a, i), cfunc(a, i))
def test_ndim(self):
pyfunc = array_ndim
cres = compile_isolated(pyfunc, (types.int32[:,:],))
cfunc = cres.entry_point
a = np.arange(10).reshape(2, 5)
self.assertEqual(pyfunc(a), cfunc(a))
def test_size(self):
pyfunc = array_size
cres = compile_isolated(pyfunc, (types.int32[:,:],))
cfunc = cres.entry_point
a = np.arange(10).reshape(2, 5)
self.assertEqual(pyfunc(a), cfunc(a))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_array_manipulation
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def reshape_array(a, expected):
return (a.reshape(3, 3) == expected).all()
def flatten_array(a, expected):
return (a.flatten() == expected).all()
def ravel_array(a, expected):
return (a.ravel() == expected).all()
def transpose_array(a, expected):
return (a.transpose() == expected).all()
def squeeze_array(a, expected):
return (a.squeeze() == expected).all()
def convert_array(a, expected):
# astype takes no keyword arguments in numpy 1.6
return (a.astype('f4') == expected).all()
def add_axis1(a, expected):
return np.expand_dims(a, axis=0).shape == expected.shape
def add_axis2(a, expected):
return a[np.newaxis,:].shape == expected.shape
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self, flags=enable_pyobj_flags):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9)
expected = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_reshape_array_npm(self):
self.test_reshape_array(flags=no_pyobj_flags)
def test_flatten_array(self, flags=enable_pyobj_flags):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
expected = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_flatten_array_npm(self):
self.test_flatten_array(flags=no_pyobj_flags)
def test_ravel_array(self, flags=enable_pyobj_flags):
pyfunc = ravel_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
expected = np.arange(9).reshape(3, 3).ravel()
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_ravel_array_npm(self):
self.test_ravel_array(flags=no_pyobj_flags)
def test_transpose_array(self, flags=enable_pyobj_flags):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
expected = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_transpose_array_npm(self):
self.test_transpose_array(flags=no_pyobj_flags)
def test_squeeze_array(self, flags=enable_pyobj_flags):
pyfunc = squeeze_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(2*1*3*1*4).reshape(2,1,3,1,4)
expected = np.arange(2*1*3*1*4).reshape(2,1,3,1,4).squeeze()
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_squeeze_array_npm(self):
self.test_squeeze_array(flags=no_pyobj_flags)
def test_convert_array(self, flags=enable_pyobj_flags):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
expected = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_convert_array_npm(self):
self.test_convert_array(flags=no_pyobj_flags)
def test_add_axis1(self, flags=enable_pyobj_flags):
pyfunc = add_axis1
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9).reshape(3,3)
expected = np.arange(9).reshape(1,3,3)
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_add_axis1_npm(self):
self.test_add_axis1(flags=no_pyobj_flags)
def test_add_axis2(self, flags=enable_pyobj_flags):
pyfunc = add_axis2
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2),
flags=flags)
cfunc = cr.entry_point
a = np.arange(9).reshape(3,3)
expected = np.arange(9).reshape(1,3,3)
self.assertTrue(cfunc(a, expected))
@unittest.expectedFailure
def test_add_axis2_npm(self):
self.test_add_axis2(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_array_return
from __future__ import print_function, division, absolute_import
import numpy
from numba.compiler import compile_isolated
from numba import typeof
from numba import unittest_support as unittest
def array_return(a, i):
a[i] = 123
return a
class TestArrayReturn(unittest.TestCase):
def test_array_return(self):
a = numpy.arange(10)
i = 2
at, it = typeof(a), typeof(i)
cres = compile_isolated(array_return, (at, it))
cfunc = cres.entry_point
self.assertIs(a, cfunc(a, i))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_blackscholes
from __future__ import print_function
import numpy as np
import math
import numba.unittest_support as unittest
from timeit import default_timer as timer
from numba.compiler import compile_isolated, compile_extra, Flags
from numba import types, typing
RISKFREE = 0.02
VOLATILITY = 0.30
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
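# A1..A5, the 0.2316419 factor and RSQRT2PI are the coefficients of the
# well-known polynomial approximation of the cumulative standard normal
# distribution; cnd_array() and cnd() below evaluate it for arrays and
# scalars respectively.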
def cnd_array(d):
K = 1.0 / (1.0 + 0.2316419 * np.abs(d))
ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
return np.where(d > 0, 1.0 - ret_val, ret_val)
def cnd(d):
K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
if d > 0:
ret_val = 1.0 - ret_val
return ret_val
def blackscholes_arrayexpr(stockPrice, optionStrike, optionYears, Riskfree,
Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
sqrtT = np.sqrt(T)
d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_array(d1)
cndd2 = cnd_array(d2)
expRT = np.exp(- R * T)
callResult = (S * cndd1 - X * expRT * cndd2)
putResult = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1))
return callResult, putResult
def blackscholes_arrayexpr_jitted(stockPrice, optionStrike, optionYears,
Riskfree, Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
sqrtT = np.sqrt(T)
d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_array_jitted(d1)
cndd2 = cnd_array_jitted(d2)
expRT = np.exp(- R * T)
callResult = (S * cndd1 - X * expRT * cndd2)
putResult = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1))
return callResult, putResult
def blackscholes_scalar(callResult, putResult, stockPrice, optionStrike,
optionYears, Riskfree, Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
for i in range(len(S)):
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd(d1)
cndd2 = cnd(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
def blackscholes_scalar_jitted(callResult, putResult, stockPrice, optionStrike,
optionYears, Riskfree, Volatility):
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
for i in range(len(S)):
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_jitted(d1)
cndd2 = cnd_jitted(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
def randfloat(rand_var, low, high):
return (1.0 - rand_var) * low + rand_var * high
class TestBlackScholes(unittest.TestCase):
def test_array_expr(self):
flags = Flags()
flags.set("enable_pyobject")
global cnd_array_jitted
cr1 = compile_isolated(cnd_array, args=(), flags=flags)
cnd_array_jitted = cr1.entry_point
cr2 = compile_isolated(blackscholes_arrayexpr_jitted, args=(),
flags=flags)
jitted_bs = cr2.entry_point
OPT_N = 400
iterations = 10
stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0)
optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0)
optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0)
args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY
ts = timer()
for i in range(iterations):
callResultGold, putResultGold = blackscholes_arrayexpr(*args)
te = timer()
pytime = te - ts
ts = timer()
for i in range(iterations):
callResultNumba, putResultNumba = jitted_bs(*args)
te = timer()
jittime = te - ts
print("Python", pytime)
print("Numba", jittime)
print("Speedup: %s" % (pytime / jittime))
delta = np.abs(callResultGold - callResultNumba)
L1norm = delta.sum() / np.abs(callResultGold).sum()
print("L1 norm: %E" % L1norm)
print("Max absolute error: %E" % delta.max())
self.assertEqual(delta.max(), 0)
def test_scalar(self):
flags = Flags()
global cnd_jitted
cr1 = compile_isolated(cnd, (types.float64,))
cnd_jitted = cr1.entry_point
tyctx = cr1.typing_context
ctx = cr1.target_context
ctx.dynamic_map_function(cnd_jitted)
tyctx.insert_user_function(cnd_jitted,
ctx.get_user_function(cnd_jitted))
array = types.Array(types.float64, 1, 'C')
argtys = (array,) * 5 + (types.float64, types.float64)
cr2 = compile_extra(tyctx, ctx, blackscholes_scalar_jitted,
args=argtys, return_type=None, flags=flags,
locals={})
jitted_bs = cr2.entry_point
OPT_N = 400
iterations = 10
callResultGold = np.zeros(OPT_N)
putResultGold = np.zeros(OPT_N)
callResultNumba = np.zeros(OPT_N)
putResultNumba = np.zeros(OPT_N)
stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0)
optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0)
optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0)
args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY
ts = timer()
for i in range(iterations):
blackscholes_scalar(callResultGold, putResultGold, *args)
te = timer()
pytime = te - ts
ts = timer()
for i in range(iterations):
jitted_bs(callResultNumba, putResultNumba, *args)
te = timer()
jittime = te - ts
print("Python", pytime)
print("Numba", jittime)
print("Speedup: %s" % (pytime / jittime))
delta = np.abs(callResultGold - callResultNumba)
L1norm = delta.sum() / np.abs(callResultGold).sum()
print("L1 norm: %E" % L1norm)
print("Max absolute error: %E" % delta.max())
self.assertAlmostEqual(delta.max(), 0)
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_bubblesort
from __future__ import print_function
import numpy
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def bubblesort(X):
N = X.shape[0]
for end in range(N, 1, -1):
for i in range(end - 1):
cur = X[i]
if cur > X[i + 1]:
tmp = X[i]
X[i] = X[i + 1]
X[i + 1] = tmp
class TestBubbleSort(unittest.TestCase):
def test_bubblesort(self):
pyfunc = bubblesort
aryty = types.Array(dtype=types.int64, ndim=1, layout='C')
cr = compile_isolated(pyfunc, (aryty,))
cfunc = cr.entry_point
array = numpy.array(list(reversed(range(8))), dtype="int64")
control = array.copy()
cfunc(array)
pyfunc(control)
self.assertTrue((array == control).all())
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_builtins
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types, utils
import itertools
import functools
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def abs_usecase(x):
return abs(x)
def all_usecase(x, y):
if x == None and y == None:
return all([])
elif x == None:
return all([y])
elif y == None:
return all([x])
else:
return all([x, y])
def any_usecase(x, y):
if x == None and y == None:
return any([])
elif x == None:
return any([y])
elif y == None:
return any([x])
else:
return any([x, y])
def bool_usecase(x):
return bool(x)
def chr_usecase(x):
return chr(x)
def cmp_usecase(x, y):
return cmp(x, y)
def complex_usecase(x, y):
return complex(x, y)
def enumerate_usecase():
result = 0
for i, j in enumerate([1,2,3]):
result += i * j
return result
def filter_usecase(x, filter_func):
return filter(filter_func, x)
def float_usecase(x):
return float(x)
def format_usecase(x, y):
return x.format(y)
def hex_usecase(x):
return hex(x)
def int_usecase(x, base):
return int(x, base=base)
def long_usecase(x, base):
return long(x, base=base)
def map_usecase(x, map_func):
return map(map_func, x)
def max_usecase1(x, y):
return max(x, y)
def max_usecase2(x, y):
return max([x, y])
def min_usecase1(x, y):
return min(x, y)
def min_usecase2(x, y):
return min([x, y])
def oct_usecase(x):
return oct(x)
def ord_usecase(x):
return ord(x)
def reduce_usecase(reduce_func, x):
return functools.reduce(reduce_func, x)
def round_usecase(x):
return round(x)
def sum_usecase(x):
return sum(x)
def unichr_usecase(x):
return unichr(x)
class TestBuiltins(unittest.TestCase):
def test_abs(self, flags=enable_pyobj_flags):
pyfunc = abs_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertEqual(cfunc(x), pyfunc(x))
cr = compile_isolated(pyfunc, (types.float32,), flags=flags)
cfunc = cr.entry_point
for x in [-1.1, 0.0, 1.1]:
self.assertAlmostEqual(cfunc(x), pyfunc(x))
def test_abs_npm(self):
self.test_abs(flags=no_pyobj_flags)
def test_all(self, flags=enable_pyobj_flags):
pyfunc = all_usecase
cr = compile_isolated(pyfunc, (types.int32,types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1, None]
y_operands = [-1, 0, 1, None]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
@unittest.expectedFailure
def test_all_npm(self):
self.test_all(flags=no_pyobj_flags)
def test_any(self, flags=enable_pyobj_flags):
pyfunc = any_usecase
cr = compile_isolated(pyfunc, (types.int32,types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1, None]
y_operands = [-1, 0, 1, None]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
@unittest.expectedFailure
def test_any_npm(self):
self.test_any(flags=no_pyobj_flags)
def test_bool(self, flags=enable_pyobj_flags):
pyfunc = bool_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertEqual(cfunc(x), pyfunc(x))
def test_bool_npm(self):
self.test_bool(flags=no_pyobj_flags)
def test_bool_nonnumber(self, flags=enable_pyobj_flags):
pyfunc = bool_usecase
cr = compile_isolated(pyfunc, (types.string,), flags=flags)
cfunc = cr.entry_point
for x in ['x', '']:
self.assertEqual(cfunc(x), pyfunc(x))
cr = compile_isolated(pyfunc, (types.Dummy('list'),), flags=flags)
cfunc = cr.entry_point
for x in [[1], []]:
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_bool_nonnumber_npm(self):
self.test_bool_nonnumber(flags=no_pyobj_flags)
def test_chr(self, flags=enable_pyobj_flags):
pyfunc = chr_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in range(256):
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_chr_npm(self):
self.test_chr(flags=no_pyobj_flags)
@unittest.skipIf(utils.IS_PY3, "cmp is not available as a global in Py3")
def test_cmp(self, flags=enable_pyobj_flags):
pyfunc = cmp_usecase
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
@unittest.skipIf(utils.IS_PY3, "cmp is not available as a global in Py3")
@unittest.expectedFailure
def test_cmp_npm(self):
self.test_cmp(flags=no_pyobj_flags)
def test_complex(self, flags=enable_pyobj_flags):
pyfunc = complex_usecase
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
def test_complex_npm(self):
self.test_complex(flags=no_pyobj_flags)
def test_enumerate(self, flags=enable_pyobj_flags):
pyfunc = enumerate_usecase
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
self.assertEqual(cfunc(), pyfunc())
@unittest.expectedFailure
def test_enumerate_npm(self):
self.test_enumerate(flags=no_pyobj_flags)
def test_filter(self, flags=enable_pyobj_flags):
pyfunc = filter_usecase
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.Dummy('function_ptr')),
flags=flags)
cfunc = cr.entry_point
filter_func = lambda x: x % 2
x = [0, 1, 2, 3, 4]
self.assertSequenceEqual(list(cfunc(x, filter_func)),
list(pyfunc(x, filter_func)))
@unittest.expectedFailure
def test_filter_npm(self):
self.test_filter(flags=no_pyobj_flags)
def test_float(self, flags=enable_pyobj_flags):
pyfunc = float_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertAlmostEqual(cfunc(x), pyfunc(x))
cr = compile_isolated(pyfunc, (types.float32,), flags=flags)
cfunc = cr.entry_point
for x in [-1.1, 0.0, 1.1]:
self.assertAlmostEqual(cfunc(x), pyfunc(x))
cr = compile_isolated(pyfunc, (types.string,), flags=flags)
cfunc = cr.entry_point
for x in ['-1.1', '0.0', '1.1']:
self.assertAlmostEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_float_npm(self):
self.test_float(flags=no_pyobj_flags)
def test_format(self, flags=enable_pyobj_flags):
pyfunc = format_usecase
cr = compile_isolated(pyfunc, (types.string,types.int32,), flags=flags)
cfunc = cr.entry_point
x = '{0}'
for y in [-1, 0, 1]:
self.assertAlmostEqual(cfunc(x, y), pyfunc(x, y))
cr = compile_isolated(pyfunc, (types.string,
types.float32,), flags=flags)
cfunc = cr.entry_point
x = '{0}'
for y in [-1.1, 0.0, 1.1]:
self.assertAlmostEqual(cfunc(x, y), pyfunc(x, y))
cr = compile_isolated(pyfunc, (types.string,
types.string,), flags=flags)
cfunc = cr.entry_point
x = '{0}'
for y in ['a', 'b', 'c']:
self.assertAlmostEqual(cfunc(x, y), pyfunc(x, y))
@unittest.expectedFailure
def test_format_npm(self):
self.test_format(flags=no_pyobj_flags)
def test_hex(self, flags=enable_pyobj_flags):
pyfunc = hex_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_hex_npm(self):
self.test_hex(flags=no_pyobj_flags)
def test_int(self, flags=enable_pyobj_flags):
pyfunc = int_usecase
cr = compile_isolated(pyfunc, (types.string, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = ['-1', '0', '1', '10']
y_operands = [2, 8, 10, 16]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
@unittest.expectedFailure
def test_int_npm(self):
self.test_int(flags=no_pyobj_flags)
@unittest.skipIf(utils.IS_PY3, "long is not available as global is Py3")
def test_long(self, flags=enable_pyobj_flags):
pyfunc = long_usecase
cr = compile_isolated(pyfunc, (types.string, types.int64), flags=flags)
cfunc = cr.entry_point
x_operands = ['-1', '0', '1', '10']
y_operands = [2, 8, 10, 16]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
@unittest.skipIf(utils.IS_PY3, "cmp not available as global is Py3")
@unittest.expectedFailure
def test_long_npm(self):
self.test_long(flags=no_pyobj_flags)
def test_map(self, flags=enable_pyobj_flags):
pyfunc = map_usecase
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.Dummy('function_ptr')),
flags=flags)
cfunc = cr.entry_point
map_func = lambda x: x * 2
x = [0, 1, 2, 3, 4]
self.assertSequenceEqual(list(cfunc(x, map_func)),
list(pyfunc(x, map_func)))
@unittest.expectedFailure
def test_map_npm(self):
self.test_map(flags=no_pyobj_flags)
def test_max_1(self, flags=enable_pyobj_flags):
pyfunc = max_usecase1
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
def test_max_2(self, flags=enable_pyobj_flags):
pyfunc = max_usecase2
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
def test_max_npm_1(self):
self.test_max_1(flags=no_pyobj_flags)
@unittest.expectedFailure
def test_max_npm_2(self):
self.test_max_2(flags=no_pyobj_flags)
def test_min_1(self, flags=enable_pyobj_flags):
pyfunc = min_usecase1
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
def test_min_2(self, flags=enable_pyobj_flags):
pyfunc = min_usecase2
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertEqual(cfunc(x, y), pyfunc(x, y))
def test_min_npm_1(self):
self.test_min_1(flags=no_pyobj_flags)
@unittest.expectedFailure
def test_min_npm_2(self):
self.test_min_2(flags=no_pyobj_flags)
def test_oct(self, flags=enable_pyobj_flags):
pyfunc = oct_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-8, -1, 0, 1, 8]:
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_oct_npm(self):
self.test_oct(flags=no_pyobj_flags)
def test_ord(self, flags=enable_pyobj_flags):
pyfunc = ord_usecase
cr = compile_isolated(pyfunc, (types.string,), flags=flags)
cfunc = cr.entry_point
for x in ['a', u'\u2020']:
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_ord_npm(self):
self.test_ord(flags=no_pyobj_flags)
def test_reduce(self, flags=enable_pyobj_flags):
pyfunc = reduce_usecase
cr = compile_isolated(pyfunc, (types.Dummy('function_ptr'),
types.Dummy('list')),
flags=flags)
cfunc = cr.entry_point
reduce_func = lambda x, y: x + y
x = range(10)
self.assertEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
x = [x + x/10.0 for x in range(10)]
self.assertEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
x = [complex(x, x) for x in range(10)]
self.assertEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
@unittest.expectedFailure
def test_reduce_npm(self):
self.test_reduce(flags=no_pyobj_flags)
def test_round(self, flags=enable_pyobj_flags):
pyfunc = round_usecase
cr = compile_isolated(pyfunc, (types.float64,), flags=flags)
cfunc = cr.entry_point
for x in [-0.5, -0.1, 0.0, 0.1, 0.5, 1.5, 5.0]:
self.assertEqual(cfunc(x), pyfunc(x))
def test_round_npm(self):
self.test_round(flags=no_pyobj_flags)
def test_sum(self, flags=enable_pyobj_flags):
pyfunc = sum_usecase
cr = compile_isolated(pyfunc, (types.Dummy('list'),), flags=flags)
cfunc = cr.entry_point
x = range(10)
self.assertEqual(cfunc(x), pyfunc(x))
x = [x + x/10.0 for x in range(10)]
self.assertEqual(cfunc(x), pyfunc(x))
x = [complex(x, x) for x in range(10)]
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.expectedFailure
def test_sum_npm(self):
self.test_sum(flags=no_pyobj_flags)
@unittest.skipIf(utils.IS_PY3, "cmp not available as global is Py3")
def test_unichr(self, flags=enable_pyobj_flags):
pyfunc = unichr_usecase
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in range(0, 1000, 10):
self.assertEqual(cfunc(x), pyfunc(x))
@unittest.skipIf(utils.IS_PY3, "cmp not available as global is Py3")
@unittest.expectedFailure
def test_unichr_npm(self):
self.test_unichr(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_casting
import unittest
from numba.compiler import compile_isolated
from numba import types
import struct
def float_to_int(x):
return types.int32(x)
def int_to_float(x):
return types.float64(x) / 2
def float_to_unsigned(x):
return types.uint32(x)
def float_to_complex(x):
return types.complex128(x)
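# Note: each helper above passes its argument through a numba type
# constructor (e.g. types.int32(x)); the tests below check both the inferred
# return type and the converted value of the compiled function.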
class TestCasting(unittest.TestCase):
def test_float_to_int(self):
pyfunc = float_to_int
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.int32)
self.assertEqual(cfunc(12.3), pyfunc(12.3))
self.assertEqual(cfunc(12.3), int(12.3))
def test_int_to_float(self):
pyfunc = int_to_float
cr = compile_isolated(pyfunc, [types.int64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.float64)
self.assertEqual(cfunc(321), pyfunc(321))
self.assertEqual(cfunc(321), 321./2)
def test_float_to_unsigned(self):
pyfunc = float_to_unsigned
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.uint32)
self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
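# The expected value below reinterprets the int32 bit pattern of -3 as an
# unsigned 32-bit integer, i.e. the C-style result of truncating -3.21 and
# casting it to uint32.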
self.assertEqual(cfunc(-3.21), struct.unpack('I', struct.pack('i',
-3))[0])
def test_float_to_complex(self):
pyfunc = float_to_complex
cr = compile_isolated(pyfunc, [types.float64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.complex128)
self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
self.assertEqual(cfunc(-3.21), -3.21 + 0j)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cffi
from __future__ import print_function, division, absolute_import
from numba import unittest_support as unittest
from numba import cffi_support, types
from numba.compiler import compile_isolated
if cffi_support.SUPPORTED:
from cffi import FFI
ffi = FFI()
ffi.cdef("""
double sin(double x);
""")
C = ffi.dlopen(None) # loads the entire C namespace
c_sin = C.sin
def use_cffi_sin(x):
return c_sin(x)
@unittest.skipIf(not cffi_support.SUPPORTED, "CFFI not supported")
class TestCFFI(unittest.TestCase):
def test_cffi_sin_function(self):
signature = cffi_support.map_type(ffi.typeof(c_sin))
self.assertEqual(len(signature.args), 1)
self.assertEqual(signature.args[0], types.double)
pyfunc = use_cffi_sin
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
for x in [-1.2, -1, 0, 0.1]:
self.assertEqual(pyfunc(x), cfunc(x))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_complex_array
from __future__ import print_function, absolute_import
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
def copy(a, b):
for i in range(a.shape[0]):
b[i] = a[i]
class TestArray(unittest.TestCase):
def test_copy_complex64(self):
pyfunc = copy
carray = types.Array(types.complex64, 1, "C")
cres = compile_isolated(pyfunc, (carray, carray))
cfunc = cres.entry_point
a = np.arange(10, dtype="complex64") + 1j
control = np.zeros_like(a)
result = np.zeros_like(a)
pyfunc(a, control)
cfunc(a, result)
self.assertTrue(np.all(control == result))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_conversion
from __future__ import print_function
import itertools
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def identity(x):
return x
def addition(x, y):
return x + y
def equality(x, y):
return x == y
class TestConversion(unittest.TestCase):
"""
Testing Python to Native conversion
"""
def test_complex_identity(self):
pyfunc = identity
cres = compile_isolated(pyfunc, [types.complex64],
return_type=types.complex64)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
self.assertEqual(cres.entry_point(x=x), x)
cres = compile_isolated(pyfunc, [types.complex128],
return_type=types.complex128)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
self.assertEqual(cres.entry_point(x=x), x)
def test_complex_addition(self):
pyfunc = addition
cres = compile_isolated(pyfunc, [types.complex64, types.complex64],
return_type=types.complex64)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
y = x
self.assertEqual(cres.entry_point(x, y), x + y)
cres = compile_isolated(pyfunc, [types.complex128, types.complex128],
return_type=types.complex128)
xs = [1.0j, (1+1j), (-1-1j), (1+0j)]
for x in xs:
y = x
self.assertEqual(cres.entry_point(x, y), x + y)
def test_boolean_as_int(self):
pyfunc = equality
cres = compile_isolated(pyfunc, [types.boolean, types.intp])
cfunc = cres.entry_point
xs = True, False
ys = -1, 0, 1
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
def test_boolean_as_float(self):
pyfunc = equality
cres = compile_isolated(pyfunc, [types.boolean, types.float64])
cfunc = cres.entry_point
xs = True, False
ys = -1, 0, 1
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
def test_boolean_eq_boolean(self):
pyfunc = equality
cres = compile_isolated(pyfunc, [types.boolean, types.boolean])
cfunc = cres.entry_point
xs = True, False
ys = True, False
for xs, ys in itertools.product(xs, ys):
self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_create_arrays
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
my = np.empty(10)
return (my.shape == control.shape and my.strides == control.strides and
my.dtype == control.dtype)
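# np.empty returns uninitialized memory, so create_empty only compares
# shape, strides and dtype against the control array, never the values.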
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self, flags=enable_pyobj_flags):
pyfunc = create_array
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_arrays_npm(self):
self.test_create_arrays(flags=no_pyobj_flags)
def test_create_empty_array(self, flags=enable_pyobj_flags):
pyfunc = create_empty_array
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_empty_array_npm(self):
self.test_create_empty_array(flags=no_pyobj_flags)
def test_create_arange(self, flags=enable_pyobj_flags):
pyfunc = create_arange
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_arange_npm(self):
self.test_create_arange(flags=no_pyobj_flags)
def test_create_empty(self, flags=enable_pyobj_flags):
pyfunc = create_empty
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_empty_npm(self):
self.test_create_empty(flags=no_pyobj_flags)
def test_create_ones(self, flags=enable_pyobj_flags):
pyfunc = create_ones
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_ones_npm(self):
self.test_create_ones(flags=no_pyobj_flags)
def test_create_zeros(self, flags=enable_pyobj_flags):
pyfunc = create_zeros
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,),
flags=flags)
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
@unittest.expectedFailure
def test_create_zeros_npm(self):
self.test_create_zeros(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_ctypes
from __future__ import print_function, absolute_import, division
from ctypes import *
import sys
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
is_windows = sys.platform.startswith('win32')
if not is_windows:
proc = CDLL(None)
c_sin = proc.sin
c_sin.argtypes = [c_double]
c_sin.restype = c_double
def use_c_sin(x):
return c_sin(x)
ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin)
def use_ctype_wrapping(x):
return ctype_wrapping(x)
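# CFUNCTYPE turns the Python function into a ctypes function pointer;
# test_ctype_wrapping below checks that the compiled code can call through
# it just like the plain C sin binding above.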
savethread = pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = c_void_p
restorethread = pythonapi.PyEval_RestoreThread
restorethread.argtypes = [c_void_p]
restorethread.restype = None
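# PyEval_SaveThread releases the GIL and returns the thread state, and
# PyEval_RestoreThread reacquires it. Doing this from interpreted Python
# code is what makes use_c_pointer below segfault unless it runs as
# compiled code (see test_ctype_voidptr).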
def use_c_pointer(x):
"""
Running in Python will cause a segfault.
"""
threadstate = savethread()
x += 1
restorethread(threadstate)
return x
@unittest.skipIf(is_windows, "Test not supported on windows")
class TestCTypes(unittest.TestCase):
def test_c_sin(self):
pyfunc = use_c_sin
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_wrapping(self):
pyfunc = use_ctype_wrapping
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_voidptr(self):
pyfunc = use_c_pointer
# pyfunc will segfault if called
cres = compile_isolated(pyfunc, [types.int32])
cfunc = cres.entry_point
x = 123
self.assertEqual(cfunc(x), x + 1)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_dataflow
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def assignments(a):
b = c = str(a)
return b + c
def assignments2(a):
b = c = d = str(a)
return b + c + d
class TestDataFlow(unittest.TestCase):
def test_assignments(self, flags=force_pyobj_flags):
pyfunc = assignments
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertEqual(pyfunc(x), cfunc(x))
def test_assignments2(self, flags=force_pyobj_flags):
pyfunc = assignments2
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertEqual(pyfunc(x), cfunc(x))
if flags is force_pyobj_flags:
cfunc("a")
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_deprecations
from __future__ import print_function, absolute_import
import warnings
from numba import jit, autojit, vectorize
import numba.unittest_support as unittest
def dummy(): pass
def stub_vec(a):
return a
class TestDeprecation(unittest.TestCase):
def test_autojit(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
autojit(dummy)
self.assertEqual(len(w), 1)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_dispatcher
from __future__ import print_function, division, absolute_import
from numba import unittest_support as unittest
from numba.special import typeof
from numba import vectorize, types, jit
import numpy
def dummy(x):
return x
class TestDispatcher(unittest.TestCase):
def test_typeof(self):
self.assertEqual(typeof(numpy.int8(1)), types.int8)
self.assertEqual(typeof(numpy.uint16(1)), types.uint16)
self.assertEqual(typeof(numpy.float64(1)), types.float64)
self.assertEqual(typeof(numpy.complex128(1)), types.complex128)
def test_numba_interface(self):
"""
Check that vectorize can accept a decorated object.
"""
vectorize('f8(f8)')(jit(dummy))
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_dummyarray
from __future__ import print_function
import numba.unittest_support as unittest
import itertools
import numpy as np
from numba.dummyarray import Array
class TestSlicing(unittest.TestCase):
def assertSameContig(self, arr, nparr):
attrs = 'C_CONTIGUOUS', 'F_CONTIGUOUS'
for attr in attrs:
if arr.flags[attr] != nparr.flags[attr]:
if arr.size == 0 and nparr.size == 0:
# numpy <= 1.7 bug: some empty arrays are flagged contiguous and
# some are not
pass
else:
self.fail("contiguous flag mismatch:\ngot=%s\nexpect=%s" %
(arr.flags, nparr.flags))
#### 1D
def test_slice0_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
self.assertSameContig(arr, nparr)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[x:]
got = arr[x:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice1_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[:x]
got = arr[:x]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice2_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x, y in itertools.product(xx, xx):
expect = nparr[x:y]
got = arr[x:y]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
#### 2D
def test_slice0_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[x:]
got = arr[x:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
for x, y in itertools.product(xx, xx):
expect = nparr[x:, y:]
got = arr[x:, y:]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
def test_slice1_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for x in xx:
expect = nparr[:x]
got = arr[:x]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
self.assertSameContig(got, expect)
for x, y in itertools.product(xx, xx):
expect = nparr[:x, :y]
got = arr[:x, :y]
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
self.assertSameContig(got, expect)
def test_slice2_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
xx = -2, -1, 0, 1, 2
for s, t, u, v in itertools.product(xx, xx, xx, xx):
expect = nparr[s:t, u:v]
got = arr[s:t, u:v]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
for x, y in itertools.product(xx, xx):
expect = nparr[s:t, u:v]
got = arr[s:t, u:v]
self.assertSameContig(got, expect)
self.assertEqual(got.shape, expect.shape)
self.assertEqual(got.strides, expect.strides)
class TestExtent(unittest.TestCase):
def test_extent_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
s, e = arr.extent
self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize)
def test_extent_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
s, e = arr.extent
self.assertEqual(e - s, nparr.size * nparr.dtype.itemsize)
def test_extent_iter_1d(self):
nparr = np.empty(4)
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
[ext] = list(arr.iter_contiguous_extent())
self.assertEqual(ext, arr.extent)
def test_extent_iter_2d(self):
nparr = np.empty((4, 5))
arr = Array.from_desc(0, nparr.shape, nparr.strides,
nparr.dtype.itemsize)
[ext] = list(arr.iter_contiguous_extent())
self.assertEqual(ext, arr.extent)
self.assertEqual(len(list(arr[::2].iter_contiguous_extent())), 2)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_flow_control
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types
import itertools
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def for_loop_usecase1(x, y):
result = 0
for i in range(x):
result += i
return result
def for_loop_usecase2(x, y):
result = 0
for i, j in enumerate(range(x, y, -1)):
result += i * j
return result
def for_loop_usecase3(x, y):
result = 0
for i in [x,y]:
result += i
return result
def for_loop_usecase4(x, y):
result = 0
for i in range(10):
for j in range(10):
result += 1
return result
def for_loop_usecase5(x, y):
result = 0
for i in range(x):
result += 1
if result > y:
break
return result
def for_loop_usecase6(x, y):
result = 0
for i in range(x):
if i > y:
continue
result += 1
return result
def for_loop_usecase7(x, y):
for i in range(x):
x = 0
for j in range(x):
return 1
else:
pass
return 0
def for_loop_usecase8(x, y):
result = 0
for i in range(x, y, y - x + 1):
result += 1
return result
def for_loop_usecase9(x, y):
z = 0
for i in range(x):
x = 0
for j in range(x):
if j == x / 2:
z += j
break
else:
z += y
return z
def while_loop_usecase1(x, y):
result = 0
i = 0
while i < x:
result += i
i += 1
return result
def while_loop_usecase2(x, y):
result = 0
while result != x:
result += 1
return result
def while_loop_usecase3(x, y):
result = 0
i = 0
j = 0
while i < x:
while j < y:
result += i + j
i += 1
j += 1
return result
def while_loop_usecase4(x, y):
result = 0
while True:
result += 1
if result > x:
break
return result
def while_loop_usecase5(x, y):
result = 0
while result < x:
if result > y:
result += 2
continue
result += 1
return result
def ifelse_usecase1(x, y):
if x > 0:
pass
elif y > 0:
pass
else:
pass
return True
def ifelse_usecase2(x, y):
if x > y:
return 1
elif x == 0 or y == 0:
return 2
else:
return 3
def ifelse_usecase3(x, y):
if x > 0:
if y > 0:
return 1
elif y < 0:
return 1
else:
return 0
elif x < 0:
return 1
else:
return 0
def ifelse_usecase4(x, y):
if x == y:
return 1
def ternary_ifelse_usecase1(x, y):
return True if x > y else False
class TestFlowControl(unittest.TestCase):
def run_test(self, pyfunc, x_operands, y_operands,
flags=enable_pyobj_flags):
cr = compile_isolated(pyfunc, (types.int32, types.int32), flags=flags)
cfunc = cr.entry_point
for x, y in itertools.product(x_operands, y_operands):
pyerr = None
cerr = None
try:
pyres = pyfunc(x, y)
except Exception as e:
print("note: ", pyfunc, (x, y), "raises exception: %s" % e)
pyerr = e
try:
cres = cfunc(x, y)
except Exception as e:
print("note: ", cfunc, (x, y), "raises exception: %s" % e)
if pyerr is None:
raise
cerr = e
else:
if pyerr is not None:
self.fail("Invalid for pure-python but numba works\n" +
pyerr)
self.assertEqual(pyres, cres)
def test_for_loop1(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase1, [-10, 0, 10], [0], flags=flags)
def test_for_loop1_npm(self):
self.test_for_loop1(flags=no_pyobj_flags)
def test_for_loop2(self, flags=enable_pyobj_flags):
"""
TODO handle enumerate
"""
self.run_test(for_loop_usecase2, [-10, 0, 10], [-10, 0, 10],
flags=flags)
@unittest.expectedFailure
def test_for_loop2_npm(self):
self.test_for_loop2(flags=no_pyobj_flags)
def test_for_loop3(self, flags=enable_pyobj_flags):
"""
List requires pyobject
"""
self.run_test(for_loop_usecase3, [1], [2],
flags=flags)
@unittest.expectedFailure
def test_for_loop3_npm(self):
self.test_for_loop3(flags=no_pyobj_flags)
def test_for_loop4(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase4, [10], [10], flags=flags)
def test_for_loop4_npm(self):
self.test_for_loop4(flags=no_pyobj_flags)
def test_for_loop5(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase5, [100], [50], flags=flags)
def test_for_loop5_npm(self):
self.test_for_loop5(flags=no_pyobj_flags)
def test_for_loop6(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase6, [100], [50], flags=flags)
def test_for_loop6_npm(self):
self.test_for_loop6(flags=no_pyobj_flags)
def test_for_loop7(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase7, [5], [0], flags=flags)
def test_for_loop7_npm(self):
self.test_for_loop7(flags=no_pyobj_flags)
def test_for_loop8(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase8, [0, 1], [0, 2, 10], flags=flags)
def test_for_loop9(self, flags=enable_pyobj_flags):
self.run_test(for_loop_usecase9, [0, 1], [0, 2, 10], flags=flags)
def test_for_loop8_npm(self):
self.test_for_loop8(flags=no_pyobj_flags)
def test_while_loop1(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase1, [10], [0], flags=flags)
def test_while_loop1_npm(self):
self.test_while_loop1(flags=no_pyobj_flags)
def test_while_loop2(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase2, [10], [0], flags=flags)
def test_while_loop2_npm(self):
self.test_while_loop2(flags=no_pyobj_flags)
def test_while_loop3(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase3, [10], [10], flags=flags)
def test_while_loop3_npm(self):
self.test_while_loop3(flags=no_pyobj_flags)
def test_while_loop4(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase4, [10], [0], flags=flags)
def test_while_loop4_npm(self):
self.test_while_loop4(flags=no_pyobj_flags)
def test_while_loop5(self, flags=enable_pyobj_flags):
self.run_test(while_loop_usecase5, [0, 5, 10], [0, 5, 10], flags=flags)
def test_while_loop5_npm(self):
self.test_while_loop5(flags=no_pyobj_flags)
def test_ifelse1(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase1, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse1_npm(self):
self.test_ifelse1(flags=no_pyobj_flags)
def test_ifelse2(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase2, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse2_npm(self):
self.test_ifelse2(flags=no_pyobj_flags)
def test_ifelse3(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase3, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse3_npm(self):
self.test_ifelse3(flags=no_pyobj_flags)
def test_ifelse4(self, flags=enable_pyobj_flags):
self.run_test(ifelse_usecase4, [-1, 0, 1], [-1, 0, 1], flags=flags)
def test_ifelse4_npm(self):
self.test_ifelse4(flags=no_pyobj_flags)
def test_ternary_ifelse1(self, flags=enable_pyobj_flags):
self.run_test(ternary_ifelse_usecase1, [-1, 0, 1], [-1, 0, 1],
flags=flags)
def test_ternary_ifelse1_npm(self):
self.test_ternary_ifelse1(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main(verbosity=2)
########NEW FILE########
__FILENAME__ = test_indexing
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
import decimal
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
Noflags = Flags()
def slicing_1d_usecase(a, start, stop, step):
return a[start:stop:step]
def slicing_1d_usecase2(a, start, stop, step):
b = a[start:stop:step]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_1d_usecase3(a, start, stop):
b = a[start:stop]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_1d_usecase4(a):
b = a[:]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_1d_usecase5(a, start):
b = a[start:]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_1d_usecase6(a, stop):
b = a[:stop]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_2d_usecase(a, start1, stop1, step1, start2, stop2, step2):
return a[start1:stop1:step1, start2:stop2:step2]
def slicing_2d_usecase2(a, start1, stop1, step1, start2, stop2, step2):
b = a[start1:stop1:step1, start2:stop2:step2]
total = 0
for i in range(b.shape[0]):
for j in range(b.shape[1]):
total += b[i, j] * (i + j + 1)
return total
def slicing_2d_usecase3(a, start1, stop1, step1, index):
b = a[start1:stop1:step1, index]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_3d_usecase(a, index0, start1, index2):
b = a[index0, start1:, index2]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def slicing_3d_usecase2(a, index0, stop1, index2):
b = a[index0, :stop1, index2]
total = 0
for i in range(b.shape[0]):
total += b[i] * (i + 1)
return total
def integer_indexing_1d_usecase(a, i):
return a[i]
def integer_indexing_2d_usecase(a, i1, i2):
return a[i1,i2]
def ellipse_usecase(a):
return a[...,0]
def none_index_usecase(a):
return a[None]
def fancy_index_usecase(a, index):
return a[index]
def boolean_indexing_usecase(a, mask):
return a[mask]
class TestIndexing(unittest.TestCase):
def test_1d_slicing(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
self.assertTrue((pyfunc(a, 0, 10, 1) == cfunc(a, 0, 10, 1)).all())
self.assertTrue((pyfunc(a, 2, 3, 1) == cfunc(a, 2, 3, 1)).all())
self.assertTrue((pyfunc(a, 10, 0, 1) == cfunc(a, 10, 0, 1)).all())
self.assertTrue((pyfunc(a, 0, 10, -1) == cfunc(a, 0, 10, -1)).all())
self.assertTrue((pyfunc(a, 0, 10, 2) == cfunc(a, 0, 10, 2)).all())
@unittest.expectedFailure
def test_1d_slicing_npm(self):
"""
Return of arbitrary array is not supported yet
"""
self.test_1d_slicing(flags=Noflags)
def test_1d_slicing2(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase2
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
args = [(0, 10, 1),
(2, 3, 1),
(10, 0, 1),
(0, 10, -1),
(0, 10, 2)]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any
arraytype = types.Array(types.int32, 1, 'A')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(20, dtype='i4')[::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
args = [(0, 10, 1),
(2, 3, 1),
(10, 0, 1),
(0, 10, -1),
(0, 10, 2)]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_1d_slicing2_npm(self):
self.test_1d_slicing2(flags=Noflags)
def test_1d_slicing3(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase3
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
args = [(3, 10),
(2, 3),
(10, 0),
(0, 10),
(5, 10)]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any
arraytype = types.Array(types.int32, 1, 'A')
argtys = (arraytype, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(20, dtype='i4')[::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_1d_slicing3_npm(self):
self.test_1d_slicing3(flags=Noflags)
def test_1d_slicing4(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase4
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype,)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
self.assertEqual(pyfunc(a), cfunc(a))
# Any
arraytype = types.Array(types.int32, 1, 'A')
argtys = (arraytype,)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(20, dtype='i4')[::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
self.assertEqual(pyfunc(a), cfunc(a))
def test_1d_slicing4_npm(self):
self.test_1d_slicing4(flags=Noflags)
def test_1d_slicing5(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase5
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
args = [3, 2, 10, 0, 5]
for arg in args:
self.assertEqual(pyfunc(a, arg), cfunc(a, arg))
# Any
arraytype = types.Array(types.int32, 1, 'A')
argtys = (arraytype, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(20, dtype='i4')[::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
for arg in args:
self.assertEqual(pyfunc(a, arg), cfunc(a, arg))
def test_1d_slicing5_npm(self):
self.test_1d_slicing5(flags=Noflags)
def test_2d_slicing(self, flags=enable_pyobj_flags):
pyfunc = slicing_1d_usecase
arraytype = types.Array(types.int32, 2, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
self.assertTrue((pyfunc(a, 0, 10, 1) == cfunc(a, 0, 10, 1)).all())
self.assertTrue((pyfunc(a, 2, 3, 1) == cfunc(a, 2, 3, 1)).all())
self.assertTrue((pyfunc(a, 10, 0, 1) == cfunc(a, 10, 0, 1)).all())
self.assertTrue((pyfunc(a, 0, 10, -1) == cfunc(a, 0, 10, -1)).all())
self.assertTrue((pyfunc(a, 0, 10, 2) == cfunc(a, 0, 10, 2)).all())
pyfunc = slicing_2d_usecase
arraytype = types.Array(types.int32, 2, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
self.assertTrue((pyfunc(a, 0, 10, 1, 0, 10, 1) ==
cfunc(a, 0, 10, 1, 0, 10, 1)).all())
self.assertTrue((pyfunc(a, 2, 3, 1, 2, 3, 1) ==
cfunc(a, 2, 3, 1, 2, 3, 1)).all())
self.assertTrue((pyfunc(a, 10, 0, 1, 10, 0, 1) ==
cfunc(a, 10, 0, 1, 10, 0, 1)).all())
self.assertTrue((pyfunc(a, 0, 10, -1, 0, 10, -1) ==
cfunc(a, 0, 10, -1, 0, 10, -1)).all())
self.assertTrue((pyfunc(a, 0, 10, 2, 0, 10, 2) ==
cfunc(a, 0, 10, 2, 0, 10, 2)).all())
@unittest.expectedFailure
def test_2d_slicing_npm(self):
self.test_2d_slicing(flags=Noflags)
def test_2d_slicing2(self, flags=enable_pyobj_flags):
# C layout
pyfunc = slicing_2d_usecase2
arraytype = types.Array(types.int32, 2, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
args = [
(0, 10, 1, 0, 10, 1),
(2, 3, 1, 2, 3, 1),
(10, 0, 1, 10, 0, 1),
(0, 10, -1, 0, 10, -1),
(0, 10, 2, 0, 10, 2),
]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any layout
arraytype = types.Array(types.int32, 2, 'A')
argtys = (arraytype, types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_2d_slicing2_npm(self):
self.test_2d_slicing2(flags=Noflags)
def test_2d_slicing3(self, flags=enable_pyobj_flags):
# C layout
pyfunc = slicing_2d_usecase3
arraytype = types.Array(types.int32, 2, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32,
types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
args = [
(0, 10, 1, 0),
(2, 3, 1, 2),
(10, 0, 1, 9),
(0, 10, -1, 0),
(0, 10, 2, 4),
]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any layout
arraytype = types.Array(types.int32, 2, 'A')
argtys = (arraytype, types.int32, types.int32, types.int32,
types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_2d_slicing3_npm(self):
self.test_2d_slicing3(flags=Noflags)
def test_3d_slicing(self, flags=enable_pyobj_flags):
# C layout
pyfunc = slicing_3d_usecase
arraytype = types.Array(types.int32, 3, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(1000, dtype='i4').reshape(10, 10, 10)
args = [
(0, 9, 1),
(2, 3, 1),
(9, 0, 1),
(0, 9, -1),
(0, 9, 2),
]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any layout
arraytype = types.Array(types.int32, 3, 'A')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_3d_slicing_npm(self):
self.test_3d_slicing(flags=Noflags)
def test_3d_slicing2(self, flags=enable_pyobj_flags):
# C layout
pyfunc = slicing_3d_usecase2
arraytype = types.Array(types.int32, 3, 'C')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(1000, dtype='i4').reshape(10, 10, 10)
args = [
(0, 9, 1),
(2, 3, 1),
(9, 0, 1),
(0, 9, -1),
(0, 9, 2),
]
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
# Any layout
arraytype = types.Array(types.int32, 3, 'A')
argtys = (arraytype, types.int32, types.int32, types.int32)
cr = compile_isolated(pyfunc, argtys, flags=flags)
cfunc = cr.entry_point
a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)
for arg in args:
self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))
def test_3d_slicing2_npm(self):
self.test_3d_slicing2(flags=Noflags)
def test_1d_integer_indexing(self, flags=enable_pyobj_flags):
# C layout
pyfunc = integer_indexing_1d_usecase
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
self.assertEqual(pyfunc(a, 9), cfunc(a, 9))
self.assertEqual(pyfunc(a, -1), cfunc(a, -1))
# Any layout
arraytype = types.Array(types.int32, 1, 'A')
cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')[::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
self.assertEqual(pyfunc(a, 2), cfunc(a, 2))
self.assertEqual(pyfunc(a, -1), cfunc(a, -1))
def test_1d_integer_indexing_npm(self):
self.test_1d_integer_indexing(flags=Noflags)
def test_integer_indexing_1d_for_2d(self, flags=enable_pyobj_flags):
pyfunc = integer_indexing_1d_usecase
arraytype = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
self.assertTrue((pyfunc(a, 0) == cfunc(a, 0)).all())
self.assertTrue((pyfunc(a, 9) == cfunc(a, 9)).all())
self.assertTrue((pyfunc(a, -1) == cfunc(a, -1)).all())
@unittest.expectedFailure
def test_integer_indexing_1d_for_2d_npm(self):
self.test_integer_indexing_1d_for_2d(flags=Noflags)
def test_2d_integer_indexing(self, flags=enable_pyobj_flags):
# C layout
a = np.arange(100, dtype='i4').reshape(10, 10)
pyfunc = integer_indexing_2d_usecase
arraytype = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype, types.int32, types.int32),
flags=flags)
cfunc = cr.entry_point
self.assertEqual(pyfunc(a, 0, 0), cfunc(a, 0, 0))
self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
self.assertEqual(pyfunc(a, -1, -1), cfunc(a, -1, -1))
# Any layout
a = np.arange(100, dtype='i4').reshape(10, 10)[::2, ::2]
self.assertFalse(a.flags['C_CONTIGUOUS'])
self.assertFalse(a.flags['F_CONTIGUOUS'])
pyfunc = integer_indexing_2d_usecase
arraytype = types.Array(types.int32, 2, 'A')
cr = compile_isolated(pyfunc, (arraytype, types.int32, types.int32),
flags=flags)
cfunc = cr.entry_point
self.assertEqual(pyfunc(a, 0, 0), cfunc(a, 0, 0))
self.assertEqual(pyfunc(a, 2, 2), cfunc(a, 2, 2))
self.assertEqual(pyfunc(a, -1, -1), cfunc(a, -1, -1))
def test_2d_integer_indexing_npm(self):
self.test_2d_integer_indexing(flags=Noflags)
def test_2d_float_indexing(self, flags=enable_pyobj_flags):
a = np.arange(100, dtype='i4').reshape(10, 10)
pyfunc = integer_indexing_2d_usecase
arraytype = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype, types.float32, types.int32),
flags=flags)
cfunc = cr.entry_point
self.assertEqual(pyfunc(a, 0, 0), cfunc(a, 0, 0))
self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
self.assertEqual(pyfunc(a, -1, -1), cfunc(a, -1, -1))
def test_2d_float_indexing_npm(self):
self.test_2d_float_indexing(flags=Noflags)
def test_ellipse(self, flags=enable_pyobj_flags):
pyfunc = ellipse_usecase
arraytype = types.Array(types.int32, 2, 'C')
# TODO: should be able to handle this in nopython mode
cr = compile_isolated(pyfunc, (arraytype,), flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
self.assertTrue((pyfunc(a) == cfunc(a)).all())
@unittest.expectedFailure
def test_ellipse_npm(self):
self.test_ellipse(flags=Noflags)
def test_none_index(self, flags=enable_pyobj_flags):
pyfunc = none_index_usecase
arraytype = types.Array(types.int32, 2, 'C')
# TODO: should be able to handle this in nopython mode
cr = compile_isolated(pyfunc, (arraytype,), flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
self.assertTrue((pyfunc(a) == cfunc(a)).all())
@unittest.expectedFailure
def test_none_index_npm(self):
self.test_none_index(flags=Noflags)
def test_fancy_index(self, flags=enable_pyobj_flags):
pyfunc = fancy_index_usecase
arraytype = types.Array(types.int32, 2, 'C')
indextype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype, indextype), flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
index = np.array([], dtype='i4')
self.assertTrue((pyfunc(a, index) == cfunc(a, index)).all())
index = np.array([0], dtype='i4')
self.assertTrue((pyfunc(a, index) == cfunc(a, index)).all())
index = np.array([1,2], dtype='i4')
self.assertTrue((pyfunc(a, index) == cfunc(a, index)).all())
index = np.array([-1], dtype='i4')
self.assertTrue((pyfunc(a, index) == cfunc(a, index)).all())
@unittest.expectedFailure
def test_fancy_index_npm(self):
self.test_fancy_index(flags=Noflags)
def test_boolean_indexing(self, flags=enable_pyobj_flags):
pyfunc = boolean_indexing_usecase
arraytype = types.Array(types.int32, 2, 'C')
masktype = types.Array(types.boolean, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype, masktype), flags=flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
mask = np.array([True, False, True])
self.assertTrue((pyfunc(a, mask) == cfunc(a, mask)).all())
@unittest.expectedFailure
def test_boolean_indexing_npm(self):
self.test_boolean_indexing(flags=Noflags)
def test_conversion_setitem(self, flags=enable_pyobj_flags):
""" this used to work, and was used in one of the tutorials """
from numba import jit
def pyfunc(array):
for index in range(len(array)):
array[index] = index % decimal.Decimal(100)
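# index % decimal.Decimal(100) produces a Decimal object, so storing it
# into the integer array exercises object-to-native conversion on setitem.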
cfunc = jit("void(i8[:])")(pyfunc)
udt = np.arange(100, dtype='i8')
control = udt.copy()
pyfunc(control)
cfunc(udt)
self.assertTrue((udt == control).all())
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_interproc
from __future__ import print_function
from numba import jit, int32
from numba import unittest_support as unittest
def foo(a, b):
return a + b
def bar(a, b):
return cfoo(a, b) + b
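# Note: cfoo is not defined at module level; test_bar_call_foo installs a
# compiled version of foo as the global cfoo before compiling bar.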
@jit
def inner(x, y):
return x + y
@jit(nopython=True)
def outer(x, y):
return inner(x, y)
class TestInterProc(unittest.TestCase):
def test_bar_call_foo(self):
global cfoo
cfoo = jit((int32, int32), nopython=True)(foo)
cbar = jit((int32, int32), nopython=True)(bar)
self.assertEqual(cbar(1, 2), 1 + 2 + 2)
def test_callsite_compilation(self):
self.assertEqual(outer(1, 2), 1 + 2)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_ir
from __future__ import print_function
import numba.unittest_support as unittest
from numba import ir
class TestIR(unittest.TestCase):
def test_IRScope(self):
filename = "<?>"
top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1))
local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2))
apple = local.define('apple', loc=ir.Loc(filename=filename, line=3))
self.assertTrue(local.get('apple') is apple)
self.assertEqual(len(local.localvars), 1)
orange = top.define('orange', loc=ir.Loc(filename=filename, line=4))
self.assertEqual(len(local.localvars), 1)
self.assertEqual(len(top.localvars), 1)
self.assertTrue(top.get('orange') is orange)
self.assertTrue(local.get('orange') is orange)
more_orange = local.define('orange', loc=ir.Loc(filename=filename,
line=5))
self.assertTrue(top.get('orange') is orange)
self.assertTrue(local.get('orange') is not orange)
self.assertTrue(local.get('orange') is more_orange)
try:
bad_orange = local.define('orange', loc=ir.Loc(filename=filename,
line=5))
except ir.RedefinedError:
pass
else:
self.fail("Expecting an %s" % ir.RedefinedError)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_jitclasses
import numba.unittest_support as unittest
from numba import types, typing, compiler, utils
from numba.targets.cpu import CPUContext
class Car(object):
def __init__(self, value):
self.value = value
def move(self, x):
self.value += x
def use_car_value(car):
return car.value
def use_car_move(car, x):
car.move(x)
return car.value
class TestJITClasses(unittest.TestCase):
def setUp(self):
move_signature = typing.signature(types.none, types.int32,
recvr=types.Object(Car))
carattrs = {
"value": types.int32,
"move" : typing.new_method(Car.move, move_signature),
}
self.carattrs = carattrs
def test_use_car_value(self):
tyctx = typing.Context()
tyctx.insert_class(Car, self.carattrs)
cgctx = CPUContext(tyctx)
cgctx.insert_class(Car, self.carattrs)
car_object = types.Object(Car)
argtys = (car_object,)
flags = compiler.Flags()
cr = compiler.compile_extra(tyctx, cgctx, use_car_value, args=argtys,
return_type=None, flags=flags, locals={})
func = cr.entry_point
if cr.typing_error:
raise cr.typing_error
car = Car(value=123)
self.assertEqual(use_car_value(car), func(car))
def bm_python():
use_car_value(car)
def bm_numba():
func(car)
python = utils.benchmark(bm_python, maxsec=.1)
numba = utils.benchmark(bm_numba, maxsec=.1)
print(python)
print(numba)
def test_use_car_move(self):
tyctx = typing.Context()
tyctx.insert_class(Car, self.carattrs)
cgctx = CPUContext(tyctx)
cgctx.insert_class(Car, self.carattrs)
car_object = types.Object(Car)
argtys = (car_object, types.int32)
flags = compiler.Flags()
cr = compiler.compile_extra(tyctx, cgctx, use_car_move, args=argtys,
return_type=None, flags=flags, locals={})
func = cr.entry_point
if cr.typing_error:
raise cr.typing_error
car1 = Car(value=123)
car2 = Car(value=123)
self.assertEqual(use_car_move(car1, 321), func(car2, 321))
def bm_python():
use_car_move(car1, 321)
def bm_numba():
func(car2, 321)
python = utils.benchmark(bm_python, maxsec=.1)
numba = utils.benchmark(bm_numba, maxsec=.1)
print(python)
print(numba)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_lists
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
import math
import numpy as np
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def identity_func(l):
return l
def create_list(x, y, z):
return [x, y, z]
def create_nested_list(x, y, z, a, b, c):
return [[x, y, z], [a, b, c]]
def get_list_item(l, i):
return l[i]
def get_list_slice(l, start, stop, step):
return l[start:stop:step]
def set_list_item(l, i, x):
l[i] = x
return l
def set_list_slice(l, start, stop, step, x):
l[start:stop:step] = x
return l
def get_list_len(l):
return len(l)
def list_comprehension1():
return sum([x**2 for x in range(10)])
def list_comprehension2():
return sum([x for x in range(10) if x % 2 == 0])
def list_comprehension3():
return sum([math.pow(x, 2) for x in range(10)])
def list_comprehension4():
return sum([x * y for x in range(10) for y in range(10)])
def list_append(l, x):
l.append(x)
return l
def list_extend(l1, l2):
l1.extend(l2)
return l1
def list_insert(l, i, x):
l.insert(i, x)
return l
def list_remove(l, x):
l.remove(x)
return l
def list_pop(l):
l.pop()
return l
def list_index(l, x):
return l.index(x)
def list_count(l, x):
return l.count(x)
def list_sort(l):
l.sort()
return l
def list_reverse(l):
l.reverse()
return l
class TestLists(unittest.TestCase):
@unittest.expectedFailure
def test_identity_func(self):
pyfunc = identity_func
cr = compile_isolated(pyfunc, (types.Dummy('list'),))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l), pyfunc(l))
@unittest.expectedFailure
def test_create_list(self):
pyfunc = create_list
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3), pyfunc(1, 2, 3))
@unittest.expectedFailure
def test_create_nested_list(self):
pyfunc = create_nested_list
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3, 4, 5, 6), pyfunc(1, 2, 3, 4, 5, 6))
@unittest.expectedFailure
def test_get_list_item(self):
pyfunc = get_list_item
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1,2,3), pyfunc(1,2,3))
@unittest.expectedFailure
def test_get_list_slice(self):
pyfunc = get_list_slice
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.int32, types.int32, types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 0, 10, 2), pyfunc(l, 0, 10, 2))
@unittest.expectedFailure
def test_set_list_item(self):
pyfunc = set_list_item
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.int32, types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 0, 999), pyfunc(l, 0, 999))
@unittest.expectedFailure
def test_set_list_slice(self):
pyfunc = set_list_slice
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.int32, types.int32, types.int32, types.int32))
cfunc = cr.entry_point
l = range(10)
x = [999, 999, 999, 999, 999]
self.assertEqual(cfunc(l, 0, 10, 2, x), pyfunc(l, 0, 10, 2, x))
@unittest.expectedFailure
def test_get_list_len(self):
pyfunc = get_list_len
cr = compile_isolated(pyfunc, (types.Dummy('list'),))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l), pyfunc(l))
@unittest.expectedFailure
def test_list_comprehension(self):
list_tests = [list_comprehension1,
list_comprehension2,
list_comprehension3,
list_comprehension4]
for test in list_tests:
pyfunc = test
cr = compile_isolated(pyfunc, ())
cfunc = cr.entry_point
self.assertEqual(cfunc(), pyfunc())
@unittest.expectedFailure
def test_list_append(self):
pyfunc = list_append
cr = compile_isolated(pyfunc, (types.Dummy('list'), types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 10), pyfunc(l, 10))
@unittest.expectedFailure
def test_list_extend(self):
pyfunc = list_extend
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.Dummy('list')))
cfunc = cr.entry_point
l1 = range(10)
l2 = range(10)
self.assertEqual(cfunc(l1, l2), pyfunc(l1, l2))
@unittest.expectedFailure
def test_list_insert(self):
pyfunc = list_insert
cr = compile_isolated(pyfunc, (types.Dummy('list'),
types.int32, types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 0, 999), pyfunc(l, 0, 999))
@unittest.expectedFailure
def test_list_remove(self):
pyfunc = list_remove
cr = compile_isolated(pyfunc, (types.Dummy('list'), types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 1), pyfunc(l, 1))
@unittest.expectedFailure
def test_list_pop(self):
pyfunc = list_pop
cr = compile_isolated(pyfunc, (types.Dummy('list'),))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l), pyfunc(l))
@unittest.expectedFailure
def test_list_index(self):
pyfunc = list_index
cr = compile_isolated(pyfunc, (types.Dummy('list'), types.int32))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l, 1), pyfunc(l, 1))
@unittest.expectedFailure
def test_list_count(self):
pyfunc = list_count
cr = compile_isolated(pyfunc, (types.Dummy('list'), types.int32))
cfunc = cr.entry_point
l = [1,1,2,1]
self.assertEqual(cfunc(l, 1), pyfunc(l, 1))
@unittest.expectedFailure
def test_list_sort(self):
pyfunc = list_sort
cr = compile_isolated(pyfunc, (types.Dummy('list'),))
cfunc = cr.entry_point
l = np.random.randint(10, size=10)
self.assertEqual(cfunc(l), pyfunc(l))
@unittest.expectedFailure
def test_list_reverse(self):
pyfunc = list_reverse
cr = compile_isolated(pyfunc, (types.Dummy('list'),))
cfunc = cr.entry_point
l = range(10)
self.assertEqual(cfunc(l), pyfunc(l))
if __name__ == '__main__':
unittest.main(buffer=True)
########NEW FILE########
__FILENAME__ = test_locals
from __future__ import print_function, division, absolute_import
from numba import jit, float32
from numba import unittest_support as unittest
def foo():
x = 123
return x
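# test_seed_types below forces the local x to float32 via the locals=
# mapping, so the compiled foo reports a float32 return type even though
# the literal 123 is an integer.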
class TestLocals(unittest.TestCase):
def test_seed_types(self):
cfoo = jit((), locals={'x': float32})(foo)
cres = list(cfoo.overloads.values())[0]
self.assertEqual(cres.signature.return_type, float32)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_loopdetection
from __future__ import print_function
import numba.unittest_support as unittest
from numba import bytecode, interpreter
from . import usecases
def interpret(func):
bc = bytecode.ByteCode(func=func)
print(bc.dump())
interp = interpreter.Interpreter(bytecode=bc)
interp.interpret()
interp.dump()
for syn in interp.syntax_info:
print(syn)
interp.verify()
return interp
class TestLoopDetection(unittest.TestCase):
def test_sum1d(self):
interp = interpret(usecases.sum1d)
self.assertTrue(len(interp.syntax_info) == 1)
def test_sum2d(self):
interp = interpret(usecases.sum2d)
self.assertTrue(len(interp.syntax_info) == 2)
def test_while_count(self):
interp = interpret(usecases.while_count)
self.assertTrue(len(interp.syntax_info) == 1)
def test_copy_arrays(self):
interp = interpret(usecases.copy_arrays)
self.assertTrue(len(interp.syntax_info) == 1)
def test_andor(self):
interp = interpret(usecases.andor)
self.assertTrue(len(interp.syntax_info) == 0)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_looplifting
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import unittest_support as unittest
from numba import jit
def lift1(x):
a = np.empty(3)
for i in range(a.size):
a[i] = x
return a
def lift2(x):
a = np.empty((3, 4))
for i in range(a.shape[0]):
for j in range(a.shape[1]):
a[i, j] = x
return a
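# reject1 returns from inside the loop; such loops are not expected to be lifted.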
def reject1(x):
a = np.arange(4)
for i in range(a.shape[0]):
return a
return a
class TestLoopLifting(unittest.TestCase):
def test_lift1(self):
compiled = jit(lift1)
x = 123
expect = lift1(x)
got = compiled(x)
cres = list(compiled.overloads.values())[0]
self.assertTrue(cres.lifted)
loopcres = list(cres.lifted[0].overloads.values())[0]
self.assertIs(loopcres.typing_error, None)
self.assertTrue(np.all(expect == got))
def test_lift2(self):
compiled = jit(lift2)
x = 123
expect = lift2(x)
got = compiled(x)
cres = list(compiled.overloads.values())[0]
self.assertTrue(cres.lifted)
loopcres = list(cres.lifted[0].overloads.values())[0]
self.assertIs(loopcres.typing_error, None)
self.assertTrue(np.all(expect == got))
def test_reject1(self):
compiled = jit(reject1)
expect = reject1(1)
got = compiled(1)
self.assertTrue(np.all(expect == got))
cres = list(compiled.overloads.values())[0]
# Does not lift
self.assertFalse(cres.lifted)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_mandelbrot
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
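# Classic escape-time test: iterate z -> z**2 + c and report whether |z|
# stays below 2 for 100 iterations.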
def is_in_mandelbrot(c):
i = 0
z = 0.0j
for i in range(100):
z = z ** 2 + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return False
return True
class TestMandelbrot(unittest.TestCase):
def test_mandelbrot(self):
pyfunc = is_in_mandelbrot
cr = compile_isolated(pyfunc, (types.complex64,))
cfunc = cr.entry_point
points = [0+0j, 1+0j, 0+1j, 1+1j, 0.1+0.1j]
for p in points:
self.assertEqual(cfunc(p), pyfunc(p))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_mathlib
from __future__ import print_function, absolute_import, division
import math
import numpy as np
from numba import unittest_support as unittest
from numba.compiler import compile_isolated, Flags, utils
from numba import types
PY27_AND_ABOVE = utils.PYVERSION > (2, 6)
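# math.expm1, erf, erfc, gamma and lgamma are only available on Python 2.7+,
# so the corresponding tests are skipped on older interpreters.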
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def sin(x):
return math.sin(x)
def cos(x):
return math.cos(x)
def tan(x):
return math.tan(x)
def sinh(x):
return math.sinh(x)
def cosh(x):
return math.cosh(x)
def tanh(x):
return math.tanh(x)
def asin(x):
return math.asin(x)
def acos(x):
return math.acos(x)
def atan(x):
return math.atan(x)
def atan2(y, x):
return math.atan2(y, x)
def asinh(x):
return math.asinh(x)
def acosh(x):
return math.acosh(x)
def atanh(x):
return math.atanh(x)
def sqrt(x):
return math.sqrt(x)
def npy_sqrt(x):
return np.sqrt(x)
def exp(x):
return math.exp(x)
def expm1(x):
return math.expm1(x)
def log(x):
return math.log(x)
def log1p(x):
return math.log1p(x)
def log10(x):
return math.log10(x)
def floor(x):
return math.floor(x)
def ceil(x):
return math.ceil(x)
def trunc(x):
return math.trunc(x)
def isnan(x):
return math.isnan(x)
def isinf(x):
return math.isinf(x)
def hypot(x, y):
return math.hypot(x, y)
def degrees(x):
return math.degrees(x)
def radians(x):
return math.radians(x)
def erf(x):
return math.erf(x)
def erfc(x):
return math.erfc(x)
def gamma(x):
return math.gamma(x)
def lgamma(x):
return math.lgamma(x)
class TestMathLib(unittest.TestCase):
def run_unary(self, pyfunc, x_types, x_values, flags=enable_pyobj_flags,
places=6):
for tx, vx in zip(x_types, x_values):
cr = compile_isolated(pyfunc, [tx], flags=flags)
cfunc = cr.entry_point
self.assertAlmostEqual(cfunc(vx), pyfunc(vx), places=places)
def test_sin(self, flags=enable_pyobj_flags):
pyfunc = sin
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_sin_npm(self):
self.test_sin(flags=no_pyobj_flags)
def test_cos(self, flags=enable_pyobj_flags):
pyfunc = cos
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_cos_npm(self):
self.test_cos(flags=no_pyobj_flags)
def test_tan(self, flags=enable_pyobj_flags):
pyfunc = tan
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_tan_npm(self):
self.test_tan(flags=no_pyobj_flags)
def test_sqrt(self, flags=enable_pyobj_flags):
pyfunc = sqrt
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [2, 1, 2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_sqrt_npm(self):
self.test_sqrt(flags=no_pyobj_flags)
def test_npy_sqrt(self, flags=enable_pyobj_flags):
pyfunc = npy_sqrt
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [2, 1, 2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_npy_sqrt_npm(self):
self.test_npy_sqrt(flags=no_pyobj_flags)
def test_exp(self, flags=enable_pyobj_flags):
pyfunc = exp
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_exp_npm(self):
self.test_exp(flags=no_pyobj_flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_expm1(self, flags=enable_pyobj_flags):
pyfunc = expm1
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_expm1_npm(self):
self.test_expm1(flags=no_pyobj_flags)
def test_log(self, flags=enable_pyobj_flags):
pyfunc = log
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_log_npm(self):
self.test_log(flags=no_pyobj_flags)
def test_log1p(self, flags=enable_pyobj_flags):
pyfunc = log1p
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_log1p_npm(self):
self.test_log1p(flags=no_pyobj_flags)
def test_log10(self, flags=enable_pyobj_flags):
pyfunc = log10
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_log10_npm(self):
self.test_log10(flags=no_pyobj_flags)
def test_asin(self, flags=enable_pyobj_flags):
pyfunc = asin
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_asin_npm(self):
self.test_asin(flags=no_pyobj_flags)
def test_acos(self, flags=enable_pyobj_flags):
pyfunc = acos
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_acos_npm(self):
self.test_acos(flags=no_pyobj_flags)
def test_atan(self, flags=enable_pyobj_flags):
pyfunc = atan
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_atan_npm(self):
self.test_atan(flags=no_pyobj_flags)
def test_atan2(self, flags=enable_pyobj_flags):
pyfunc = atan2
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [-2, -1, -2, 2, 1, 2, .1, .2]
for ty, xy in zip(x_types, x_values):
cres = compile_isolated(pyfunc, (ty, ty), flags=flags)
cfunc = cres.entry_point
x = xy
y = x * 2
self.assertAlmostEqual(pyfunc(x, y), cfunc(x, y))
def test_atan2_npm(self):
self.test_atan2(flags=no_pyobj_flags)
def test_asinh(self, flags=enable_pyobj_flags):
pyfunc = asinh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_asinh_npm(self):
self.test_asinh(flags=no_pyobj_flags)
def test_acosh(self, flags=enable_pyobj_flags):
pyfunc = acosh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_acosh_npm(self):
self.test_acosh(flags=no_pyobj_flags)
def test_atanh(self, flags=enable_pyobj_flags):
pyfunc = atanh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [0, 0, 0, 0, 0, 0, 0.1, 0.1]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_atanh_npm(self):
self.test_atanh(flags=no_pyobj_flags)
def test_sinh(self, flags=enable_pyobj_flags):
pyfunc = sinh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_sinh_npm(self):
self.test_sinh(flags=no_pyobj_flags)
def test_cosh(self, flags=enable_pyobj_flags):
pyfunc = cosh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_cosh_npm(self):
self.test_cosh(flags=no_pyobj_flags)
def test_tanh(self, flags=enable_pyobj_flags):
pyfunc = tanh
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [0, 0, 0, 0, 0, 0, 0.1, 0.1]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_tanh_npm(self):
self.test_tanh(flags=no_pyobj_flags)
def test_floor(self, flags=enable_pyobj_flags):
pyfunc = floor
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_floor_npm(self):
self.test_floor(flags=no_pyobj_flags)
def test_ceil(self, flags=enable_pyobj_flags):
pyfunc = ceil
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_ceil_npm(self):
self.test_ceil(flags=no_pyobj_flags)
def test_trunc(self, flags=enable_pyobj_flags):
pyfunc = trunc
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_trunc_npm(self):
self.test_trunc(flags=no_pyobj_flags)
def test_isnan(self, flags=enable_pyobj_flags):
pyfunc = isnan
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float32, types.float64, types.float64]
x_values = [0, 0, 0, 0, 0, 0, float('nan'), 0.0, float('nan'), 0.0]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_isnan_npm(self):
self.test_isnan(flags=no_pyobj_flags)
def test_isinf(self, flags=enable_pyobj_flags):
pyfunc = isinf
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float32, types.float64, types.float64]
x_values = [0, 0, 0, 0, 0, 0, float('inf'), 0.0, float('inf'), 0.0]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_isinf_npm(self):
self.test_isinf(flags=no_pyobj_flags)
def test_hypot(self, flags=enable_pyobj_flags):
pyfunc = hypot
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 2, 3, 4, 5, 6, .21, .34]
for ty, xy in zip(x_types, x_values):
x = xy
y = xy * 2
cres = compile_isolated(pyfunc, (ty, ty), flags=flags)
cfunc = cres.entry_point
self.assertAlmostEqual(pyfunc(x, y), cfunc(x, y))
def test_hypot_npm(self):
self.test_hypot(flags=no_pyobj_flags)
def test_degrees(self, flags=enable_pyobj_flags):
pyfunc = degrees
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags, places=5)
def test_degrees_npm(self):
self.test_degrees(flags=no_pyobj_flags)
def test_radians(self, flags=enable_pyobj_flags):
pyfunc = radians
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
def test_radians_npm(self):
self.test_radians(flags=no_pyobj_flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_erf(self, flags=enable_pyobj_flags):
pyfunc = erf
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
@unittest.expectedFailure
def test_erf_npm(self):
self.test_erf(flags=no_pyobj_flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_erfc(self, flags=enable_pyobj_flags):
pyfunc = erfc
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
@unittest.expectedFailure
def test_erfc_npm(self):
self.test_erfc(flags=no_pyobj_flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_gamma(self, flags=enable_pyobj_flags):
pyfunc = gamma
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
@unittest.expectedFailure
def test_gamma_npm(self):
self.test_gamma(flags=no_pyobj_flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
def test_lgamma(self, flags=enable_pyobj_flags):
pyfunc = lgamma
x_types = [types.int16, types.int32, types.int64,
types.uint16, types.uint32, types.uint64,
types.float32, types.float64]
x_values = [1, 1, 1, 1, 1, 1, 1., 1.]
self.run_unary(pyfunc, x_types, x_values, flags)
@unittest.skipIf(not PY27_AND_ABOVE, "Only support for 2.7+")
@unittest.expectedFailure
def test_lgamma_npm(self):
self.test_lgamma(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_maxmin
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def domax3(a, b, c):
return max(a, b, c)
def domin3(a, b, c):
return min(a, b, c)
class TestMaxMin(unittest.TestCase):
def test_max3(self):
pyfunc = domax3
argtys = (types.int32, types.float32, types.double)
cres = compile_isolated(pyfunc, argtys)
cfunc = cres.entry_point
a = 1
b = 2
c = 3
self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c))
def test_min3(self):
pyfunc = domin3
argtys = (types.int32, types.float32, types.double)
cres = compile_isolated(pyfunc, argtys)
cfunc = cres.entry_point
a = 1
b = 2
c = 3
self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_nan
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
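# NaN is the only float value that compares unequal to itself, so `x != x`
# is a NaN check and `x == x` is its complement.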
def isnan(x):
return x != x
def isequal(x):
return x == x
class TestNaN(unittest.TestCase):
def test_nans(self, flags=enable_pyobj_flags):
pyfunc = isnan
cr = compile_isolated(pyfunc, (types.float64,), flags=flags)
cfunc = cr.entry_point
self.assertTrue(cfunc(float('nan')))
self.assertFalse(cfunc(1.0))
pyfunc = isequal
cr = compile_isolated(pyfunc, (types.float64,), flags=flags)
cfunc = cr.entry_point
self.assertFalse(cfunc(float('nan')))
self.assertTrue(cfunc(1.0))
def test_nans_npm(self):
self.test_nans(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_numberctor
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def doint(a):
return int(a)
def dofloat(a):
return float(a)
def docomplex(a):
return complex(a)
def docomplex2(a, b):
return complex(a, b)
class TestNumberCtor(unittest.TestCase):
def test_int(self):
pyfunc = doint
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertEqual(pyfunc(x), cfunc(x))
def test_float(self):
pyfunc = dofloat
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertAlmostEqual(pyfunc(x), cfunc(x), places=6)
def test_complex(self):
pyfunc = docomplex
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertAlmostEqual(pyfunc(x), cfunc(x), places=6)
def test_complex2(self):
pyfunc = docomplex2
x_types = [
types.int32, types.int64, types.float32, types.float64
]
x_values = [1, 1000, 12.2, 23.4]
for ty, x in zip(x_types, x_values):
cres = compile_isolated(pyfunc, [ty, ty])
cfunc = cres.entry_point
self.assertAlmostEqual(pyfunc(x, x), cfunc(x, x), places=6)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_numpyadapt
from __future__ import print_function
from numba.ctypes_support import *
import numpy
import numba.unittest_support as unittest
from numba._numpyadapt import get_ndarray_adaptor
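# ctypes mirror of the native 3-D array struct filled in by the adaptor:
# data pointer, shape and strides arrays, and a parent object pointer.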
class ArrayStruct3D(Structure):
_fields_ = [
("data", c_void_p),
("shape", (c_ssize_t * 3)),
("strides", (c_ssize_t * 3)),
("parent", c_void_p),
]
class TestArrayAdaptor(unittest.TestCase):
def test_array_adaptor(self):
arystruct = ArrayStruct3D()
adaptorptr = get_ndarray_adaptor()
adaptor = PYFUNCTYPE(c_int, py_object, c_void_p)(adaptorptr)
ary = numpy.arange(60).reshape(2, 3, 10)
status = adaptor(ary, byref(arystruct))
self.assertEqual(status, 0)
self.assertEqual(arystruct.data, ary.ctypes.data)
for i in range(3):
self.assertEqual(arystruct.shape[i], ary.ctypes.shape[i])
self.assertEqual(arystruct.strides[i], ary.ctypes.strides[i])
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_object_mode
"""
Testing object mode specifics.
"""
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import utils
def complex_constant(n):
tmp = n + 4
return tmp + 3j
forceobj = Flags()
forceobj.set("force_pyobject")
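# force_pyobject compiles the whole function in object mode, even where
# nopython compilation would have succeeded.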
def loop_nest_3(x, y):
n = 0
for i in range(x):
for j in range(y):
for k in range(x+y):
n += i * j
return n
class TestObjectMode(unittest.TestCase):
def test_complex_constant(self):
pyfunc = complex_constant
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
self.assertEqual(pyfunc(12), cfunc(12))
def test_loop_nest(self):
"""
        Test a bug where the iterator was decref'ed too early.
        If the bug is present, a segfault will occur.
"""
pyfunc = loop_nest_3
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
self.assertEqual(pyfunc(5, 5), cfunc(5, 5))
def bm_pyfunc():
pyfunc(5, 5)
def bm_cfunc():
cfunc(5, 5)
print(utils.benchmark(bm_pyfunc))
print(utils.benchmark(bm_cfunc))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_operators
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, typeinfer
from numba.config import PYVERSION
from numba.tests.true_div_usecase import truediv_usecase
import itertools
Noflags = Flags()
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
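# Noflags requests pure nopython compilation; enable_pyobject allows falling
# back to object mode when typing fails.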
def add_usecase(x, y):
return x + y
def sub_usecase(x, y):
return x - y
def mul_usecase(x, y):
return x * y
def div_usecase(x, y):
return x / y
def floordiv_usecase(x, y):
    return x // y
def mod_usecase(x, y):
return x % y
def pow_usecase(x, y):
return x ** y
def bitshift_left_usecase(x, y):
return x << y
def bitshift_right_usecase(x, y):
return x >> y
def bitwise_and_usecase(x, y):
return x & y
def bitwise_or_usecase(x, y):
return x | y
def bitwise_xor_usecase(x, y):
return x ^ y
def bitwise_not_usecase(x, y):
return ~x
def not_usecase(x):
return not(x)
def negate_usecase(x):
return -x
class TestOperators(unittest.TestCase):
def run_test_ints(self, pyfunc, x_operands, y_operands, types_list,
flags=enable_pyobj_flags):
for arg_types in types_list:
cr = compile_isolated(pyfunc, arg_types, flags=flags)
cfunc = cr.entry_point
for x, y in itertools.product(x_operands, y_operands):
self.assertTrue(np.all(pyfunc(x, y) == cfunc(x, y)))
def run_test_floats(self, pyfunc, x_operands, y_operands, types_list,
flags=enable_pyobj_flags):
for arg_types in types_list:
cr = compile_isolated(pyfunc, arg_types, flags=flags)
cfunc = cr.entry_point
for x, y in itertools.product(x_operands, y_operands):
self.assertTrue(np.allclose(pyfunc(x, y), cfunc(x, y)))
def test_add_ints(self, flags=enable_pyobj_flags):
pyfunc = add_usecase
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1]
y_operands = [0, 1]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_add_ints_array(self, flags=enable_pyobj_flags):
pyfunc = add_usecase
array = np.arange(-10, 10, dtype=np.int32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_add_ints_npm(self):
self.test_add_ints(flags=Noflags)
@unittest.expectedFailure
def test_add_ints_array_npm(self):
self.test_add_ints_array(flags=Noflags)
def test_add_floats(self, flags=enable_pyobj_flags):
pyfunc = add_usecase
x_operands = [-1.1, 0.0, 1.1]
y_operands = [-1.1, 0.0, 1.1]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_add_floats_array(self, flags=enable_pyobj_flags):
pyfunc = add_usecase
array = np.arange(-1, 1, 0.1, dtype=np.float32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_add_floats_npm(self):
self.test_add_floats(flags=Noflags)
@unittest.expectedFailure
def test_add_floats_array_npm(self):
self.test_add_floats_array(flags=Noflags)
def test_sub_ints(self, flags=enable_pyobj_flags):
pyfunc = sub_usecase
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
        # Unsigned version will overflow and wrap around
x_operands = [1, 2]
y_operands = [0, 1]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_sub_ints_array(self, flags=enable_pyobj_flags):
pyfunc = sub_usecase
array = np.arange(-10, 10, dtype=np.int32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_sub_ints_npm(self):
self.test_sub_ints(flags=Noflags)
@unittest.expectedFailure
def test_sub_ints_array_npm(self):
self.test_sub_ints_array(flags=Noflags)
def test_sub_floats(self, flags=enable_pyobj_flags):
pyfunc = sub_usecase
x_operands = [-1.1, 0.0, 1.1]
y_operands = [-1.1, 0.0, 1.1]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_sub_floats_array(self, flags=enable_pyobj_flags):
pyfunc = sub_usecase
array = np.arange(-1, 1, 0.1, dtype=np.float32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_sub_floats_npm(self):
self.test_sub_floats(flags=Noflags)
@unittest.expectedFailure
def test_sub_floats_array_npm(self):
self.test_sub_floats_array(flags=Noflags)
def test_mul_ints(self, flags=enable_pyobj_flags):
pyfunc = mul_usecase
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1]
y_operands = [0, 1]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mul_ints_array(self, flags=enable_pyobj_flags):
pyfunc = mul_usecase
array = np.arange(-10, 10, dtype=np.int32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mul_ints_npm(self):
self.test_mul_ints(flags=Noflags)
@unittest.expectedFailure
def test_mul_ints_array_npm(self):
self.test_mul_ints_array(flags=Noflags)
def test_mul_floats(self, flags=enable_pyobj_flags):
pyfunc = mul_usecase
x_operands = [-111.111, 0.0, 111.111]
y_operands = [-111.111, 0.0, 111.111]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mul_floats_array(self, flags=enable_pyobj_flags):
pyfunc = mul_usecase
array = np.arange(-1, 1, 0.1, dtype=np.float32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mul_floats_npm(self):
self.test_mul_floats(flags=Noflags)
@unittest.expectedFailure
def test_mul_floats_array_npm(self):
self.test_mul_floats_array(flags=Noflags)
def test_div_ints(self, flags=enable_pyobj_flags):
if PYVERSION >= (3, 0):
# Due to true division returning float
tester = self.run_test_floats
else:
tester = self.run_test_ints
pyfunc = div_usecase
x_operands = [-1, 0, 1, 2, 3]
y_operands = [-3, -2, -1, 1]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
tester(pyfunc, x_operands, y_operands, types_list, flags=flags)
x_operands = [0, 1, 2, 3]
y_operands = [1, 2, 3]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
tester(pyfunc, x_operands, y_operands, types_list, flags=flags)
def test_div_ints_array(self, flags=enable_pyobj_flags):
pyfunc = div_usecase
array = np.array([-10, -9, -2, -1, 1, 2, 9, 10], dtype=np.int32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_div_ints_npm(self):
self.test_div_ints(flags=Noflags)
@unittest.expectedFailure
def test_div_ints_array_npm(self):
self.test_div_ints_array(flags=Noflags)
def test_div_floats(self, flags=enable_pyobj_flags):
pyfunc = div_usecase
x_operands = [-111.111, 0.0, 2.2]
y_operands = [-2.2, 1.0, 111.111]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_div_floats_array(self, flags=enable_pyobj_flags):
pyfunc = div_usecase
array = np.concatenate((np.arange(0.1, 1.1, 0.1, dtype=np.float32),
np.arange(-1.0, 0.0, 0.1, dtype=np.float32)))
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_div_floats_npm(self):
self.test_div_floats(flags=Noflags)
@unittest.expectedFailure
def test_div_floats_array_npm(self):
self.test_div_floats_array(flags=Noflags)
def test_truediv_ints(self, flags=enable_pyobj_flags):
pyfunc = truediv_usecase
x_operands = [0, 1, 2, 3]
y_operands = [1, 1, 2, 3]
types_list = [(types.uint32, types.uint32),
(types.uint64, types.uint64),
(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_truediv_ints_npm(self):
self.test_truediv_ints(flags=Noflags)
def test_truediv_floats(self, flags=enable_pyobj_flags):
pyfunc = truediv_usecase
x_operands = [-111.111, 0.0, 2.2]
y_operands = [-2.2, 1.0, 111.111]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_truediv_floats_npm(self):
self.test_truediv_floats(flags=Noflags)
def test_floordiv_floats(self, flags=enable_pyobj_flags):
pyfunc = floordiv_usecase
x_operands = [-111.111, 0.0, 2.2]
y_operands = [-2.2, 1.0, 111.111]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_floordiv_floats_npm(self):
self.test_floordiv_floats(flags=Noflags)
def test_mod_ints(self, flags=enable_pyobj_flags):
pyfunc = mod_usecase
x_operands = [-1, 0, 1, 2, 3]
y_operands = [-3, -2, -1, 1]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1, 2, 3]
y_operands = [1, 2, 3]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mod_ints_array(self, flags=enable_pyobj_flags):
pyfunc = mod_usecase
array = np.concatenate((np.arange(1, 11, dtype=np.int32),
np.arange(-10, 0, dtype=np.int32)))
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mod_ints_npm(self):
self.test_mod_ints(flags=Noflags)
@unittest.expectedFailure
def test_mod_ints_array_npm(self):
self.test_mod_ints_array(flags=Noflags)
def test_mod_floats(self, flags=enable_pyobj_flags):
pyfunc = mod_usecase
x_operands = [-111.111, 0.0, 2.2]
y_operands = [-2.2, 1.0, 111.111]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mod_floats_array(self, flags=enable_pyobj_flags):
pyfunc = mod_usecase
array = np.arange(-1, 1, 0.1, dtype=np.float32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mod_floats_npm(self):
self.test_mod_floats(flags=Noflags)
@unittest.expectedFailure
def test_mod_floats_array_npm(self):
self.test_mod_floats_array(flags=Noflags)
def test_pow_ints(self, flags=enable_pyobj_flags):
pyfunc = pow_usecase
x_operands = [-2, -1, 0, 1, 2]
y_operands = [0, 1, 2]
types_list = [(types.int32, types.int32),
(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1, 2]
y_operands = [0, 1, 2]
types_list = [(types.byte, types.byte),
(types.uint32, types.uint32),
(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_pow_ints_array(self, flags=enable_pyobj_flags):
pyfunc = pow_usecase
array = np.arange(-10, 10, dtype=np.int32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.int32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_pow_ints_npm(self):
self.test_pow_ints(flags=Noflags)
@unittest.expectedFailure
def test_pow_ints_array_npm(self):
self.test_pow_ints_array(flags=Noflags)
def test_pow_floats(self, flags=enable_pyobj_flags):
pyfunc = pow_usecase
x_operands = [-222.222, -111.111, 111.111, 222.222]
y_operands = [-2, -1, 0, 1, 2]
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0.0]
y_operands = [0, 1, 2] # TODO native handling of 0 ** negative power
types_list = [(types.float32, types.float32),
(types.float64, types.float64)]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_pow_floats_array(self, flags=enable_pyobj_flags):
pyfunc = pow_usecase
# NOTE
# If x is finite negative and y is finite but not an integer,
# it causes a domain error
array = np.arange(0.1, 1, 0.1, dtype=np.float32)
x_operands = [array]
y_operands = [array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_array = np.arange(-1, 0.1, 0.1, dtype=np.float32)
y_array = np.arange(len(x_array), dtype=np.float32)
x_operands = [x_array]
y_operands = [y_array]
arraytype = types.Array(types.float32, 1, 'C')
types_list = [(arraytype, arraytype)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_pow_floats_npm(self):
self.test_pow_floats(flags=Noflags)
@unittest.expectedFailure
def test_pow_floats_array_npm(self):
self.test_pow_floats_array(flags=Noflags)
def test_add_complex(self, flags=enable_pyobj_flags):
pyfunc = add_usecase
x_operands = [1+0j, 1j, -1-1j]
y_operands = x_operands
types_list = [(types.complex64, types.complex64),
(types.complex128, types.complex128),]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_add_complex_npm(self):
self.test_add_complex(flags=Noflags)
def test_sub_complex(self, flags=enable_pyobj_flags):
pyfunc = sub_usecase
x_operands = [1+0j, 1j, -1-1j]
y_operands = [1, 2, 3]
types_list = [(types.complex64, types.complex64),
(types.complex128, types.complex128),]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_sub_complex_npm(self):
self.test_sub_complex(flags=Noflags)
def test_mul_complex(self, flags=enable_pyobj_flags):
pyfunc = mul_usecase
x_operands = [1+0j, 1j, -1-1j]
y_operands = [1, 2, 3]
types_list = [(types.complex64, types.complex64),
(types.complex128, types.complex128),]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_mul_complex_npm(self):
self.test_mul_complex(flags=Noflags)
def test_div_complex(self, flags=enable_pyobj_flags):
pyfunc = div_usecase
x_operands = [1+0j, 1j, -1-1j]
y_operands = [1, 2, 3]
types_list = [(types.complex64, types.complex64),
(types.complex128, types.complex128),]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_div_complex_npm(self):
self.test_div_complex(flags=Noflags)
def test_truediv_complex(self, flags=enable_pyobj_flags):
pyfunc = truediv_usecase
x_operands = [1+0j, 1j, -1-1j]
y_operands = [1, 2, 3]
types_list = [(types.complex64, types.complex64),
(types.complex128, types.complex128),]
self.run_test_floats(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_truediv_complex_npm(self):
self.test_truediv_complex(flags=Noflags)
def test_mod_complex(self, flags=enable_pyobj_flags):
pyfunc = mod_usecase
try:
cres = compile_isolated(pyfunc, (types.complex64, types.complex64))
except typeinfer.TypingError as e:
            self.assertTrue(e.msg.startswith("Undeclared %(complex64, complex64)"))
else:
self.fail("Complex % should trigger an undeclared error")
def test_mod_complex_npm(self):
self.test_mod_complex(flags=Noflags)
def test_bitshift_left(self, flags=enable_pyobj_flags):
pyfunc = bitshift_left_usecase
x_operands = [0, 1]
y_operands = [0, 1, 2, 4, 8, 16, 31]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1]
y_operands = [0, 1, 2, 4, 8, 16, 32, 63]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, -1]
y_operands = [0, 1, 2, 4, 8, 16, 31]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, -1]
y_operands = [0, 1, 2, 4, 8, 16, 32, 63]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitshift_left_npm(self):
self.test_bitshift_left(flags=Noflags)
def test_bitshift_right(self, flags=enable_pyobj_flags):
pyfunc = bitshift_right_usecase
x_operands = [0, 1, 2**32 - 1]
y_operands = [0, 1, 2, 4, 8, 16, 31]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1, 2**64 - 1]
y_operands = [0, 1, 2, 4, 8, 16, 32, 63]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, 1, -(2**31)]
y_operands = [0, 1, 2, 4, 8, 16, 31]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = [0, -1, -(2**31)]
y_operands = [0, 1, 2, 4, 8, 16, 32, 63]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitshift_right_npm(self):
self.test_bitshift_right(flags=Noflags)
def test_bitwise_and(self, flags=enable_pyobj_flags):
pyfunc = bitwise_and_usecase
x_operands = list(range(0, 8)) + [2**32 - 1]
y_operands = list(range(0, 8)) + [2**32 - 1]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(0, 8)) + [2**64 - 1]
y_operands = list(range(0, 8)) + [2**64 - 1]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
y_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
y_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitwise_and_npm(self):
self.test_bitwise_and(flags=Noflags)
def test_bitwise_or(self, flags=enable_pyobj_flags):
pyfunc = bitwise_or_usecase
x_operands = list(range(0, 8)) + [2**32 - 1]
y_operands = list(range(0, 8)) + [2**32 - 1]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(0, 8)) + [2**64 - 1]
y_operands = list(range(0, 8)) + [2**64 - 1]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
y_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
y_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitwise_or_npm(self):
self.test_bitwise_or(flags=Noflags)
def test_bitwise_xor(self, flags=enable_pyobj_flags):
pyfunc = bitwise_xor_usecase
x_operands = list(range(0, 8)) + [2**32 - 1]
y_operands = list(range(0, 8)) + [2**32 - 1]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(0, 8)) + [2**64 - 1]
y_operands = list(range(0, 8)) + [2**64 - 1]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
y_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
y_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitwise_xor_npm(self):
self.test_bitwise_xor(flags=Noflags)
def test_bitwise_not(self, flags=enable_pyobj_flags):
pyfunc = bitwise_not_usecase
x_operands = list(range(0, 8)) + [2**32 - 1]
x_operands = [np.uint32(x) for x in x_operands]
y_operands = [0]
types_list = [(types.uint32, types.uint32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1]
y_operands = [0]
types_list = [(types.int32, types.int32)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(0, 8)) + [2**64 - 1]
x_operands = [np.uint64(x) for x in x_operands]
y_operands = [0]
types_list = [(types.uint64, types.uint64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1]
y_operands = [0]
types_list = [(types.int64, types.int64)]
self.run_test_ints(pyfunc, x_operands, y_operands, types_list,
flags=flags)
def test_bitwise_not_npm(self):
self.test_bitwise_not(flags=Noflags)
def test_not(self):
pyfunc = not_usecase
values = [
1,
2,
3,
1.2,
3.4j,
]
cres = compile_isolated(pyfunc, (), flags=enable_pyobj_flags)
cfunc = cres.entry_point
for val in values:
self.assertEqual(pyfunc(val), cfunc(val))
def test_not_npm(self):
pyfunc = not_usecase
# test native mode
argtys = [
types.int8,
types.int32,
types.int64,
types.float32,
types.complex128,
]
values = [
1,
2,
3,
1.2,
3.4j,
]
for ty, val in zip(argtys, values):
cres = compile_isolated(pyfunc, [ty])
self.assertEqual(cres.signature.return_type, types.boolean)
cfunc = cres.entry_point
self.assertEqual(pyfunc(val), cfunc(val))
def test_negate_npm(self):
pyfunc = negate_usecase
# test native mode
argtys = [
types.int8,
types.int32,
types.int64,
types.float32,
types.complex128,
]
values = [
1,
2,
3,
1.2,
3.4j,
]
for ty, val in zip(argtys, values):
cres = compile_isolated(pyfunc, [ty])
cfunc = cres.entry_point
self.assertAlmostEqual(pyfunc(val), cfunc(val))
def test_negate(self):
pyfunc = negate_usecase
values = [
1,
2,
3,
1.2,
3.4j,
]
cres = compile_isolated(pyfunc, (), flags=enable_pyobj_flags)
cfunc = cres.entry_point
for val in values:
self.assertEqual(pyfunc(val), cfunc(val))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_print
from __future__ import print_function
import numba.unittest_support as unittest
from contextlib import contextmanager
from numba.compiler import compile_isolated, Flags
from numba import types
from numba.io_support import StringIO
import numpy as np
import sys
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def print_value(x):
print(x)
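# Temporarily swap sys.stdout for a StringIO so printed output can be captured
# and asserted against.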
@contextmanager
def swap_stdout():
old_stdout = sys.stdout
sys.stdout = StringIO()
yield
sys.stdout = old_stdout
class TestPrint(unittest.TestCase):
def test_print(self):
pyfunc = print_value
cr = compile_isolated(pyfunc, (types.int32,))
cfunc = cr.entry_point
with swap_stdout():
cfunc(1)
self.assertEqual(sys.stdout.getvalue().strip(), '1')
cr = compile_isolated(pyfunc, (types.int64,))
cfunc = cr.entry_point
with swap_stdout():
cfunc(1)
self.assertEqual(sys.stdout.getvalue().strip(), '1')
cr = compile_isolated(pyfunc, (types.float32,))
cfunc = cr.entry_point
with swap_stdout():
cfunc(1.1)
        # Float32 will lose precision
got = sys.stdout.getvalue().strip()
expect = '1.10000002384'
self.assertTrue(got.startswith(expect))
cr = compile_isolated(pyfunc, (types.float64,))
cfunc = cr.entry_point
with swap_stdout():
cfunc(100.0**10.0)
self.assertEqual(sys.stdout.getvalue().strip(), '1e+20')
# Array will have to use object mode
arraytype = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype,), flags=enable_pyobj_flags)
cfunc = cr.entry_point
with swap_stdout():
cfunc(np.arange(10))
self.assertEqual(sys.stdout.getvalue().strip(),
'[0 1 2 3 4 5 6 7 8 9]')
if __name__ == '__main__':
unittest.main(buffer=True)
########NEW FILE########
__FILENAME__ = test_pycc_tresult
from __future__ import print_function
import os
import tempfile
import sys
from ctypes import *
from numba import unittest_support as unittest
from numba.pycc import find_shared_ending, main
base_path = os.path.dirname(os.path.abspath(__file__))
@unittest.skipIf(sys.platform.startswith("win32"), "Skip win32 test for now")
class TestPYCC(unittest.TestCase):
def test_pycc_ctypes_lib(self):
modulename = os.path.join(base_path, 'compile_with_pycc')
cdll_modulename = modulename + find_shared_ending()
if os.path.exists(cdll_modulename):
os.unlink(cdll_modulename)
main(args=[modulename + '.py'])
lib = CDLL(cdll_modulename)
try:
lib.mult.argtypes = [POINTER(c_double), c_double, c_double]
lib.mult.restype = c_int
lib.multf.argtypes = [POINTER(c_float), c_float, c_float]
lib.multf.restype = c_int
res = c_double()
lib.mult(byref(res), 123, 321)
print('lib.mult(123, 321) = %f' % res.value)
self.assertEqual(res.value, 123 * 321)
res = c_float()
lib.multf(byref(res), 987, 321)
print('lib.multf(987, 321) = %f' % res.value)
self.assertEqual(res.value, 987 * 321)
finally:
del lib
if os.path.exists(cdll_modulename):
os.unlink(cdll_modulename)
def test_pycc_pymodule(self):
modulename = os.path.join(base_path, 'compile_with_pycc')
tmpdir = tempfile.gettempdir()
print('tmpdir: %s' % tmpdir)
out_modulename = (os.path.join(tmpdir, 'compiled_with_pycc')
+ find_shared_ending())
main(args=['--python', '-o', out_modulename, modulename + '.py'])
sys.path.append(tmpdir)
try:
import compiled_with_pycc as lib
try:
res = lib.mult(123, 321)
print('lib.mult(123, 321) = %f' % res)
assert res == 123 * 321
res = lib.multf(987, 321)
print('lib.multf(987, 321) = %f' % res)
assert res == 987 * 321
finally:
del lib
finally:
if os.path.exists(out_modulename):
os.unlink(out_modulename)
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_range
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def loop1(n):
s = 0
for i in range(n):
s += i
return s
def loop2(a, b):
s = 0
for i in range(a, b):
s += i
return s
class TestRange(unittest.TestCase):
def test_loop1_int16(self):
pyfunc = loop1
cres = compile_isolated(pyfunc, [types.int16])
cfunc = cres.entry_point
        self.assertEqual(cfunc(5), pyfunc(5))
def test_loop2_int16(self):
pyfunc = loop2
cres = compile_isolated(pyfunc, [types.int16, types.int16])
cfunc = cres.entry_point
        self.assertEqual(cfunc(1, 6), pyfunc(1, 6))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_storeslice
from __future__ import print_function
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
def usecase(obs, nPoints, B, sigB, A, sigA, M, sigM):
    center = nPoints // 2
print(center)
obs[0:center] = np.arange(center)
obs[center] = 321
obs[(center + 1):] = np.arange(nPoints - center - 1)
class TestStoreSlice(unittest.TestCase):
def test_usecase(self):
n = 10
obs_got = np.zeros(n)
obs_expected = obs_got.copy()
flags = Flags()
flags.set("enable_pyobject")
cres = compile_isolated(usecase, (), flags=flags)
cres.entry_point(obs_got, n, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0)
usecase(obs_expected, n, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0)
print(obs_got, obs_expected)
self.assertTrue(np.allclose(obs_got, obs_expected))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_structref
from __future__ import print_function
import numba.unittest_support as unittest
from numba import types
from numba.compiler import compile_isolated
import numpy as np
def foo(a):
b = a[0]
a[0] = 123
return b
class TestStructRef(unittest.TestCase):
def test_complex(self):
pyfunc = foo
aryty = types.Array(types.complex128, 1, 'A')
cres = compile_isolated(pyfunc, [aryty])
a = np.array([321], dtype='complex128')
a0 = a[0]
cfunc = cres.entry_point
self.assertEqual(cfunc(a), a0)
self.assertEqual(a[0], 123)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_tuple_return
from __future__ import print_function, division, absolute_import
import numpy
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
def tuple_return_usecase(a, b):
return a, b
class TestTupleReturn(unittest.TestCase):
def test_array_tuple(self):
aryty = types.Array(types.float64, 1, 'C')
cres = compile_isolated(tuple_return_usecase, (aryty, aryty))
a = b = numpy.arange(5, dtype='float64')
ra, rb = cres.entry_point(a, b)
self.assertTrue((ra == a).all())
self.assertTrue((rb == b).all())
del a, b
self.assertTrue((ra == rb).all())
def test_scalar_tuple(self):
scalarty = types.float32
cres = compile_isolated(tuple_return_usecase, (scalarty, scalarty))
a = b = 1
ra, rb = cres.entry_point(a, b)
self.assertEqual(ra, a)
self.assertEqual(rb, b)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_typeconv
from __future__ import print_function
import itertools
from numba import unittest_support as unittest
from numba import types
from numba.typeconv.typeconv import TypeManager
from numba.typeconv import rules
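# TypeManager.select_overload picks the best-matching signature from the
# candidate overloads, given the registered promotion / unsafe-cast rules.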
class TestTypeConv(unittest.TestCase):
def test_typeconv(self):
tm = TypeManager()
i32 = types.int32
i64 = types.int64
f32 = types.float32
tm.set_promote(i32, i64)
tm.set_unsafe_convert(i32, f32)
sig = (i32, f32)
ovs = [
(i32, i32),
(f32, f32),
]
sel = tm.select_overload(sig, ovs)
self.assertEqual(sel, 1)
def test_default(self):
tm = rules.default_type_manager
i16 = types.int16
i32 = types.int32
i64 = types.int64
f32 = types.float32
self.assertEqual(tm.check_compatible(i32, i64), 'promote')
self.assertEqual(tm.check_compatible(i32, f32), 'unsafe')
self.assertEqual(tm.check_compatible(i16, i64), 'promote')
for ta, tb in itertools.product(types.number_domain,
types.number_domain):
if ta in types.complex_domain and tb not in types.complex_domain:
continue
self.assertTrue(tm.check_compatible(ta, tb) is not None,
msg="No cast from %s to %s" % (ta, tb))
def test_overload1(self):
tm = rules.default_type_manager
i32 = types.int32
i64 = types.int64
sig = (i64, i32, i32)
ovs = [
(i32, i32, i32),
(i64, i64, i64),
]
print(tm.select_overload(sig, ovs))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_typecreate
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
from numba import types
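# types.int32[:, ::1] uses slice syntax to build array types: a trailing ::1
# marks C contiguity, a leading ::1 marks Fortran contiguity.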
class TestTypeCreate(unittest.TestCase):
def test_array_c(self):
self.assertEqual(types.int32[::1].layout, 'C')
self.assertEqual(types.int32[:, ::1].layout, 'C')
self.assertEqual(types.int32[:, :, ::1].layout, 'C')
self.assertTrue(types.int32[::1].is_c_contig)
self.assertTrue(types.int32[:, ::1].is_c_contig)
self.assertTrue(types.int32[:, :, ::1].is_c_contig)
def test_array_f(self):
self.assertEqual(types.int32[::1, :].layout, 'F')
self.assertEqual(types.int32[::1, :, :].layout, 'F')
self.assertTrue(types.int32[::1].is_f_contig)
self.assertTrue(types.int32[::1, :].is_f_contig)
self.assertTrue(types.int32[::1, :, :].is_f_contig)
def test_array_a(self):
self.assertEqual(types.int32[:].layout, 'A')
self.assertEqual(types.int32[:, :].layout, 'A')
self.assertEqual(types.int32[:, :, :].layout, 'A')
self.assertFalse(types.int32[:].is_contig)
self.assertFalse(types.int32[:, :].is_contig)
self.assertFalse(types.int32[:, :, :].is_contig)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_typenames
from __future__ import print_function, absolute_import
import numpy
from numba import types
from numba import unittest_support as unittest
class TestTypeNames(unittest.TestCase):
def test_numpy_integers(self):
expect = getattr(types, "int%d" % (numpy.dtype("int").itemsize * 8))
self.assertEqual(types.int_, expect)
expect = getattr(types, "uint%d" % (numpy.dtype("uint").itemsize * 8))
self.assertEqual(types.uint, expect)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_typingerror
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
from numba.typeinfer import TypingError
def what():
pass
def foo():
return what()
def bar(x):
return x.a
class TestTypingError(unittest.TestCase):
def test_unknown_function(self):
try:
compile_isolated(foo, ())
except TypingError as e:
self.assertTrue(e.msg.startswith("Untyped global name"))
else:
self.fail("Should raise error")
def test_unknown_attrs(self):
try:
compile_isolated(bar, (types.int32,))
except TypingError as e:
self.assertTrue(e.msg.startswith("Unknown attribute"))
else:
self.fail("Should raise error")
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_ufuncbuilding
from __future__ import print_function, absolute_import, division
import numpy
from numba import unittest_support as unittest
from numba.npyufunc.ufuncbuilder import UFuncBuilder, GUFuncBuilder
from numba import vectorize, guvectorize
def add(a, b):
return a + b
def guadd(a, b, c):
x, y = c.shape
for i in range(x):
for j in range(y):
c[i, j] = a[i, j] + b[i, j]
class TestUfuncBuilding(unittest.TestCase):
def test_basic_ufunc(self):
ufb = UFuncBuilder(add)
ufb.add("int32(int32, int32)")
ufb.add("int64(int64, int64)")
ufunc = ufb.build_ufunc()
a = numpy.arange(10, dtype='int32')
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
def test_ufunc_struct(self):
ufb = UFuncBuilder(add)
ufb.add("complex64(complex64, complex64)")
ufunc = ufb.build_ufunc()
a = numpy.arange(10, dtype='complex64') + 1j
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
class TestGUfuncBuilding(unittest.TestCase):
def test_basic_gufunc(self):
gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)")
gufb.add("void(int32[:,:], int32[:,:], int32[:,:])")
ufunc = gufb.build_ufunc()
a = numpy.arange(10, dtype="int32").reshape(2, 5)
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
def test_gufunc_struct(self):
gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)")
gufb.add("void(complex64[:,:], complex64[:,:], complex64[:,:])")
ufunc = gufb.build_ufunc()
a = numpy.arange(10, dtype="complex64").reshape(2, 5) + 1j
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
class TestVectorizeDecor(unittest.TestCase):
def test_vectorize(self):
ufunc = vectorize(['int32(int32, int32)'])(add)
a = numpy.arange(10, dtype='int32')
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
def test_guvectorize(self):
ufunc = guvectorize(['(int32[:,:], int32[:,:], int32[:,:])'],
"(x,y),(x,y)->(x,y)")(guadd)
a = numpy.arange(10, dtype='int32').reshape(2, 5)
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_ufuncs
from __future__ import print_function
import sys
import warnings
import numba.unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.config import PYVERSION
import itertools
is32bits = tuple.__itemsize__ == 4
iswindows = sys.platform.startswith('win32')
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
# unary ufuncs
def negative_usecase(x, result):
np.negative(x, result)
def absolute_usecase(x, result):
np.absolute(x, result)
def rint_usecase(x, result):
np.rint(x, result)
def sign_usecase(x, result):
np.sign(x, result)
def conj_usecase(x, result):
np.conj(x, result)
def exp_usecase(x, result):
np.exp(x, result)
def exp2_usecase(x, result):
np.exp2(x, result)
def log_usecase(x, result):
np.log(x, result)
def log2_usecase(x, result):
np.log2(x, result)
def log10_usecase(x, result):
np.log10(x, result)
def expm1_usecase(x, result):
np.expm1(x, result)
def log1p_usecase(x, result):
np.log1p(x, result)
def sqrt_usecase(x, result):
np.sqrt(x, result)
def square_usecase(x, result):
np.square(x, result)
def reciprocal_usecase(x, result):
np.reciprocal(x, result)
def sin_usecase(x, result):
np.sin(x, result)
def cos_usecase(x, result):
np.cos(x, result)
def tan_usecase(x, result):
np.tan(x, result)
def arcsin_usecase(x, result):
np.arcsin(x, result)
def arccos_usecase(x, result):
np.arccos(x, result)
def arctan_usecase(x, result):
np.arctan(x, result)
def sinh_usecase(x, result):
np.sinh(x, result)
def cosh_usecase(x, result):
np.cosh(x, result)
def tanh_usecase(x, result):
np.tanh(x, result)
def arcsinh_usecase(x, result):
np.arcsinh(x, result)
def arccosh_usecase(x, result):
np.arccosh(x, result)
def arctanh_usecase(x, result):
np.arctanh(x, result)
def deg2rad_usecase(x, result):
np.deg2rad(x, result)
def rad2deg_usecase(x, result):
np.rad2deg(x, result)
def invertlogical_not_usecase(x, result):
np.invertlogical_not(x, result)
def floor_usecase(x, result):
np.floor(x, result)
def ceil_usecase(x, result):
np.ceil(x, result)
def trunc_usecase(x, result):
np.trunc(x, result)
# binary ufuncs
def add_usecase(x, y, result):
np.add(x, y, result)
def subtract_usecase(x, y, result):
np.subtract(x, y, result)
def multiply_usecase(x, y, result):
np.multiply(x, y, result)
def divide_usecase(x, y, result):
np.divide(x, y, result)
def logaddexp_usecase(x, y, result):
np.logaddexp(x, y, result)
def logaddexp2_usecase(x, y, result):
np.logaddexp2(x, y, result)
def true_divide_usecase(x, y, result):
np.true_divide(x, y, result)
def floor_divide_usecase(x, y, result):
np.floor_divide(x, y, result)
def power_usecase(x, y, result):
np.power(x, y, result)
def remainder_usecase(x, y, result):
np.remainder(x, y, result)
def mod_usecase(x, y, result):
np.mod(x, y, result)
def fmod_usecase(x, y, result):
np.fmod(x, y, result)
def arctan2_usecase(x, y, result):
np.arctan2(x, y, result)
def hypot_usecase(x, y, result):
np.hypot(x, y, result)
def bitwise_and_usecase(x, y, result):
np.bitwise_and(x, y, result)
def bitwise_or_usecase(x, y, result):
np.bitwise_or(x, y, result)
def bitwise_xor_usecase(x, y, result):
np.bitwise_xor(x, y, result)
def left_shift_usecase(x, y, result):
np.left_shift(x, y, result)
def right_shift_usecase(x, y, result):
np.right_shift(x, y, result)
def greater_usecase(x, y, result):
np.greater(x, y, result)
def greater_equal_usecase(x, y, result):
np.greater_equal(x, y, result)
def less_usecase(x, y, result):
np.less(x, y, result)
def less_equal_usecase(x, y, result):
np.less_equal(x, y, result)
def not_equal_usecase(x, y, result):
np.not_equal(x, y, result)
def equal_usecase(x, y, result):
np.equal(x, y, result)
def logical_and_usecase(x, y, result):
np.logical_and(x, y, result)
def logical_or_usecase(x, y, result):
np.logical_or(x, y, result)
def logical_xor_usecase(x, y, result):
np.logical_xor(x, y, result)
def maximum_usecase(x, y, result):
np.maximum(x, y, result)
def minimum_usecase(x, y, result):
np.minimum(x, y, result)
def fmax_usecase(x, y, result):
np.fmax(x, y, result)
def fmin_usecase(x, y, result):
np.fmin(x, y, result)
def copysign_usecase(x, y, result):
np.copysign(x, y, result)
def ldexp_usecase(x, y, result):
np.ldexp(x, y, result)
class TestUFuncs(unittest.TestCase):
def unary_ufunc_test(self, ufunc_name, flags=enable_pyobj_flags,
skip_inputs=None, additional_inputs=None,
int_output_type=None, float_output_type=None):
ufunc = globals()[ufunc_name + '_usecase']
inputs = [
(0, types.uint32),
(1, types.uint32),
(-1, types.int32),
(0, types.int32),
(1, types.int32),
(0, types.uint64),
(1, types.uint64),
(-1, types.int64),
(0, types.int64),
(1, types.int64),
(-0.5, types.float32),
(0, types.float32),
(0.5, types.float32),
(-0.5, types.float64),
(0, types.float64),
(0.5, types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
if additional_inputs:
inputs = inputs + additional_inputs
pyfunc = ufunc
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
if skip_inputs and input_type in skip_inputs:
continue
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(types.int64, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(types.uint64, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(types.float64, 1, 'C')
# Due to __ftol2 llvm bug, skip testing uint64 output on windows.
# (llvm translates fptoui call to ftol2 call on windows which
            # causes a crash later.)
if iswindows and output_type.dtype is types.uint64:
continue
cr = compile_isolated(pyfunc, (input_type, output_type), flags=flags)
cfunc = cr.entry_point
if isinstance(input_operand, np.ndarray):
result = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
invalid_flag = False
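            # NumPy may emit "invalid value encountered" RuntimeWarnings for
            # some inputs (e.g. sqrt or log of a negative value); remember
            # that so the comparison below can tolerate a mismatched result.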
with warnings.catch_warnings(record=True) as warnlist:
warnings.simplefilter('always')
pyfunc(input_operand, expected)
warnmsg = "invalid value encountered"
for thiswarn in warnlist:
if (issubclass(thiswarn.category, RuntimeWarning)
and str(thiswarn.message).startswith(warnmsg)):
invalid_flag = True
cfunc(input_operand, result)
# Need special checks if NaNs are in results
if np.isnan(expected).any() or np.isnan(result).any():
self.assertTrue(np.allclose(np.isnan(result), np.isnan(expected)))
if not np.isnan(expected).all() and not np.isnan(result).all():
self.assertTrue(np.allclose(result[np.invert(np.isnan(result))],
expected[np.invert(np.isnan(expected))]))
else:
match = np.all(result == expected) or np.allclose(result,
expected)
if not match:
if invalid_flag:
# Allow output to mismatch for invalid input
print("Output mismatch for invalid input",
input_tuple, result, expected)
else:
self.fail("%s != %s" % (result, expected))
def binary_ufunc_test(self, ufunc_name, flags=enable_pyobj_flags,
skip_inputs=None, additional_inputs=None,
int_output_type=None, float_output_type=None):
ufunc = globals()[ufunc_name + '_usecase']
inputs = [
(0, types.uint32),
(1, types.uint32),
(-1, types.int32),
(0, types.int32),
(1, types.int32),
(0, types.uint64),
(1, types.uint64),
(-1, types.int64),
(0, types.int64),
(1, types.int64),
(-0.5, types.float32),
(0.0, types.float32),
(0.5, types.float32),
(-0.5, types.float64),
(0.0, types.float64),
(0.5, types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
if additional_inputs:
inputs = inputs + additional_inputs
pyfunc = ufunc
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
if skip_inputs and input_type in skip_inputs:
continue
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(types.int64, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(types.uint64, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(types.float64, 1, 'C')
# Due to __ftol2 llvm bug, skip testing uint64 output on windows.
# (llvm translates fptoui call to ftol2 call on windows which
            # causes a crash later.)
if iswindows and output_type.dtype is types.uint64:
continue
cr = compile_isolated(pyfunc, (input_type, input_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input_operand, np.ndarray):
result = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input_operand, input_operand, result)
pyfunc(input_operand, input_operand, expected)
# Need special checks if NaNs are in results
if np.isnan(expected).any() or np.isnan(result).any():
self.assertTrue(np.allclose(np.isnan(result), np.isnan(expected)))
if not np.isnan(expected).all() and not np.isnan(result).all():
self.assertTrue(np.allclose(result[np.invert(np.isnan(result))],
expected[np.invert(np.isnan(expected))]))
else:
self.assertTrue(np.all(result == expected) or
np.allclose(result, expected))
def binary_int_ufunc_test(self, name=None, flags=enable_pyobj_flags):
self.binary_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
# unary ufunc tests
def test_negative_ufunc(self, flags=enable_pyobj_flags):
        # The NumPy ufunc has a bug with uint32 input and int64 output,
        # so skip uint32 input.
self.unary_ufunc_test('negative', int_output_type=types.int64,
skip_inputs=[types.Array(types.uint32, 1, 'C')], flags=flags)
def test_negative_ufunc_npm(self):
self.test_negative_ufunc(flags=no_pyobj_flags)
def test_absolute_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('absolute', flags=flags,
additional_inputs = [(np.iinfo(np.uint32).max, types.uint32),
(np.iinfo(np.uint64).max, types.uint64),
(np.finfo(np.float32).min, types.float32),
(np.finfo(np.float64).min, types.float64)
])
def test_absolute_ufunc_npm(self):
self.test_absolute_ufunc(flags=no_pyobj_flags)
def test_rint_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('rint', flags=flags)
@unittest.expectedFailure
def test_rint_ufunc_npm(self):
self.test_rint_ufunc(flags=no_pyobj_flags)
def test_sign_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('sign', flags=flags)
def test_sign_ufunc_npm(self):
self.test_sign_ufunc(flags=no_pyobj_flags)
def test_conj_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('conj', flags=flags)
@unittest.expectedFailure
def test_conj_ufunc_npm(self):
self.test_conj_ufunc(flags=no_pyobj_flags)
def test_exp_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('exp', flags=flags)
def test_exp_ufunc_npm(self):
self.test_exp_ufunc(flags=no_pyobj_flags)
def test_exp2_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('exp2', flags=flags)
def test_exp2_ufunc_npm(self):
self.test_exp2_ufunc(flags=no_pyobj_flags)
def test_log_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('log', flags=flags)
def test_log_ufunc_npm(self):
self.test_log_ufunc(flags=no_pyobj_flags)
def test_log2_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('log2', flags=flags)
def test_log2_ufunc_npm(self):
self.test_log2_ufunc(flags=no_pyobj_flags)
def test_log10_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('log10', flags=flags)
def test_log10_ufunc_npm(self):
self.test_log10_ufunc(flags=no_pyobj_flags)
def test_expm1_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('expm1', flags=flags)
def test_expm1_ufunc_npm(self):
self.test_expm1_ufunc(flags=no_pyobj_flags)
def test_log1p_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('log1p', flags=flags)
def test_log1p_ufunc_npm(self):
self.test_log1p_ufunc(flags=no_pyobj_flags)
def test_sqrt_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('sqrt', flags=flags)
def test_sqrt_ufunc_npm(self):
self.test_sqrt_ufunc(flags=no_pyobj_flags)
def test_square_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('square', flags=flags)
@unittest.expectedFailure
def test_square_ufunc_npm(self):
self.test_square_ufunc(flags=no_pyobj_flags)
def test_reciprocal_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('reciprocal', flags=flags)
@unittest.expectedFailure
def test_reciprocal_ufunc_npm(self):
self.test_reciprocal_ufunc(flags=no_pyobj_flags)
def test_sin_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('sin', flags=flags)
def test_sin_ufunc_npm(self):
self.test_sin_ufunc(flags=no_pyobj_flags)
def test_cos_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('cos', flags=flags)
def test_cos_ufunc_npm(self):
self.test_cos_ufunc(flags=no_pyobj_flags)
def test_tan_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('tan', flags=flags)
def test_tan_ufunc_npm(self):
self.test_tan_ufunc(flags=no_pyobj_flags)
def test_arcsin_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arcsin', flags=flags)
def test_arcsin_ufunc_npm(self):
self.test_arcsin_ufunc(flags=no_pyobj_flags)
def test_arccos_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arccos', flags=flags)
def test_arccos_ufunc_npm(self):
self.test_arccos_ufunc(flags=no_pyobj_flags)
def test_arctan_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arctan', flags=flags)
def test_arctan_ufunc_npm(self):
self.test_arctan_ufunc(flags=no_pyobj_flags)
def test_sinh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('sinh', flags=flags)
def test_sinh_ufunc_npm(self):
self.test_sinh_ufunc(flags=no_pyobj_flags)
def test_cosh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('cosh', flags=flags)
def test_cosh_ufunc_npm(self):
self.test_cosh_ufunc(flags=no_pyobj_flags)
def test_tanh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('tanh', flags=flags)
def test_tanh_ufunc_npm(self):
self.test_tanh_ufunc(flags=no_pyobj_flags)
def test_arcsinh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arcsinh', flags=flags)
def test_arcsinh_ufunc_npm(self):
self.test_arcsinh_ufunc(flags=no_pyobj_flags)
def test_arccosh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arccosh', flags=flags)
def test_arccosh_ufunc_npm(self):
self.test_arccosh_ufunc(flags=no_pyobj_flags)
def test_arctanh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('arctanh', flags=flags)
def test_arctanh_ufunc_npm(self):
self.test_arctanh_ufunc(flags=no_pyobj_flags)
def test_deg2rad_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('deg2rad', flags=flags)
def test_deg2rad_ufunc_npm(self):
self.test_deg2rad_ufunc(flags=no_pyobj_flags)
def test_rad2deg_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('rad2deg', flags=flags)
def test_rad2deg_ufunc_npm(self):
self.test_rad2deg_ufunc(flags=no_pyobj_flags)
@unittest.skipIf(not hasattr(np, "invertlogical_not"),
"invertlogical_not is not available")
def test_invertlogical_not_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('invertlogical_not', flags=flags)
@unittest.expectedFailure
def test_invertlogical_not_ufunc_npm(self):
self.test_invertlogical_not_ufunc(flags=no_pyobj_flags)
def test_floor_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('floor', flags=flags)
def test_floor_ufunc_npm(self):
self.test_floor_ufunc(flags=no_pyobj_flags)
def test_ceil_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('ceil', flags=flags)
def test_ceil_ufunc_npm(self):
self.test_ceil_ufunc(flags=no_pyobj_flags)
def test_trunc_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test('trunc', flags=flags)
def test_trunc_ufunc_npm(self):
self.test_trunc_ufunc(flags=no_pyobj_flags)
# binary ufunc tests
def test_add_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test('add', flags=flags)
def test_add_ufunc_npm(self):
self.test_add_ufunc(flags=no_pyobj_flags)
def test_subtract_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test('subtract', flags=flags)
def test_subtract_ufunc_npm(self):
self.test_subtract_ufunc(flags=no_pyobj_flags)
def test_multiply_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test('multiply', flags=flags)
def test_multiply_ufunc_npm(self):
self.test_multiply_ufunc(flags=no_pyobj_flags)
def test_divide_ufunc(self, flags=enable_pyobj_flags):
skip_inputs = None
        # Python 3 integer division by zero, when the result is stored into
        # a 64-bit int, produces garbage instead of 0, so skip those inputs.
if PYVERSION >= (3, 0):
skip_inputs = [types.uint32, types.uint64,
types.Array(types.uint32, 1, 'C'),
types.Array(types.int32, 1, 'C'),
types.Array(types.uint64, 1, 'C')]
self.binary_ufunc_test('divide', flags=flags,
skip_inputs=skip_inputs, int_output_type=types.float64)
def test_divide_ufunc_npm(self):
self.test_divide_ufunc(flags=no_pyobj_flags)
def test_logaddexp_ufunc(self):
self.binary_ufunc_test('logaddexp')
@unittest.expectedFailure
def test_logaddexp_ufunc_npm(self):
self.binary_ufunc_test('logaddexp', flags=no_pyobj_flags)
def test_logaddexp2_ufunc(self):
self.binary_ufunc_test('logaddexp2')
@unittest.expectedFailure
def test_logaddexp2_ufunc_npm(self):
self.binary_ufunc_test('logaddexp2', flags=no_pyobj_flags)
def test_true_divide_ufunc(self):
self.binary_ufunc_test('true_divide')
@unittest.expectedFailure
def test_true_divide_ufunc_npm(self):
self.binary_ufunc_test('true_divide', flags=no_pyobj_flags)
def test_floor_divide_ufunc(self):
self.binary_ufunc_test('floor_divide')
@unittest.expectedFailure
def test_floor_divide_ufunc_npm(self):
self.binary_ufunc_test('floor_divide', flags=no_pyobj_flags)
def test_power_ufunc(self):
self.binary_ufunc_test('power')
@unittest.expectedFailure
def test_power_ufunc_npm(self):
self.binary_ufunc_test('power', flags=no_pyobj_flags)
def test_remainder_ufunc(self):
self.binary_ufunc_test('remainder')
@unittest.expectedFailure
def test_remainder_ufunc_npm(self):
self.binary_ufunc_test('remainder', flags=no_pyobj_flags)
def test_mod_ufunc(self):
self.binary_ufunc_test('mod')
@unittest.expectedFailure
def test_mod_ufunc_npm(self):
self.binary_ufunc_test('mod', flags=no_pyobj_flags)
def test_fmod_ufunc(self):
self.binary_ufunc_test('fmod')
@unittest.expectedFailure
def test_fmod_ufunc_npm(self):
self.binary_ufunc_test('fmod', flags=no_pyobj_flags)
def test_arctan2_ufunc(self):
self.binary_ufunc_test('arctan2')
def test_arctan2_ufunc_npm(self):
self.binary_ufunc_test('arctan2', flags=no_pyobj_flags)
def test_hypot_ufunc(self):
self.binary_ufunc_test('hypot')
@unittest.expectedFailure
def test_hypot_ufunc_npm(self):
self.binary_ufunc_test('hypot', flags=no_pyobj_flags)
def test_bitwise_and_ufunc(self):
self.binary_int_ufunc_test('bitwise_and')
@unittest.expectedFailure
def test_bitwise_and_ufunc_npm(self):
self.binary_int_ufunc_test('bitwise_and', flags=no_pyobj_flags)
def test_bitwise_or_ufunc(self):
self.binary_int_ufunc_test('bitwise_or')
@unittest.expectedFailure
def test_bitwise_or_ufunc_npm(self):
self.binary_int_ufunc_test('bitwise_or', flags=no_pyobj_flags)
def test_bitwise_xor_ufunc(self):
self.binary_int_ufunc_test('bitwise_xor')
@unittest.expectedFailure
def test_bitwise_xor_ufunc_npm(self):
self.binary_int_ufunc_test('bitwise_xor', flags=no_pyobj_flags)
def test_left_shift_ufunc(self):
self.binary_int_ufunc_test('left_shift')
@unittest.expectedFailure
def test_left_shift_ufunc_npm(self):
self.binary_int_ufunc_test('left_shift', flags=no_pyobj_flags)
def test_right_shift_ufunc(self):
self.binary_int_ufunc_test('right_shift')
@unittest.expectedFailure
def test_right_shift_ufunc_npm(self):
self.binary_int_ufunc_test('right_shift', flags=no_pyobj_flags)
def test_greater_ufunc(self):
self.binary_ufunc_test('greater')
@unittest.expectedFailure
def test_greater_ufunc_npm(self):
self.binary_ufunc_test('greater', flags=no_pyobj_flags)
def test_greater_equal_ufunc(self):
self.binary_ufunc_test('greater_equal')
@unittest.expectedFailure
def test_greater_equal_ufunc_npm(self):
self.binary_ufunc_test('greater_equal', flags=no_pyobj_flags)
def test_less_ufunc(self):
self.binary_ufunc_test('less')
@unittest.expectedFailure
def test_less_ufunc_npm(self):
self.binary_ufunc_test('less', flags=no_pyobj_flags)
def test_less_equal_ufunc(self):
self.binary_ufunc_test('less_equal')
@unittest.expectedFailure
def test_less_equal_ufunc_npm(self):
self.binary_ufunc_test('less_equal', flags=no_pyobj_flags)
def test_not_equal_ufunc(self):
self.binary_ufunc_test('not_equal')
@unittest.expectedFailure
def test_not_equal_ufunc_npm(self):
self.binary_ufunc_test('not_equal', flags=no_pyobj_flags)
def test_equal_ufunc(self):
self.binary_ufunc_test('equal')
@unittest.expectedFailure
def test_equal_ufunc_npm(self):
self.binary_ufunc_test('equal', flags=no_pyobj_flags)
def test_logical_and_ufunc(self):
self.binary_ufunc_test('logical_and')
@unittest.expectedFailure
def test_logical_and_ufunc_npm(self):
self.binary_ufunc_test('logical_and', flags=no_pyobj_flags)
def test_logical_or_ufunc(self):
self.binary_ufunc_test('logical_or')
@unittest.expectedFailure
def test_logical_or_ufunc_npm(self):
self.binary_ufunc_test('logical_or', flags=no_pyobj_flags)
def test_logical_xor_ufunc(self):
self.binary_ufunc_test('logical_xor')
@unittest.expectedFailure
def test_logical_xor_ufunc_npm(self):
self.binary_ufunc_test('logical_xor', flags=no_pyobj_flags)
def test_maximum_ufunc(self):
self.binary_ufunc_test('maximum')
@unittest.expectedFailure
def test_maximum_ufunc_npm(self):
self.binary_ufunc_test('maximum', flags=no_pyobj_flags)
def test_minimum_ufunc(self):
self.binary_ufunc_test('minimum')
@unittest.expectedFailure
def test_minimum_ufunc_npm(self):
self.binary_ufunc_test('minimum', flags=no_pyobj_flags)
def test_fmax_ufunc(self):
self.binary_ufunc_test('fmax')
@unittest.expectedFailure
def test_fmax_ufunc_npm(self):
self.binary_ufunc_test('fmax', flags=no_pyobj_flags)
def test_fmin_ufunc(self):
self.binary_ufunc_test('fmin')
@unittest.expectedFailure
def test_fmin_ufunc_npm(self):
self.binary_ufunc_test('fmin', flags=no_pyobj_flags)
def test_copysign_ufunc(self):
self.binary_ufunc_test('copysign')
@unittest.expectedFailure
def test_copysign_ufunc_npm(self):
self.binary_ufunc_test('copysign', flags=no_pyobj_flags)
# FIXME
    @unittest.skipIf(is32bits or iswindows,
                     "Some types are not supported on 32-bit platform")
@unittest.expectedFailure
def test_ldexp_ufunc(self):
self.binary_int_ufunc_test('ldexp')
# FIXME
@unittest.skipIf(is32bits or iswindows,
"Some types are not supported on 32-bit platform")
@unittest.expectedFailure
def test_ldexp_ufunc_npm(self):
self.binary_int_ufunc_test('ldexp', flags=no_pyobj_flags)
def test_binary_ufunc_performance(self):
pyfunc = add_usecase
arraytype = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype, arraytype, arraytype))
cfunc = cr.entry_point
nelem = 5000
x_operand = np.arange(nelem, dtype=np.float32)
y_operand = np.arange(nelem, dtype=np.float32)
control = np.empty_like(x_operand)
result = np.empty_like(x_operand)
def bm_python():
pyfunc(x_operand, y_operand, control)
def bm_numba():
cfunc(x_operand, y_operand, result)
print(utils.benchmark(bm_python, maxsec=.1))
print(utils.benchmark(bm_numba, maxsec=.1))
assert np.allclose(control, result)
def binary_ufunc_mixed_types_test(self, ufunc_name, flags=enable_pyobj_flags):
ufunc = globals()[ufunc_name + '_usecase']
inputs1 = [
(1, types.uint64),
(-1, types.int64),
(0.5, types.float64),
(np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
inputs2 = inputs1
output_types = [types.Array(types.int64, 1, 'C'),
types.Array(types.float64, 1, 'C')]
pyfunc = ufunc
for input1, input2, output_type in itertools.product(inputs1, inputs2, output_types):
input1_operand = input1[0]
input1_type = input1[1]
input2_operand = input2[0]
input2_type = input2[1]
# Skip division by unsigned int because of NumPy bugs
if ufunc_name == 'divide' and (input2_type == types.Array(types.uint32, 1, 'C') or
input2_type == types.Array(types.uint64, 1, 'C')):
continue
# Skip some subtraction tests because of NumPy bugs
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint32 and output_type == types.Array(types.int64, 1, 'C'):
                continue
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint64 and output_type == types.Array(types.int64, 1, 'C'):
                continue
if ((isinstance(input1_type, types.Array) or
isinstance(input2_type, types.Array)) and
not isinstance(output_type, types.Array)):
continue
cr = compile_isolated(pyfunc, (input1_type, input2_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input1_operand, np.ndarray):
result = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
elif isinstance(input2_operand, np.ndarray):
result = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input1_operand, input2_operand, result)
pyfunc(input1_operand, input2_operand, expected)
# Need special checks if NaNs are in results
if np.isnan(expected).any() or np.isnan(result).any():
self.assertTrue(np.allclose(np.isnan(result), np.isnan(expected)))
if not np.isnan(expected).all() and not np.isnan(result).all():
self.assertTrue(np.allclose(result[np.invert(np.isnan(result))],
expected[np.invert(np.isnan(expected))]))
else:
self.assertTrue(np.all(result == expected) or
np.allclose(result, expected))
def test_mixed_types(self):
self.binary_ufunc_mixed_types_test('divide', flags=no_pyobj_flags)
def test_broadcasting(self):
# Test unary ufunc
pyfunc = negative_usecase
input_operands = [
np.arange(3, dtype='i8'),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3*3, dtype='i8').reshape(3,3)]
output_operands = [
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3)]
for x, result in zip(input_operands, output_operands):
input_type = types.Array(types.uint64, x.ndim, 'C')
output_type = types.Array(types.int64, result.ndim, 'C')
cr = compile_isolated(pyfunc, (input_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.zeros(result.shape, dtype=result.dtype)
np.negative(x, expected)
cfunc(x, result)
self.assertTrue(np.all(result == expected))
# Test binary ufunc
pyfunc = add_usecase
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
output_type = types.Array(types.uint64, max(x.ndim, y.ndim), 'C')
cr = compile_isolated(pyfunc, (input1_type, input2_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = np.zeros(expected.shape, dtype='u8')
cfunc(x, y, result)
self.assertTrue(np.all(result == expected))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_unpack_sequence
from __future__ import print_function
import numba.unittest_support as unittest
import numpy
from numba.compiler import compile_isolated, Flags
from numba import types
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def unpack_list(l):
a, b, c = l
return (a, b, c)
def unpack_shape(a):
x, y, z = a.shape
return x + y + z
class TestUnpack(unittest.TestCase):
def test_unpack_list(self):
pyfunc = unpack_list
cr = compile_isolated(pyfunc, (), flags=enable_pyobj_flags)
cfunc = cr.entry_point
l = [1, 2, 3]
self.assertEqual(cfunc(l), pyfunc(l))
def test_unpack_shape(self):
pyfunc = unpack_shape
cr = compile_isolated(pyfunc, [types.Array(dtype=types.int32,
ndim=3,
layout='C')])
cfunc = cr.entry_point
a = numpy.zeros(shape=(1, 2, 3))
self.assertEqual(cfunc(a), pyfunc(a))
if __name__ == '__main__':
unittest.main(buffer=True)
########NEW FILE########
__FILENAME__ = test_usecases
from __future__ import print_function
import numba.unittest_support as unittest
import itertools
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
class TestUsecases(unittest.TestCase):
def test_andor(self):
pyfunc = usecases.andor
cr = compile_isolated(pyfunc, (types.int32, types.int32))
cfunc = cr.entry_point
# Argument boundaries
xs = -1, 0, 1, 9, 10, 11
ys = -1, 0, 1, 9, 10, 11
for args in itertools.product(xs, ys):
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args), "args %s" % (args,))
def test_sum1d(self):
pyfunc = usecases.sum1d
cr = compile_isolated(pyfunc, (types.int32, types.int32))
cfunc = cr.entry_point
ss = -1, 0, 1, 100, 200
es = -1, 0, 1, 100, 200
for args in itertools.product(ss, es):
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_sum1d_pyobj(self):
pyfunc = usecases.sum1d
cr = compile_isolated(pyfunc, (types.int32, types.int32),
flags=force_pyobj_flags)
cfunc = cr.entry_point
ss = -1, 0, 1, 100, 200
es = -1, 0, 1, 100, 200
for args in itertools.product(ss, es):
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
args = 0, 500
def bm_python():
pyfunc(*args)
def bm_numba():
cfunc(*args)
print(utils.benchmark(bm_python, maxsec=.1))
print(utils.benchmark(bm_numba, maxsec=.1))
def test_sum2d(self):
pyfunc = usecases.sum2d
cr = compile_isolated(pyfunc, (types.int32, types.int32))
cfunc = cr.entry_point
ss = -1, 0, 1, 100, 200
es = -1, 0, 1, 100, 200
for args in itertools.product(ss, es):
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_while_count(self):
pyfunc = usecases.while_count
cr = compile_isolated(pyfunc, (types.int32, types.int32))
cfunc = cr.entry_point
ss = -1, 0, 1, 100, 200
es = -1, 0, 1, 100, 200
for args in itertools.product(ss, es):
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_copy_arrays(self):
pyfunc = usecases.copy_arrays
arraytype = types.Array(types.int32, 1, 'A')
cr = compile_isolated(pyfunc, (arraytype, arraytype))
cfunc = cr.entry_point
nda = 0, 1, 10, 100
for nd in nda:
a = np.arange(nd, dtype='int32')
b = np.empty_like(a)
args = a, b
print("case", args)
cfunc(*args)
self.assertTrue(np.all(a == b))
def test_copy_arrays2d(self):
pyfunc = usecases.copy_arrays2d
arraytype = types.Array(types.int32, 2, 'A')
cr = compile_isolated(pyfunc, (arraytype, arraytype))
cfunc = cr.entry_point
nda = (0, 0), (1, 1), (2, 5), (4, 25)
for nd in nda:
d1, d2 = nd
a = np.arange(d1 * d2, dtype='int32').reshape(d1, d2)
b = np.empty_like(a)
args = a, b
print("case", args)
cfunc(*args)
self.assertTrue(np.all(a == b))
def test_ifelse1(self):
self.run_ifelse(usecases.ifelse1)
def test_ifelse2(self):
self.run_ifelse(usecases.ifelse2)
def test_ifelse3(self):
self.run_ifelse(usecases.ifelse3)
def run_ifelse(self, pyfunc):
cr = compile_isolated(pyfunc, (types.int32, types.int32))
cfunc = cr.entry_point
xs = -1, 0, 1
ys = -1, 0, 1
for x, y in itertools.product(xs, ys):
args = x, y
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_string_concat(self):
pyfunc = usecases.string_concat
cr = compile_isolated(pyfunc, (types.int32, types.int32),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
xs = -1, 0, 1
ys = -1, 0, 1
for x, y in itertools.product(xs, ys):
args = x, y
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_string_len(self):
pyfunc = usecases.string_len
cr = compile_isolated(pyfunc, (types.pyobject,),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
test_str = '123456'
self.assertEqual(pyfunc(test_str), cfunc(test_str))
test_str = '1'
self.assertEqual(pyfunc(test_str), cfunc(test_str))
test_str = ''
self.assertEqual(pyfunc(test_str), cfunc(test_str))
def test_string_slicing(self):
pyfunc = usecases.string_slicing
cr = compile_isolated(pyfunc, (types.pyobject,),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
test_str = '123456'
self.assertEqual(pyfunc(test_str, 0, 3), cfunc(test_str, 0, 3))
self.assertEqual(pyfunc(test_str, 1, 5), cfunc(test_str, 1, 5))
self.assertEqual(pyfunc(test_str, 2, 3), cfunc(test_str, 2, 3))
def test_string_conversion(self):
pyfunc = usecases.string_conversion
cr = compile_isolated(pyfunc, (types.int32,),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
self.assertEqual(pyfunc(1), cfunc(1))
cr = compile_isolated(pyfunc, (types.float32,),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
self.assertEqual(pyfunc(1.1), cfunc(1.1))
def test_string_comparisons(self):
import operator
pyfunc = usecases.string_comparison
cr = compile_isolated(pyfunc, (types.pyobject, types.pyobject),
flags=enable_pyobj_flags)
cfunc = cr.entry_point
test_str1 = '123'
test_str2 = '123'
op = operator.eq
self.assertEqual(pyfunc(test_str1, test_str2, op),
cfunc(test_str1, test_str2, op))
test_str1 = '123'
test_str2 = '456'
op = operator.eq
self.assertEqual(pyfunc(test_str1, test_str2, op),
cfunc(test_str1, test_str2, op))
test_str1 = '123'
test_str2 = '123'
op = operator.ne
self.assertEqual(pyfunc(test_str1, test_str2, op),
cfunc(test_str1, test_str2, op))
test_str1 = '123'
test_str2 = '456'
op = operator.ne
self.assertEqual(pyfunc(test_str1, test_str2, op),
cfunc(test_str1, test_str2, op))
def test_blackscholes_cnd(self):
pyfunc = usecases.blackscholes_cnd
cr = compile_isolated(pyfunc, (types.float32,))
cfunc = cr.entry_point
ds = -0.5, 0, 0.5
for d in ds:
args = (d,)
print("case", args)
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_array_slicing(self):
pyfunc = usecases.slicing
arraytype = types.Array(types.int32, 1, 'C')
argtys = (arraytype, types.intp, types.intp, types.intp)
cr = compile_isolated(pyfunc, argtys, flags=enable_pyobj_flags)
cfunc = cr.entry_point
a = np.arange(10, dtype='i4')
cases = [
(a, 0, 10, 1),
(a, 0, 10, 2),
(a, 0, 10, -1),
(a, 2, 3, 1),
(a, 10, 0, 1),
]
for args in cases:
self.assertTrue(np.all(pyfunc(*args) == cfunc(*args)))
arraytype = types.Array(types.int32, 2, 'C')
argtys = (arraytype, types.intp, types.intp, types.intp)
cr = compile_isolated(pyfunc, argtys, flags=enable_pyobj_flags)
cfunc = cr.entry_point
a = np.arange(100, dtype='i4').reshape(10, 10)
cases = [
(a, 0, 10, 1),
(a, 0, 10, 2),
(a, 0, 10, -1),
(a, 2, 3, 1),
(a, 10, 0, 1),
]
for args in cases:
self.assertTrue(np.all(pyfunc(*args) == cfunc(*args)))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_wrapper
from __future__ import print_function
import numba.unittest_support as unittest
from numba import compiler, types, utils
from numba.targets import registry
import numpy
def overhead(x):
return x
def array_overhead(x):
x[0] = 1
x[1] = 2
def add(x):
return x + x + x + x + x
class TestWrapper(unittest.TestCase):
def test_overhead(self):
"""
This will show higher overhead due to unboxing in the native version.
"""
cr = compiler.compile_isolated(overhead, [types.int32])
cfunc = cr.entry_point
disp = registry.CPUOverloaded(overhead)
disp.add_overload(cr)
x = 321
def python():
overhead(x)
def pycfunc():
cfunc(x)
def overloaded():
disp(x)
print(overhead)
print(utils.benchmark(python, maxsec=.5))
print(utils.benchmark(pycfunc, maxsec=.5))
print(utils.benchmark(overloaded, maxsec=.5))
def test_array_overhead(self):
"""
        The time to set two array elements seems to be more expensive than
        the overhead of the overloaded call.
"""
cr = compiler.compile_isolated(array_overhead, [types.int32[::1]])
cfunc = cr.entry_point
disp = registry.CPUOverloaded(array_overhead)
disp.add_overload(cr)
self.assertEqual(cr.signature.args[0].layout, 'C')
x = numpy.zeros(shape=2, dtype='int32')
def python():
array_overhead(x)
def pycfunc():
cfunc(x)
def overloaded():
disp(x)
print(array_overhead)
print(utils.benchmark(python, maxsec=.5))
print(utils.benchmark(pycfunc, maxsec=.5))
print(utils.benchmark(overloaded, maxsec=.5))
def test_add(self):
"""
        This seems to be about the amount of work needed to balance out the
        overhead of the overloaded call.
"""
cr = compiler.compile_isolated(add, [types.int32])
cfunc = cr.entry_point
disp = registry.CPUOverloaded(add)
disp.add_overload(cr)
x = 321
def python():
add(x)
def pycfunc():
cfunc(x)
def overloaded():
disp(x)
print(add)
print(utils.benchmark(python, maxsec=.5))
print(utils.benchmark(pycfunc, maxsec=.5))
print(utils.benchmark(overloaded, maxsec=.5))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = true_div_usecase
from __future__ import division
def truediv_usecase(x, y):
return x / y
########NEW FILE########
__FILENAME__ = usecases
import math
import numpy as np
def sum1d(s, e):
c = 0
for i in range(s, e):
c += i
return c
def sum2d(s, e):
c = 0
for i in range(s, e):
for j in range(s, e):
c += i * j
return c
def while_count(s, e):
i = s
c = 0
while i < e:
c += i
i += 1
return c
def copy_arrays(a, b):
for i in range(a.shape[0]):
b[i] = a[i]
def copy_arrays2d(a, b):
for i in range(a.shape[0]):
for j in range(a.shape[1]):
b[i, j] = a[i, j]
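# Exercises redefinition of a variable with a changing type: x is first an
# int accumulator, then rebound to a float before the second loop.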
def redefine1():
x = 0
for i in range(5):
x += 1
x = 0. + x
for i in range(5):
x += 1
return x
def andor(x, y):
return (x > 0 and x < 10) or (y > 0 and y < 10)
def ifelse1(x, y):
if x > y:
return 1
elif x == 0 or y == 0:
return 2
else:
return 3
def ifelse2(x, y):
if x > 0:
if y > 0:
return 1
elif y < 0:
return 1
else:
return 0
elif x < 0:
return 1
else:
return 0
def ifelse3(x, y):
if x == y:
return 1
def string_concat(x, y):
a = "whatzup"
return a + str(x + y)
def string_len(s):
return len(s)
def string_slicing(s, start, stop):
return s[start:stop]
def string_conversion(x):
return str(x)
def string_comparison(s1, s2, op):
return op(s1, s2)
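# Cumulative normal distribution used by the Black-Scholes formula; the A1-A5
# constants appear to be the standard Hastings polynomial approximation.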
def blackscholes_cnd(d):
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
if d > 0:
ret_val = 1.0 - ret_val
return ret_val
def slicing(a, start, stop, step):
"""
    NoPython mode cannot create new array objects that outlive the scope of
    the function, because the array data buffer is borrowed from an object in
    the function scope or from an argument. Returning an array would require
    stealing the data buffer reference or copying the array contents.
"""
return a[start:stop:step]
########NEW FILE########
__FILENAME__ = rules
from __future__ import print_function, absolute_import
import itertools
from .typeconv import TypeManager
from numba import types
def _init_type_manager():
"""Returns a type manager with default rules
"""
tm = TypeManager()
grp_signed = (types.int8, types.int16, types.int32, types.int64)
grp_unsigned = (types.uint8, types.uint16, types.uint32, types.uint64)
grp_float = (types.float32, types.float64)
grp_complex = (types.complex64, types.complex128)
grp_inter = grp_signed + grp_unsigned + grp_float
grp_all = grp_inter + grp_complex
groups = grp_signed, grp_unsigned, grp_float, grp_complex
# First, all ints and floats are inter-convertible
for a, b in itertools.product(grp_inter, grp_inter):
tm.set_unsafe_convert(a, b)
# Other number types can convert to complex
for a, b in itertools.product(grp_all, grp_complex):
tm.set_unsafe_convert(a, b)
# Setup promotion
for grp in groups:
for i, a in enumerate(grp):
for b in grp[i + 1:]:
tm.set_promote(a, b)
# Setup safe conversion from unsigned to signed
# Allowed if the result can represent the full range
for a, b in zip(grp_unsigned, grp_signed[1:]):
tm.set_safe_convert(a, b)
# All ints less than 53 bits can safely convert to float64
f64 = types.float64
for ty in grp_signed[:-1]:
tm.set_safe_convert(ty, f64)
for ty in grp_unsigned[:-1]:
tm.set_safe_convert(ty, f64)
# Allow implicit convert from int64 to float64
tm.set_safe_convert(types.int64, types.float64)
tm.set_safe_convert(types.uint64, types.float64)
# All ints less than 24 bits can safely convert to float32
f32 = types.float32
for ty in grp_signed[:-2]:
tm.set_safe_convert(ty, f32)
for ty in grp_unsigned[:-2]:
tm.set_safe_convert(ty, f32)
# All numbers can unsafe convert to bool
boolean = types.boolean
for ty in grp_all:
tm.set_unsafe_convert(ty, boolean)
# boolean can safely promote to all numbers
boolean = types.boolean
for ty in grp_all:
tm.set_promote(boolean, ty)
return tm
default_type_manager = _init_type_manager()
def dump_number_rules():
tm = default_type_manager
for a, b in itertools.product(types.number_domain, types.number_domain):
print(a, '->', b, tm.check_compatible(a, b))
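# Illustrative usage sketch (not part of the original module): querying the
# default rules installed above. The exact value returned by check_compatible
# is defined by the _typeconv extension, so it is only printed here rather
# than asserted.
def _demo_default_rules():
    tm = default_type_manager
    for fromty, toty in [(types.int32, types.float64),
                         (types.uint8, types.int16),
                         (types.float64, types.complex128)]:
        print(fromty, '->', toty, tm.check_compatible(fromty, toty))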
########NEW FILE########
__FILENAME__ = typeconv
from __future__ import print_function, absolute_import
from . import _typeconv
class TypeManager(object):
def __init__(self):
self._ptr = _typeconv.new_type_manager()
def select_overload(self, sig, overloads):
sig = [t._code for t in sig]
overloads = [[t._code for t in s] for s in overloads ]
return _typeconv.select_overload(self._ptr, sig, overloads)
def check_compatible(self, fromty, toty):
return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)
def set_compatbile(self, fromty, toty, by):
_typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)
def set_promote(self, fromty, toty):
self.set_compatbile(fromty, toty, ord("p"))
def set_unsafe_convert(self, fromty, toty):
self.set_compatbile(fromty, toty, ord("u"))
def set_safe_convert(self, fromty, toty):
self.set_compatbile(fromty, toty, ord("s"))
def get_pointer(self):
return _typeconv.get_pointer(self._ptr)
########NEW FILE########
__FILENAME__ = typeinfer
"""
Type inference based on CPA.
The algorithm guarantees monotonic growth of type-sets for each variable.
Steps:
1. seed initial types
2. build constraints
3. propagate constraints
4. unify types
Constraint propagation is precise and never backtracks (it does not regret a
decision). Constraints push types forward following the dataflow.
"""
from __future__ import print_function, division, absolute_import
try:
import __builtin__ as builtins
except ImportError:
import builtins
from pprint import pprint
import itertools
from numba import ir, types, utils, config, ctypes_utils, cffi_support
from numba.config import PYVERSION
from numba import numpy_support
RANGE_ITER_OBJECTS = (builtins.range,)
if PYVERSION < (3, 0):
RANGE_ITER_OBJECTS += (builtins.xrange,)
class TypingError(Exception):
def __init__(self, msg, loc=None):
self.msg = msg
self.loc = loc
if loc:
super(TypingError, self).__init__("%s\n%s" % (msg, loc.strformat()))
else:
super(TypingError, self).__init__("%s" % (msg,))
class TypeVar(object):
def __init__(self, context, var):
self.context = context
self.var = var
self.typeset = set()
self.locked = False
def add_types(self, *types):
if not types:
return
nbefore = len(self.typeset)
if self.locked:
if set(types) != self.typeset:
[expect] = list(self.typeset)
for ty in types:
if self.context.type_compatibility(ty, expect) is None:
raise TypingError("No conversion from %s to %s for "
"'%s'" % (ty, expect, self.var))
else:
self.typeset |= set(types)
nafter = len(self.typeset)
assert nbefore <= nafter, "Must grow monotonically"
def lock(self, typ):
if self.locked:
[expect] = list(self.typeset)
if self.context.type_compatibility(typ, expect) is None:
raise TypingError("No convertsion from %s to %s for "
"'%s'" % (typ, expect, self.var))
else:
self.typeset = set([typ])
self.locked = True
def union(self, other):
self.add_types(*other.typeset)
def __repr__(self):
return '%s := {%s}' % (self.var, ', '.join(map(str, self.typeset)))
def get(self):
return tuple(self.typeset)
def getone(self):
assert len(self) == 1, self.typeset
return tuple(self.typeset)[0]
def __len__(self):
return len(self.typeset)
class ConstrainNetwork(object):
"""
    TODO: It is possible to optimize constraint propagation to consider only
    dirty type variables.
"""
def __init__(self):
self.constrains = []
def append(self, constrain):
self.constrains.append(constrain)
def propagate(self, context, typevars):
for constrain in self.constrains:
try:
constrain(context, typevars)
except TypingError:
raise
except Exception as e:
raise TypingError("Internal error:\n%s" % e, constrain.loc)
class Propagate(object):
"""
    A simple constraint for direct propagation of types for assignments.
"""
def __init__(self, dst, src, loc):
self.dst = dst
self.src = src
self.loc = loc
def __call__(self, context, typevars):
typevars[self.dst].union(typevars[self.src])
class BuildTupleConstrain(object):
def __init__(self, target, items, loc):
self.target = target
self.items = items
self.loc = loc
def __call__(self, context, typevars):
tsets = [typevars[i.name].get() for i in self.items]
oset = typevars[self.target]
for vals in itertools.product(*tsets):
if all(vals[0] == v for v in vals):
tup = types.UniTuple(dtype=vals[0], count=len(vals))
else:
tup = types.Tuple(vals)
oset.add_types(tup)
class CallConstrain(object):
"""Constrain for calling functions.
Perform case analysis foreach combinations of argument types.
"""
def __init__(self, target, func, args, kws, loc):
self.target = target
self.func = func
self.args = args
self.kws = kws
self.loc = loc
def __call__(self, context, typevars):
fnty = typevars[self.func].getone()
self.resolve(context, typevars, fnty)
def resolve(self, context, typevars, fnty):
assert not self.kws, "Keyword argument is not supported, yet"
argtypes = [typevars[a.name].get() for a in self.args]
restypes = []
# Case analysis for each combination of argument types.
for args in itertools.product(*argtypes):
# TODO handling keyword arguments
sig = context.resolve_function_type(fnty, args, ())
if sig is None:
msg = "Undeclared %s%s" % (fnty, args)
raise TypingError(msg, loc=self.loc)
restypes.append(sig.return_type)
typevars[self.target].add_types(*restypes)
class IntrinsicCallConstrain(CallConstrain):
def __call__(self, context, typevars):
self.resolve(context, typevars, fnty=self.func)
class GetAttrConstrain(object):
def __init__(self, target, attr, value, loc, inst):
self.target = target
self.attr = attr
self.value = value
self.loc = loc
self.inst = inst
def __call__(self, context, typevars):
valtys = typevars[self.value.name].get()
restypes = []
for ty in valtys:
try:
attrty = context.resolve_getattr(value=ty, attr=self.attr)
except KeyError:
args = (self.attr, ty, self.value.name, self.inst)
msg = "Unknown attribute '%s' for %s %s %s" % args
raise TypingError(msg, loc=self.inst.loc)
restypes.append(attrty)
typevars[self.target].add_types(*restypes)
class SetItemConstrain(object):
def __init__(self, target, index, value, loc):
self.target = target
self.index = index
self.value = value
self.loc = loc
def __call__(self, context, typevars):
targettys = typevars[self.target.name].get()
idxtys = typevars[self.index.name].get()
valtys = typevars[self.value.name].get()
for ty, it, vt in itertools.product(targettys, idxtys, valtys):
if not context.resolve_setitem(target=ty, index=it, value=vt):
raise TypingError("Cannot resolve setitem: %s[%s] = %s" %
(ty, it, vt), loc=self.loc)
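# Mapping of variable name -> TypeVar: unknown names get a fresh TypeVar on
# first access, and an existing entry cannot be rebound (see __setitem__).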
class TypeVarMap(dict):
def set_context(self, context):
self.context = context
def __getitem__(self, name):
if name not in self:
self[name] = TypeVar(self.context, name)
return super(TypeVarMap, self).__getitem__(name)
def __setitem__(self, name, value):
assert isinstance(name, str)
if name in self:
raise KeyError("Cannot redefine typevar %s" % name)
else:
super(TypeVarMap, self).__setitem__(name, value)
class TypeInferer(object):
"""
    Operates on blocks that share the same ir.Scope.
"""
def __init__(self, context, blocks):
self.context = context
self.blocks = blocks
self.typevars = TypeVarMap()
self.typevars.set_context(context)
self.constrains = ConstrainNetwork()
self.return_type = None
# Set of assumed immutable globals
self.assumed_immutables = set()
# Track all calls
self.usercalls = []
self.intrcalls = []
self.setitemcalls = []
def dump(self):
print('---- type variables ----')
pprint(utils.dict_values(self.typevars))
def seed_type(self, name, typ):
"""All arguments should be seeded.
"""
self.typevars[name].lock(typ)
def seed_return(self, typ):
"""Seeding of return value is optional.
"""
# self.return_type = typ
for blk in utils.dict_itervalues(self.blocks):
inst = blk.terminator
if isinstance(inst, ir.Return):
self.typevars[inst.value.name].lock(typ)
# self.typevars[inst.value.name].lock()
def build_constrain(self):
for blk in utils.dict_itervalues(self.blocks):
for inst in blk.body:
self.constrain_statement(inst)
def propagate(self):
newtoken = self.get_state_token()
oldtoken = None
if config.DEBUG:
self.dump()
        # Since the number of types is finite, the typesets will eventually
        # stop growing.
while newtoken != oldtoken:
if config.DEBUG:
print("propagate".center(80, '-'))
oldtoken = newtoken
self.constrains.propagate(self.context, self.typevars)
newtoken = self.get_state_token()
if config.DEBUG:
self.dump()
def unify(self):
typdict = utils.UniqueDict()
for var, tv in self.typevars.items():
if len(tv) == 1:
unified = tv.getone()
elif len(tv) == 0:
raise TypeError("Variable %s has no type" % var)
else:
unified = self.context.unify_types(*tv.get())
if unified == types.pyobject:
raise TypingError("Var '%s' unified to object: %s" % (var, tv))
typdict[var] = unified
retty = self.get_return_type(typdict)
fntys = self.get_function_types(typdict)
return typdict, retty, fntys
def get_function_types(self, typemap):
calltypes = utils.UniqueDict()
for call, args, kws in self.intrcalls:
if call.op in ('binop', 'unary'):
fnty = call.fn
else:
fnty = call.op
args = tuple(typemap[a.name] for a in args)
assert not kws
signature = self.context.resolve_function_type(fnty, args, ())
assert signature is not None, (fnty, args)
calltypes[call] = signature
for call, args, kws in self.usercalls:
args = tuple(typemap[a.name] for a in args)
if isinstance(call.func, ir.Intrinsic):
signature = call.func.type
else:
assert not kws
fnty = typemap[call.func.name]
signature = self.context.resolve_function_type(fnty, args, ())
assert signature is not None, (fnty, args)
calltypes[call] = signature
for inst in self.setitemcalls:
target = typemap[inst.target.name]
index = typemap[inst.index.name]
value = typemap[inst.value.name]
signature = self.context.resolve_setitem(target, index, value)
calltypes[inst] = signature
return calltypes
def get_return_type(self, typemap):
rettypes = set()
for blk in utils.dict_itervalues(self.blocks):
term = blk.terminator
if isinstance(term, ir.Return):
rettypes.add(typemap[term.value.name])
if types.none in rettypes:
# Special case None return
rettypes = rettypes - set([types.none])
if rettypes:
unified = self.context.unify_types(*rettypes)
return types.Optional(unified)
else:
return types.none
else:
unified = self.context.unify_types(*rettypes)
return unified
def get_state_token(self):
"""The algorithm is monotonic. It can only grow the typesets.
The sum of all lengths of type sets is a cheap and accurate
description of our progress.
"""
return sum(len(tv) for tv in utils.dict_itervalues(self.typevars))
def constrain_statement(self, inst):
if isinstance(inst, ir.Assign):
self.typeof_assign(inst)
elif isinstance(inst, ir.SetItem):
self.typeof_setitem(inst)
elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)):
pass
else:
raise NotImplementedError(inst)
def typeof_setitem(self, inst):
constrain = SetItemConstrain(target=inst.target, index=inst.index,
value=inst.value, loc=inst.loc)
self.constrains.append(constrain)
self.setitemcalls.append(inst)
def typeof_assign(self, inst):
value = inst.value
if isinstance(value, ir.Const):
self.typeof_const(inst, inst.target, value.value)
elif isinstance(value, ir.Var):
self.constrains.append(Propagate(dst=inst.target.name,
src=value.name, loc=inst.loc))
elif isinstance(value, ir.Global):
self.typeof_global(inst, inst.target, value)
elif isinstance(value, ir.Expr):
self.typeof_expr(inst, inst.target, value)
else:
raise NotImplementedError(type(value), value)
def typeof_const(self, inst, target, const):
if const is True or const is False:
self.typevars[target.name].lock(types.boolean)
elif isinstance(const, (int, float)):
ty = self.context.get_number_type(const)
self.typevars[target.name].lock(ty)
elif const is None:
self.typevars[target.name].lock(types.none)
# elif isinstance(const, str):
# self.typevars[target.name].lock(types.string)
elif isinstance(const, complex):
self.typevars[target.name].lock(types.complex128)
elif isinstance(const, tuple):
tys = []
for elem in const:
if isinstance(elem, int):
tys.append(types.intp)
if all(t == types.intp for t in tys):
typ = types.UniTuple(types.intp, len(tys))
else:
typ = types.Tuple(tys)
self.typevars[target.name].lock(typ)
else:
msg = "Unknown constant of type %s" % (const,)
raise TypingError(msg, loc=inst.loc)
def typeof_global(self, inst, target, gvar):
if (gvar.name in ('range', 'xrange') and
gvar.value in RANGE_ITER_OBJECTS):
gvty = self.context.get_global_type(gvar.value)
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif gvar.name == 'slice' and gvar.value is slice:
gvty = self.context.get_global_type(gvar.value)
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif gvar.name == 'len' and gvar.value is len:
gvty = self.context.get_global_type(gvar.value)
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif gvar.name in ('True', 'False'):
assert gvar.value in (True, False)
self.typevars[target.name].lock(types.boolean)
self.assumed_immutables.add(inst)
elif (isinstance(gvar.value, tuple) and
all(isinstance(x, int) for x in gvar.value)):
gvty = self.context.get_number_type(gvar.value[0])
self.typevars[target.name].lock(types.UniTuple(gvty, 2))
self.assumed_immutables.add(inst)
elif isinstance(gvar.value, (int, float)):
gvty = self.context.get_number_type(gvar.value)
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif numpy_support.is_arrayscalar(gvar.value):
gvty = numpy_support.map_arrayscalar_type(gvar.value)
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif numpy_support.is_array(gvar.value):
ary = gvar.value
dtype = numpy_support.from_dtype(ary.dtype)
# force C contiguous
gvty = types.Array(dtype, ary.ndim, 'C')
self.typevars[target.name].lock(gvty)
self.assumed_immutables.add(inst)
elif ctypes_utils.is_ctypes_funcptr(gvar.value):
cfnptr = gvar.value
fnty = ctypes_utils.make_function_type(cfnptr)
self.typevars[target.name].lock(fnty)
self.assumed_immutables.add(inst)
elif cffi_support.SUPPORTED and cffi_support.is_cffi_func(gvar.value):
fnty = cffi_support.make_function_type(gvar.value)
self.typevars[target.name].lock(fnty)
self.assumed_immutables.add(inst)
else:
try:
gvty = self.context.get_global_type(gvar.value)
except KeyError:
raise TypingError("Untyped global name '%s'" % gvar.name,
loc=inst.loc)
self.assumed_immutables.add(inst)
self.typevars[target.name].lock(gvty)
# TODO Hmmm...
# elif gvar.value is ir.UNDEFINED:
# self.typevars[target.name].add_types(types.pyobject)
def typeof_expr(self, inst, target, expr):
if expr.op == 'call':
if isinstance(expr.func, ir.Intrinsic):
restype = expr.func.type.return_type
self.typevars[target.name].add_types(restype)
self.usercalls.append((inst.value, expr.args, expr.kws))
else:
self.typeof_call(inst, target, expr)
elif expr.op in ('getiter', 'iternext', 'iternextsafe', 'itervalid'):
self.typeof_intrinsic_call(inst, target, expr.op, expr.value)
elif expr.op == 'binop':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == 'unary':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.value)
elif expr.op == 'getitem':
self.typeof_intrinsic_call(inst, target, expr.op, expr.target,
expr.index)
elif expr.op == 'getattr':
constrain = GetAttrConstrain(target.name, attr=expr.attr,
value=expr.value, loc=inst.loc,
inst=inst)
self.constrains.append(constrain)
elif expr.op == 'build_tuple':
constrain = BuildTupleConstrain(target.name, items=expr.items,
loc=inst.loc)
self.constrains.append(constrain)
else:
raise NotImplementedError(type(expr), expr)
def typeof_call(self, inst, target, call):
constrain = CallConstrain(target.name, call.func.name, call.args,
call.kws, loc=inst.loc)
self.constrains.append(constrain)
self.usercalls.append((inst.value, call.args, call.kws))
def typeof_intrinsic_call(self, inst, target, func, *args):
constrain = IntrinsicCallConstrain(target.name, func, args, (),
loc=inst.loc)
self.constrains.append(constrain)
self.intrcalls.append((inst.value, args, ()))
########NEW FILE########
__FILENAME__ = types
"""
These type objects do not have a fixed machine representation. It is up to
the targets to choose their representation.
"""
from __future__ import print_function, division, absolute_import
from collections import defaultdict
import numpy
def _autoincr():
n = len(_typecache)
# 4 billion types should be enough, right?
    assert n <= 2 ** 32, "Limited to 4 billion types"
return n
_typecache = defaultdict(_autoincr)
class Type(object):
"""
    The default behavior is to provide equality through the `name` attribute.
    Two types are equal if their `name` attributes are equal.
    Subclasses can refine this behavior.
"""
__slots__ = '_code', 'name', 'is_parametric'
def __init__(self, name, param=False):
self.name = name
self.is_parametric = param
self._code = _typecache[self]
def __repr__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not (self == other)
def __call__(self, *args):
if len(args) == 1 and not isinstance(args[0], Type):
return self.cast_python_value(args[0])
return Prototype(args=args, return_type=self)
def __getitem__(self, args):
assert not isinstance(self, Array)
ndim, layout = self._determine_array_spec(args)
return Array(dtype=self, ndim=ndim, layout=layout)
def _determine_array_spec(self, args):
if isinstance(args, (tuple, list)):
ndim = len(args)
if args[0].step == 1:
layout = 'F'
elif args[-1].step == 1:
layout = 'C'
else:
layout = 'A'
elif isinstance(args, slice):
ndim = 1
if args.step == 1:
layout = 'C'
else:
layout = 'A'
else:
ndim = 1
layout = 'A'
return ndim, layout
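    # Illustrative note (derived from _determine_array_spec above, not part of
    # the upstream docs): slicing a scalar type builds an array type, and the
    # slice steps pick the layout, e.g.
    #   float64[:, ::1]  -> Array(float64, 2, 'C')   (trailing step of 1)
    #   float64[::1, :]  -> Array(float64, 2, 'F')   (leading step of 1)
    #   float64[:, :]    -> Array(float64, 2, 'A')   (no unit step)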
__iter__ = NotImplemented
cast_python_value = NotImplemented
class Integer(Type):
def __init__(self, *args, **kws):
super(Integer, self).__init__(*args, **kws)
# Determine bitwidth
for prefix in ('int', 'uint'):
if self.name.startswith(prefix):
bitwidth = int(self.name[len(prefix):])
self.bitwidth = bitwidth
self.signed = self.name.startswith('int')
def cast_python_value(self, value):
return getattr(numpy, self.name)(value)
class Float(Type):
def cast_python_value(self, value):
return getattr(numpy, self.name)(value)
class Complex(Type):
def cast_python_value(self, value):
return getattr(numpy, self.name)(value)
class Prototype(Type):
def __init__(self, args, return_type):
self.args = args
self.return_type = return_type
name = "%s(%s)" % (return_type, ', '.join(str(a) for a in args))
super(Prototype, self).__init__(name=name)
class Dummy(Type):
"""
    For a type that does not really have a representation and is compatible
with a void*.
"""
class Kind(Type):
def __init__(self, of):
self.of = of
super(Kind, self).__init__("kind(%s)" % of)
def __eq__(self, other):
if isinstance(other, Kind):
return self.of == other.of
def __hash__(self):
return hash(self.of)
class Module(Type):
def __init__(self, pymod):
self.pymod = pymod
super(Module, self).__init__("Module(%s)" % pymod)
def __eq__(self, other):
if isinstance(other, Module):
return self.pymod == other.pymod
def __hash__(self):
return hash(self.pymod)
class Macro(Type):
def __init__(self, template):
self.template = template
cls = type(self)
super(Macro, self).__init__("%s(%s)" % (cls.__name__, template))
def __eq__(self, other):
if isinstance(other, Macro):
return self.template == other.template
def __hash__(self):
# FIXME maybe this should not be hashable
return hash(self.template)
class Function(Type):
def __init__(self, template):
self.template = template
cls = type(self)
# TODO template is mutable. Should use different naming scheme
super(Function, self).__init__("%s(%s)" % (cls.__name__, template))
def __eq__(self, other):
if isinstance(other, Function):
return self.template == other.template
def __hash__(self):
# FIXME maybe this should not be hashable
return hash(self.template)
def extend(self, template):
self.template.cases.extend(template.cases)
class Dispatcher(Type):
def __init__(self, overloaded):
self.overloaded = overloaded
super(Dispatcher, self).__init__("Dispatcher(%s)" % overloaded)
def __eq__(self, other):
if isinstance(other, Dispatcher):
return self.overloaded is other.overloaded
def __hash__(self):
return hash(self.overloaded)
class FunctionPointer(Function):
def __init__(self, template, funcptr):
self.funcptr = funcptr
super(FunctionPointer, self).__init__(template)
class Method(Function):
def __init__(self, template, this):
self.this = this
newcls = type(template.__name__ + '.' + str(this), (template,),
dict(this=this))
super(Method, self).__init__(newcls)
def __eq__(self, other):
if isinstance(other, Method):
return (self.template.__name__ == other.template.__name__ and
self.this == other.this)
def __hash__(self):
return hash((self.template.__name__, self.this))
class Array(Type):
__slots__ = 'dtype', 'ndim', 'layout'
# CS and FS are not reserved for inner contig but strided
LAYOUTS = frozenset(['C', 'F', 'CS', 'FS', 'A'])
def __init__(self, dtype, ndim, layout):
from numba.typeconv.rules import default_type_manager as tm
if isinstance(dtype, Array):
raise TypeError("Array dtype cannot be Array")
if layout not in self.LAYOUTS:
raise ValueError("Invalid layout '%s'" % layout)
self.dtype = dtype
self.ndim = ndim
self.layout = layout
name = "array(%s, %sd, %s)" % (dtype, ndim, layout)
super(Array, self).__init__(name, param=True)
if layout != 'A':
# Install conversion from non-any layout to any layout
ary_any = Array(dtype, ndim, 'A')
tm.set_safe_convert(self, ary_any)
def copy(self, dtype=None, ndim=None, layout=None):
if dtype is None:
dtype = self.dtype
if ndim is None:
ndim = self.ndim
if layout is None:
layout = self.layout
return Array(dtype=dtype, ndim=ndim, layout=layout)
def get_layout(self, dim):
assert 0 <= dim < self.ndim
if self.layout in 'CFA':
return self.layout
elif self.layout == 'CS':
if dim == self.ndim - 1:
return 'C'
elif self.layout == 'FS':
if dim == 0:
return 'F'
return 'A'
def getitem(self, ind):
"""Returns (return-type, index-type)
"""
if isinstance(ind, UniTuple):
idxty = UniTuple(intp, ind.count)
else:
idxty = intp
return self.dtype, idxty
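    # For example, a 2d array indexed with a UniTuple of two integers yields
    # (self.dtype, UniTuple(intp, 2)); any other index is treated as a single
    # intp (illustrative reading of the branch above).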
def setitem(self):
"""Returns (index-type, value-type)
"""
return intp, self.dtype
def __eq__(self, other):
if isinstance(other, Array):
return (self.dtype == other.dtype and
self.ndim == other.ndim and
self.layout == other.layout)
def __hash__(self):
return hash((self.dtype, self.ndim, self.layout))
@property
def is_c_contig(self):
return self.layout == 'C' or (self.ndim == 1 and self.layout in 'CF')
@property
def is_f_contig(self):
return self.layout == 'F' or (self.ndim == 1 and self.layout in 'CF')
@property
def is_contig(self):
return self.layout in 'CF'
class UniTuple(Type):
def __init__(self, dtype, count):
self.dtype = dtype
self.count = count
name = "(%s x %d)" % (dtype, count)
super(UniTuple, self).__init__(name, param=True)
def getitem(self, ind):
if isinstance(ind, UniTuple):
idxty = UniTuple(intp, ind.count)
else:
idxty = intp
        return self.dtype, idxty
def __getitem__(self, i):
"""
Return element at position i
"""
return self.dtype
def __iter__(self):
return iter([self.dtype] * self.count)
def __len__(self):
return self.count
def __eq__(self, other):
if isinstance(other, UniTuple):
return self.dtype == other.dtype and self.count == other.count
def __hash__(self):
return hash((self.dtype, self.count))
class UniTupleIter(Type):
def __init__(self, unituple):
self.unituple = unituple
name = 'iter(%s)' % unituple
super(UniTupleIter, self).__init__(name, param=True)
def __eq__(self, other):
if isinstance(other, UniTupleIter):
return self.unituple == other.unituple
def __hash__(self):
return hash(self.unituple)
class Tuple(Type):
def __init__(self, items):
self.items = items
self.count = len(items)
name = "(%s)" % ', '.join(str(i) for i in items)
super(Tuple, self).__init__(name, param=True)
def __getitem__(self, i):
"""
Return element at position i
"""
return self.items[i]
def __len__(self):
return len(self.items)
def __eq__(self, other):
if isinstance(other, Tuple):
return self.items == other.items
def __hash__(self):
return hash(self.items)
def __iter__(self):
return iter(self.items)
class CPointer(Type):
def __init__(self, dtype):
self.dtype = dtype
name = "*%s" % dtype
super(CPointer, self).__init__(name, param=True)
def __eq__(self, other):
if isinstance(other, CPointer):
return self.dtype == other.dtype
def __hash__(self):
return hash(self.dtype)
class Object(Type):
def __init__(self, clsobj):
self.cls = clsobj
name = "Object(%s)" % clsobj.__name__
super(Object, self).__init__(name, param=True)
def __eq__(self, other):
if isinstance(other, Object):
return self.cls == other.cls
def __hash__(self):
return hash(self.cls)
class Optional(Type):
def __init__(self, typ):
self.type = typ
name = "?%s" % typ
super(Optional, self).__init__(name, param=True)
def __eq__(self, other):
if isinstance(other, Optional):
return self.type == other.type
def __hash__(self):
return hash(self.type)
pyobject = Type('pyobject')
none = Dummy('none')
Any = Dummy('any')
VarArg = Dummy('...')
string = Dummy('str')
# No operation is defined on voidptr
# Can only pass it around
voidptr = Dummy('void*')
boolean = bool_ = Type('bool')
byte = uint8 = Integer('uint8')
uint16 = Integer('uint16')
uint32 = Integer('uint32')
uint64 = Integer('uint64')
int8 = Integer('int8')
int16 = Integer('int16')
int32 = Integer('int32')
int64 = Integer('int64')
intp = int32 if tuple.__itemsize__ == 4 else int64
uintp = uint32 if tuple.__itemsize__ == 4 else uint64
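# Note: tuple.__itemsize__ is the per-item size of a tuple's variable part (a
# pointer), so it is typically 8 on 64-bit CPython builds, making intp/uintp
# resolve to int64/uint64 there and to int32/uint32 on 32-bit builds.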
float32 = Float('float32')
float64 = Float('float64')
complex64 = Complex('complex64')
complex128 = Complex('complex128')
len_type = Dummy('len')
range_type = Dummy('range')
slice_type = Dummy('slice')
abs_type = Dummy('abs')
neg_type = Dummy('neg')
print_type = Dummy('print')
sign_type = Dummy('sign')
range_state32_type = Type('range_state32')
range_state64_type = Type('range_state64')
range_iter32_type = Type('range_iter32')
range_iter64_type = Type('range_iter64')
# slice2_type = Type('slice2_type')
slice3_type = Type('slice3_type')
signed_domain = frozenset([int8, int16, int32, int64])
unsigned_domain = frozenset([uint8, uint16, uint32, uint64])
integer_domain = signed_domain | unsigned_domain
real_domain = frozenset([float32, float64])
complex_domain = frozenset([complex64, complex128])
number_domain = real_domain | integer_domain | complex_domain
# Aliases to Numpy type names
b1 = bool_
i1 = int8
i2 = int16
i4 = int32
i8 = int64
u1 = uint8
u2 = uint16
u4 = uint32
u8 = uint64
f4 = float32
f8 = float64
c8 = complex64
c16 = complex128
float_ = float32
double = float64
void = none
_make_signed = lambda x: globals()["int%d" % (numpy.dtype(x).itemsize * 8)]
_make_unsigned = lambda x: globals()["uint%d" % (numpy.dtype(x).itemsize * 8)]
char = _make_signed(numpy.byte)
uchar = byte = _make_unsigned(numpy.byte)
short = _make_signed(numpy.short)
ushort = _make_unsigned(numpy.short)
int_ = _make_signed(numpy.int_)
uint = _make_unsigned(numpy.int_)
intc = _make_signed(numpy.intc) # C-compat int
uintc = _make_unsigned(numpy.uintc)      # C-compat uint
long_ = _make_signed(numpy.long)
ulong = _make_unsigned(numpy.long)
longlong = _make_signed(numpy.longlong)
ulonglong = _make_unsigned(numpy.longlong)
__all__ = '''
int8
int16
int32
int64
uint8
uint16
uint32
uint64
intp
intc
boolean
float32
float64
complex64
complex128
bool_
byte
char
uchar
short
ushort
int_
uint
long_
ulong
longlong
ulonglong
float_
double
void
none
b1
i1
i2
i4
i8
u1
u2
u4
u8
f4
f8
c8
c16
'''.split()
########NEW FILE########
__FILENAME__ = type_annotations
from __future__ import print_function, absolute_import
import inspect
import re
from collections import Mapping, defaultdict
import textwrap
from contextlib import closing
from numba.io_support import StringIO
from numba import ir
class SourceLines(Mapping):
def __init__(self, func):
try:
lines, startno = inspect.getsourcelines(func)
except IOError:
self.lines = ()
self.startno = 0
else:
self.lines = textwrap.dedent(''.join(lines)).splitlines()
self.startno = startno
def __getitem__(self, lineno):
try:
return self.lines[lineno - self.startno].rstrip()
except IndexError:
return ''
def __iter__(self):
return iter((self.startno + i) for i in range(len(self.lines)))
def __len__(self):
return len(self.lines)
@property
def avail(self):
return bool(self.lines)
class TypeAnnotation(object):
def __init__(self, interp, typemap, calltypes, lifted):
self.filename = interp.bytecode.filename
self.func = interp.bytecode.func
self.blocks = interp.blocks
self.typemap = typemap
self.calltypes = calltypes
self.lifted = lifted
def annotate(self):
source = SourceLines(self.func)
# if not source.avail:
# return "Source code unavailable"
# Prepare annotations
groupedinst = defaultdict(list)
for blkid, blk in self.blocks.items():
groupedinst[blk.loc.line].append("label %d" % blkid)
for inst in blk.body:
lineno = inst.loc.line
if isinstance(inst, ir.Assign):
if (isinstance(inst.value, ir.Expr) and
inst.value.op == 'call'):
atype = self.calltypes[inst.value]
else:
atype = self.typemap[inst.target.name]
aline = "%s = %s :: %s" % (inst.target, inst.value, atype)
elif isinstance(inst, ir.SetItem):
atype = self.calltypes[inst]
aline = "%s :: %s" % (inst, atype)
else:
aline = "%s" % inst
groupedinst[lineno].append(" %s" % aline)
# Format annotations
io = StringIO()
with closing(io):
if source.avail:
print("# File: %s" % self.filename, file=io)
for num in source:
srcline = source[num]
ind = _getindent(srcline)
print("%s# --- LINE %d --- " % (ind, num), file=io)
for inst in groupedinst[num]:
print('%s# %s' % (ind, inst), file=io)
print(file=io)
print(srcline, file=io)
print(file=io)
if self.lifted:
print("# The function contains lifted loops", file=io)
for loop in self.lifted:
print("# Loop at line %d" % loop.bytecode.firstlineno,
file=io)
print("# Has %d overloads" % len(loop.overloads),
file=io)
for cres in loop.overloads.values():
print(cres.type_annotation, file=io)
else:
print("# Source code unavailable", file=io)
for num in groupedinst:
for inst in groupedinst[num]:
print('%s' % (inst,), file=io)
print(file=io)
return io.getvalue()
def __str__(self):
return self.annotate()
re_longest_white_prefix = re.compile(r'^\s*')
def _getindent(text):
m = re_longest_white_prefix.match(text)
if not m:
return ''
else:
return ' ' * len(m.group(0))
########NEW FILE########
__FILENAME__ = builtins
from __future__ import print_function, division, absolute_import
from numba import types
from numba.utils import PYVERSION
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, builtin_global, builtin,
builtin_attr, signature)
builtin_global(range, types.range_type)
if PYVERSION < (3, 0):
builtin_global(xrange, types.range_type)
builtin_global(len, types.len_type)
builtin_global(slice, types.slice_type)
builtin_global(abs, types.abs_type)
builtin_global(print, types.print_type)
@builtin
class Print(ConcreteTemplate):
key = types.print_type
intcases = [signature(types.none, ty) for ty in types.integer_domain]
realcases = [signature(types.none, ty) for ty in types.real_domain]
cases = intcases + realcases
@builtin
class Abs(ConcreteTemplate):
key = types.abs_type
intcases = [signature(ty, ty) for ty in types.signed_domain]
realcases = [signature(ty, ty) for ty in types.real_domain]
cases = intcases + realcases
@builtin
class Slice(ConcreteTemplate):
key = types.slice_type
cases = [
signature(types.slice3_type),
signature(types.slice3_type, types.none, types.none),
signature(types.slice3_type, types.none, types.intp),
signature(types.slice3_type, types.intp, types.none),
signature(types.slice3_type, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.intp),
]
@builtin
class Range(ConcreteTemplate):
key = types.range_type
cases = [
signature(types.range_state32_type, types.int32),
signature(types.range_state32_type, types.int32, types.int32),
signature(types.range_state32_type, types.int32, types.int32,
types.int32),
signature(types.range_state64_type, types.int64),
signature(types.range_state64_type, types.int64, types.int64),
signature(types.range_state64_type, types.int64, types.int64,
types.int64),
]
@builtin
class GetIter(ConcreteTemplate):
key = "getiter"
cases = [
signature(types.range_iter32_type, types.range_state32_type),
signature(types.range_iter64_type, types.range_state64_type),
]
@builtin
class GetIterUniTuple(AbstractTemplate):
key = "getiter"
def generic(self, args, kws):
assert not kws
[tup] = args
if isinstance(tup, types.UniTuple):
return signature(types.UniTupleIter(tup), tup)
@builtin
class IterNext(ConcreteTemplate):
key = "iternext"
cases = [
signature(types.int32, types.range_iter32_type),
signature(types.int64, types.range_iter64_type),
]
@builtin
class IterNextSafe(AbstractTemplate):
key = "iternextsafe"
def generic(self, args, kws):
assert not kws
[tupiter] = args
if isinstance(tupiter, types.UniTupleIter):
return signature(tupiter.unituple.dtype, tupiter)
@builtin
class IterValid(ConcreteTemplate):
key = "itervalid"
cases = [
signature(types.boolean, types.range_iter32_type),
signature(types.boolean, types.range_iter64_type),
]
class BinOp(ConcreteTemplate):
cases = [
signature(types.uintp, types.uint8, types.uint8),
signature(types.uintp, types.uint16, types.uint16),
signature(types.uintp, types.uint32, types.uint32),
signature(types.uint64, types.uint64, types.uint64),
signature(types.intp, types.int8, types.int8),
signature(types.intp, types.int16, types.int16),
signature(types.intp, types.int32, types.int32),
signature(types.int64, types.int64, types.int64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
signature(types.complex64, types.complex64, types.complex64),
signature(types.complex128, types.complex128, types.complex128),
]
@builtin
class BinOpAdd(BinOp):
key = "+"
@builtin
class BinOpSub(BinOp):
key = "-"
@builtin
class BinOpMul(BinOp):
key = "*"
@builtin
class BinOpDiv(BinOp):
key = "/?"
@builtin
class BinOpMod(ConcreteTemplate):
key = "%"
cases = [
signature(types.uint8, types.uint8, types.uint8),
signature(types.uint16, types.uint16, types.uint16),
signature(types.uint32, types.uint32, types.uint32),
signature(types.uint64, types.uint64, types.uint64),
signature(types.int8, types.int8, types.int8),
signature(types.int16, types.int16, types.int16),
signature(types.int32, types.int32, types.int32),
signature(types.int64, types.int64, types.int64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
@builtin
class BinOpTrueDiv(ConcreteTemplate):
key = "/"
cases = [
signature(types.float64, types.uint8, types.uint8),
signature(types.float64, types.uint16, types.uint16),
signature(types.float64, types.uint32, types.uint32),
signature(types.float64, types.uint64, types.uint64),
signature(types.float64, types.int8, types.int8),
signature(types.float64, types.int16, types.int16),
signature(types.float64, types.int32, types.int32),
signature(types.float64, types.int64, types.int64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
signature(types.complex64, types.complex64, types.complex64),
signature(types.complex128, types.complex128, types.complex128),
]
@builtin
class BinOpFloorDiv(ConcreteTemplate):
key = "//"
cases = [
signature(types.int8, types.int8, types.int8),
signature(types.int16, types.int16, types.int16),
signature(types.int32, types.int32, types.int32),
signature(types.int64, types.int64, types.int64),
signature(types.uint8, types.uint8, types.uint8),
signature(types.uint16, types.uint16, types.uint16),
signature(types.uint32, types.uint32, types.uint32),
signature(types.uint64, types.uint64, types.uint64),
signature(types.int32, types.float32, types.float32),
signature(types.int64, types.float64, types.float64),
]
@builtin
class BinOpPower(ConcreteTemplate):
key = "**"
cases = [
signature(types.float64, types.float64, types.uint8),
signature(types.float64, types.float64, types.uint16),
signature(types.float64, types.float64, types.uint32),
signature(types.float64, types.float64, types.uint64),
signature(types.float64, types.float64, types.int8),
signature(types.float64, types.float64, types.int16),
signature(types.float64, types.float64, types.int32),
signature(types.float64, types.float64, types.int64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
signature(types.complex64, types.complex64, types.complex64),
signature(types.complex128, types.complex128, types.complex128),
]
class BitwiseShiftOperation(ConcreteTemplate):
cases = [
signature(types.int8, types.int8, types.uint32),
signature(types.int16, types.int16, types.uint32),
signature(types.int32, types.int32, types.uint32),
signature(types.int64, types.int64, types.uint32),
signature(types.uint8, types.uint8, types.uint32),
signature(types.uint16, types.uint16, types.uint32),
signature(types.uint32, types.uint32, types.uint32),
signature(types.uint64, types.uint64, types.uint32),
]
@builtin
class BitwiseLeftShift(BitwiseShiftOperation):
key = "<<"
@builtin
class BitwiseRightShift(BitwiseShiftOperation):
key = ">>"
class BitwiseLogicOperation(BinOp):
cases = [
signature(types.uintp, types.uint8, types.uint8),
signature(types.uintp, types.uint16, types.uint16),
signature(types.uintp, types.uint32, types.uint32),
signature(types.uint64, types.uint64, types.uint64),
signature(types.intp, types.int8, types.int8),
signature(types.intp, types.int16, types.int16),
signature(types.intp, types.int32, types.int32),
signature(types.int64, types.int64, types.int64),
]
@builtin
class BitwiseAnd(BitwiseLogicOperation):
key = "&"
@builtin
class BitwiseOr(BitwiseLogicOperation):
key = "|"
@builtin
class BitwiseXor(BitwiseLogicOperation):
key = "^"
@builtin
class BitwiseInvert(ConcreteTemplate):
key = "~"
cases = [
signature(types.int8, types.boolean),
signature(types.uint8, types.uint8),
signature(types.uint16, types.uint16),
signature(types.uint32, types.uint32),
signature(types.uint64, types.uint64),
signature(types.int8, types.int8),
signature(types.int16, types.int16),
signature(types.int32, types.int32),
signature(types.int64, types.int64),
]
class UnaryOp(ConcreteTemplate):
cases = [
signature(types.uintp, types.uint8),
signature(types.uintp, types.uint16),
signature(types.uintp, types.uint32),
signature(types.uint64, types.uint64),
signature(types.intp, types.int8),
signature(types.intp, types.int16),
signature(types.intp, types.int32),
signature(types.int64, types.int64),
signature(types.float32, types.float32),
signature(types.float64, types.float64),
signature(types.complex64, types.complex64),
signature(types.complex128, types.complex128),
]
@builtin
class UnaryNot(UnaryOp):
key = "not"
cases = [
signature(types.boolean, types.boolean),
signature(types.boolean, types.uint8),
signature(types.boolean, types.uint16),
signature(types.boolean, types.uint32),
signature(types.boolean, types.uint64),
signature(types.boolean, types.int8),
signature(types.boolean, types.int16),
signature(types.boolean, types.int32),
signature(types.boolean, types.int64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
signature(types.boolean, types.complex64),
signature(types.boolean, types.complex128),
]
@builtin
class UnaryNegate(UnaryOp):
key = "-"
cases = [
signature(types.uintp, types.uint8),
signature(types.uintp, types.uint16),
signature(types.uintp, types.uint32),
signature(types.uint64, types.uint64),
signature(types.intp, types.int8),
signature(types.intp, types.int16),
signature(types.intp, types.int32),
signature(types.int64, types.int64),
signature(types.float32, types.float32),
signature(types.float64, types.float64),
signature(types.complex64, types.complex64),
signature(types.complex128, types.complex128),
]
class CmpOp(ConcreteTemplate):
cases = [
signature(types.boolean, types.boolean, types.boolean),
signature(types.boolean, types.uint8, types.uint8),
signature(types.boolean, types.uint16, types.uint16),
signature(types.boolean, types.uint32, types.uint32),
signature(types.boolean, types.uint64, types.uint64),
signature(types.boolean, types.int8, types.int8),
signature(types.boolean, types.int16, types.int16),
signature(types.boolean, types.int32, types.int32),
signature(types.boolean, types.int64, types.int64),
signature(types.boolean, types.float32, types.float32),
signature(types.boolean, types.float64, types.float64),
]
@builtin
class CmpOpLt(CmpOp):
key = '<'
@builtin
class CmpOpLe(CmpOp):
key = '<='
@builtin
class CmpOpGt(CmpOp):
key = '>'
@builtin
class CmpOpGe(CmpOp):
key = '>='
@builtin
class CmpOpEq(CmpOp):
key = '=='
@builtin
class CmpOpNe(CmpOp):
key = '!='
def normalize_index(index):
if isinstance(index, types.UniTuple):
if index.dtype in types.integer_domain:
return types.UniTuple(types.intp, len(index))
elif index.dtype == types.slice3_type:
return index
elif isinstance(index, types.Tuple):
for ty in index:
if (ty not in types.integer_domain and
ty not in types.real_domain and
ty != types.slice3_type):
return
return index
elif index == types.slice3_type:
return types.slice3_type
# elif index == types.slice2_type:
# return types.slice2_type
else:
return types.intp
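# Illustrative behaviour of normalize_index (derived from the code above):
#   UniTuple(int64, 2)          -> UniTuple(intp, 2)
#   slice3_type                 -> slice3_type
#   Tuple((intp, slice3_type))  -> returned unchanged
#   any other scalar index      -> intp
# A Tuple containing an unsupported element type yields None.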
@builtin
class GetItemUniTuple(AbstractTemplate):
key = "getitem"
def generic(self, args, kws):
tup, idx = args
if isinstance(tup, types.UniTuple):
return signature(tup.dtype, tup, normalize_index(idx))
@builtin
class GetItemArray(AbstractTemplate):
key = "getitem"
def generic(self, args, kws):
assert not kws
[ary, idx] = args
if not isinstance(ary, types.Array):
return
idx = normalize_index(idx)
if not idx:
return
if idx == types.slice3_type: #(types.slice2_type, types.slice3_type):
res = ary.copy(layout='A')
elif isinstance(idx, (types.UniTuple, types.Tuple)):
if ary.ndim > len(idx):
return
elif ary.ndim < len(idx):
return
elif any(i == types.slice3_type for i in idx):
ndim = ary.ndim
for i in idx:
if i != types.slice3_type:
ndim -= 1
res = ary.copy(ndim=ndim, layout='A')
else:
res = ary.dtype
elif idx == types.intp:
if ary.ndim != 1:
return
res = ary.dtype
else:
raise Exception("unreachable: index type of %s" % idx)
return signature(res, ary, idx)
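# For example (following the branches above): indexing a 2d array with
# (intp, slice3_type) drops one dimension and yields a 1d 'A'-layout copy of
# the array type, while indexing a 1d array with a single intp yields the
# element dtype.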
@builtin
class SetItemArray(AbstractTemplate):
key = "setitem"
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if isinstance(ary, types.Array):
return signature(types.none, ary, normalize_index(idx), ary.dtype)
@builtin
class LenArray(AbstractTemplate):
key = types.len_type
def generic(self, args, kws):
assert not kws
(ary,) = args
if isinstance(ary, types.Array):
return signature(types.intp, ary)
#-------------------------------------------------------------------------------
@builtin_attr
class ArrayAttribute(AttributeTemplate):
key = types.Array
def resolve_shape(self, ary):
return types.UniTuple(types.intp, ary.ndim)
def resolve_strides(self, ary):
return types.UniTuple(types.intp, ary.ndim)
def resolve_ndim(self, ary):
return types.intp
#
# def resolve_flatten(self, ary):
# return types.Method(Array_flatten, ary)
def resolve_size(self, ary):
return types.intp
class Array_flatten(AbstractTemplate):
key = "array.flatten"
def generic(self, args, kws):
assert not args
assert not kws
this = self.this
if this.layout == 'C':
resty = this.copy(ndim=1)
return signature(resty, recvr=this)
@builtin
class CmpOpEqArray(AbstractTemplate):
key = '=='
def generic(self, args, kws):
assert not kws
[va, vb] = args
if isinstance(va, types.Array) and va == vb:
return signature(va.copy(dtype=types.boolean), va, vb)
#-------------------------------------------------------------------------------
class ComplexAttribute(AttributeTemplate):
def resolve_real(self, ty):
return self.innertype
def resolve_imag(self, ty):
return self.innertype
@builtin_attr
class Complex64Attribute(ComplexAttribute):
key = types.complex64
innertype = types.float32
@builtin_attr
class Complex128Attribute(ComplexAttribute):
key = types.complex128
innertype = types.float64
#-------------------------------------------------------------------------------
@builtin_attr
class NumbaTypesModuleAttribute(AttributeTemplate):
key = types.Module(types)
def resolve_int8(self, mod):
return types.Function(ToInt8)
def resolve_int16(self, mod):
return types.Function(ToInt16)
def resolve_int32(self, mod):
return types.Function(ToInt32)
def resolve_int64(self, mod):
return types.Function(ToInt64)
def resolve_uint8(self, mod):
return types.Function(ToUint8)
def resolve_uint16(self, mod):
return types.Function(ToUint16)
def resolve_uint32(self, mod):
return types.Function(ToUint32)
def resolve_uint64(self, mod):
return types.Function(ToUint64)
def resolve_float32(self, mod):
return types.Function(ToFloat32)
def resolve_float64(self, mod):
return types.Function(ToFloat64)
def resolve_complex64(self, mod):
return types.Function(ToComplex64)
def resolve_complex128(self, mod):
return types.Function(ToComplex128)
class Caster(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[a] = args
if a in types.number_domain:
return signature(self.key, a)
class ToInt8(Caster):
key = types.int8
class ToInt16(Caster):
key = types.int16
class ToInt32(Caster):
key = types.int32
class ToInt64(Caster):
key = types.int64
class ToUint8(Caster):
key = types.uint8
class ToUint16(Caster):
key = types.uint16
class ToUint32(Caster):
key = types.uint32
class ToUint64(Caster):
key = types.uint64
class ToFloat32(Caster):
key = types.float32
class ToFloat64(Caster):
key = types.float64
class ToComplex64(Caster):
key = types.complex64
class ToComplex128(Caster):
key = types.complex128
builtin_global(types, types.Module(types))
builtin_global(types.int8, types.Function(ToInt8))
builtin_global(types.int16, types.Function(ToInt16))
builtin_global(types.int32, types.Function(ToInt32))
builtin_global(types.int64, types.Function(ToInt64))
builtin_global(types.uint8, types.Function(ToUint8))
builtin_global(types.uint16, types.Function(ToUint16))
builtin_global(types.uint32, types.Function(ToUint32))
builtin_global(types.uint64, types.Function(ToUint64))
builtin_global(types.float32, types.Function(ToFloat32))
builtin_global(types.float64, types.Function(ToFloat64))
builtin_global(types.complex64, types.Function(ToComplex64))
builtin_global(types.complex128, types.Function(ToComplex128))
#------------------------------------------------------------------------------
class Max(AbstractTemplate):
key = max
def generic(self, args, kws):
assert not kws
for a in args:
if a not in types.number_domain:
raise TypeError("max() only support for numbers")
retty = self.context.unify_types(*args)
return signature(retty, *args)
class Min(AbstractTemplate):
key = min
def generic(self, args, kws):
assert not kws
for a in args:
if a not in types.number_domain:
raise TypeError("min() only support for numbers")
retty = self.context.unify_types(*args)
return signature(retty, *args)
class Round(ConcreteTemplate):
key = round
cases = [
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
builtin_global(max, types.Function(Max))
builtin_global(min, types.Function(Min))
builtin_global(round, types.Function(Round))
#------------------------------------------------------------------------------
class Bool(AbstractTemplate):
key = bool
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("bool() only support for numbers")
return signature(types.boolean, arg)
class Int(AbstractTemplate):
key = int
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("int() only support for numbers")
if arg in types.complex_domain:
raise TypeError("int() does not support complex")
if arg in types.integer_domain:
return signature(arg, arg)
if arg in types.real_domain:
return signature(types.intp, arg)
class Float(AbstractTemplate):
key = float
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("float() only support for numbers")
if arg in types.complex_domain:
raise TypeError("float() does not support complex")
if arg in types.integer_domain:
return signature(types.float64, arg)
elif arg in types.real_domain:
return signature(arg, arg)
class Complex(AbstractTemplate):
key = complex
def generic(self, args, kws):
assert not kws
if len(args) == 1:
[arg] = args
if arg not in types.number_domain:
raise TypeError("complex() only support for numbers")
return signature(types.complex128, arg)
elif len(args) == 2:
[real, imag] = args
if (real not in types.number_domain or
imag not in types.number_domain):
raise TypeError("complex() only support for numbers")
return signature(types.complex128, real, imag)
builtin_global(bool, types.Function(Bool))
builtin_global(int, types.Function(Int))
builtin_global(float, types.Function(Float))
builtin_global(complex, types.Function(Complex))
########NEW FILE########
__FILENAME__ = context
from __future__ import print_function
from collections import defaultdict
import functools
import numpy
from numba import types, utils
from numba.typeconv import rules
from . import templates
# Initialize declarations
from . import builtins, mathdecl, npydecl
class BaseContext(object):
"""A typing context for storing function typing constrain template.
"""
def __init__(self):
self.functions = defaultdict(list)
self.attributes = {}
self.globals = utils.UniqueDict()
self.tm = rules.default_type_manager
self._load_builtins()
self.init()
def init(self):
pass
def get_number_type(self, num):
if isinstance(num, int):
nbits = utils.bit_length(num)
if nbits < 32:
typ = types.int32
elif nbits < 64:
typ = types.int64
else:
raise ValueError("Int value is too large: %s" % num)
return typ
elif isinstance(num, float):
return types.float64
else:
raise NotImplementedError(type(num), num)
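    # E.g. get_number_type(100) -> int32 (7 bits), get_number_type(2**40) ->
    # int64, get_number_type(1.5) -> float64; integers needing 64 or more
    # bits raise ValueError (illustrative, derived from the branches above).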
def resolve_function_type(self, func, args, kws):
if isinstance(func, types.Function):
return func.template(self).apply(args, kws)
if isinstance(func, types.Dispatcher):
if kws:
raise TypeError("kwargs not supported")
if not func.overloaded.is_compiling:
                # Avoid re-entering the compiler
fnobj = func.overloaded.compile(tuple(args))
else:
try:
fnobj = func.overloaded.get_overload(tuple(args))
except KeyError:
return None
ty = self.globals[fnobj]
return self.resolve_function_type(ty, args, kws)
defns = self.functions[func]
for defn in defns:
res = defn.apply(args, kws)
if res is not None:
return res
def resolve_getattr(self, value, attr):
try:
attrinfo = self.attributes[value]
except KeyError:
if value.is_parametric:
attrinfo = self.attributes[type(value)]
else:
raise
return attrinfo.resolve(value, attr)
def resolve_setitem(self, target, index, value):
args = target, index, value
kws = ()
return self.resolve_function_type("setitem", args, kws)
def get_global_type(self, gv):
return self.globals[gv]
def _load_builtins(self):
self.install(templates.builtin_registry)
def install(self, registry):
for ftcls in registry.functions:
self.insert_function(ftcls(self))
for ftcls in registry.attributes:
self.insert_attributes(ftcls(self))
for gv, gty in registry.globals:
self.insert_global(gv, gty)
def insert_global(self, gv, gty):
self.globals[gv] = gty
def insert_attributes(self, at):
key = at.key
assert key not in self.attributes, "Duplicated attributes template"
self.attributes[key] = at
def insert_function(self, ft):
key = ft.key
self.functions[key].append(ft)
def insert_overloaded(self, overloaded):
self.globals[overloaded] = types.Dispatcher(overloaded)
def insert_user_function(self, fn, ft):
"""Insert a user function.
Args
----
- fn:
object used as callee
- ft:
function template
"""
self.globals[fn] = types.Function(ft)
def extend_user_function(self, fn, ft):
""" Insert of extend a user function.
Args
----
- fn:
object used as callee
- ft:
function template
"""
if fn in self.globals:
self.globals[fn].extend(ft)
else:
self.insert_user_function(fn, ft)
def insert_class(self, cls, attrs):
clsty = types.Object(cls)
at = templates.ClassAttrTemplate(self, clsty, attrs)
self.insert_attributes(at)
def type_compatibility(self, fromty, toty):
"""
        Returns None or a string describing the conversion, e.g. 'exact',
        'promote', 'safe' or 'unsafe'.
"""
if fromty == toty:
return 'exact'
elif (isinstance(fromty, types.UniTuple) and
isinstance(toty, types.UniTuple) and
len(fromty) == len(toty)):
return self.type_compatibility(fromty.dtype, toty.dtype)
return self.tm.check_compatible(fromty, toty)
def unify_types(self, *types):
return functools.reduce(self.unify_pairs, types)
def unify_pairs(self, first, second):
"""
        Choose the PyObject type as the fallback if we fail to determine a
        concrete type.
"""
# TODO: should add an option to reject unsafe type conversion
d = self.type_compatibility(fromty=first, toty=second)
if d is None:
return types.pyobject
elif d == 'exact':
# Same type
return first
elif d == 'promote':
return second
elif d in ('safe', 'unsafe'):
assert first in types.number_domain
assert second in types.number_domain
a = numpy.dtype(str(first))
b = numpy.dtype(str(second))
# Just use NumPy coercion rules
sel = numpy.promote_types(a, b)
# Convert NumPy dtype back to Numba types
return getattr(types, str(sel))
else:
raise Exception("type_compatibility returned %s" % d)
class Context(BaseContext):
def init(self):
self.install(mathdecl.registry)
self.install(npydecl.registry)
def new_method(fn, sig):
name = "UserFunction_%s" % fn
ft = templates.make_concrete_template(name, fn, [sig])
return types.Method(ft, this=sig.recvr)
########NEW FILE########
__FILENAME__ = mathdecl
import math
from numba import types, utils
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
signature, Registry)
registry = Registry()
builtin_attr = registry.register_attr
builtin_global = registry.register_global
@builtin_attr
class MathModuleAttribute(AttributeTemplate):
key = types.Module(math)
def resolve_fabs(self, mod):
return types.Function(Math_fabs)
def resolve_exp(self, mod):
return types.Function(Math_exp)
def resolve_expm1(self, mod):
return types.Function(Math_expm1)
def resolve_sqrt(self, mod):
return types.Function(Math_sqrt)
def resolve_log(self, mod):
return types.Function(Math_log)
def resolve_log1p(self, mod):
return types.Function(Math_log1p)
def resolve_log10(self, mod):
return types.Function(Math_log10)
def resolve_sin(self, mod):
return types.Function(Math_sin)
def resolve_cos(self, mod):
return types.Function(Math_cos)
def resolve_tan(self, mod):
return types.Function(Math_tan)
def resolve_sinh(self, mod):
return types.Function(Math_sinh)
def resolve_cosh(self, mod):
return types.Function(Math_cosh)
def resolve_tanh(self, mod):
return types.Function(Math_tanh)
def resolve_asin(self, mod):
return types.Function(Math_asin)
def resolve_acos(self, mod):
return types.Function(Math_acos)
def resolve_atan(self, mod):
return types.Function(Math_atan)
def resolve_atan2(self, mod):
return types.Function(Math_atan2)
def resolve_asinh(self, mod):
return types.Function(Math_asinh)
def resolve_acosh(self, mod):
return types.Function(Math_acosh)
def resolve_atanh(self, mod):
return types.Function(Math_atanh)
def resolve_pi(self, mod):
return types.float64
def resolve_e(self, mod):
return types.float64
def resolve_floor(self, mod):
return types.Function(Math_floor)
def resolve_ceil(self, mod):
return types.Function(Math_ceil)
def resolve_trunc(self, mod):
return types.Function(Math_trunc)
def resolve_isnan(self, mod):
return types.Function(Math_isnan)
def resolve_isinf(self, mod):
return types.Function(Math_isinf)
def resolve_degrees(self, mod):
return types.Function(Math_degrees)
def resolve_radians(self, mod):
return types.Function(Math_radians)
def resolve_hypot(self, mod):
return types.Function(Math_hypot)
class Math_unary(ConcreteTemplate):
cases = [
signature(types.float64, types.int64),
signature(types.float64, types.uint64),
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
class Math_fabs(Math_unary):
key = math.fabs
class Math_exp(Math_unary):
key = math.exp
if utils.PYVERSION > (2, 6):
class Math_expm1(Math_unary):
key = math.expm1
class Math_sqrt(Math_unary):
key = math.sqrt
class Math_log(Math_unary):
key = math.log
class Math_log1p(Math_unary):
key = math.log1p
class Math_log10(Math_unary):
key = math.log10
class Math_sin(Math_unary):
key = math.sin
class Math_cos(Math_unary):
key = math.cos
class Math_tan(Math_unary):
key = math.tan
class Math_sinh(Math_unary):
key = math.sinh
class Math_cosh(Math_unary):
key = math.cosh
class Math_tanh(Math_unary):
key = math.tanh
class Math_asin(Math_unary):
key = math.asin
class Math_acos(Math_unary):
key = math.acos
class Math_atan(Math_unary):
key = math.atan
class Math_atan2(ConcreteTemplate):
key = math.atan2
cases = [
signature(types.float64, types.int64, types.int64),
signature(types.float64, types.uint64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
class Math_asinh(Math_unary):
key = math.asinh
class Math_acosh(Math_unary):
key = math.acosh
class Math_atanh(Math_unary):
key = math.atanh
class Math_floor(Math_unary):
key = math.floor
class Math_ceil(Math_unary):
key = math.ceil
class Math_trunc(Math_unary):
key = math.trunc
class Math_radians(Math_unary):
key = math.radians
class Math_degrees(Math_unary):
key = math.degrees
class Math_hypot(ConcreteTemplate):
key = math.hypot
cases = [
signature(types.float64, types.int64, types.int64),
signature(types.float64, types.uint64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
class Math_isnan(ConcreteTemplate):
key = math.isnan
cases = [
signature(types.boolean, types.int64),
signature(types.boolean, types.uint64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
]
class Math_isinf(ConcreteTemplate):
key = math.isinf
cases = [
signature(types.boolean, types.int64),
signature(types.boolean, types.uint64),
signature(types.boolean, types.float32),
signature(types.boolean, types.float64),
]
builtin_global(math, types.Module(math))
builtin_global(math.fabs, types.Function(Math_fabs))
builtin_global(math.exp, types.Function(Math_exp))
if utils.PYVERSION > (2, 6):
builtin_global(math.expm1, types.Function(Math_expm1))
builtin_global(math.sqrt, types.Function(Math_sqrt))
builtin_global(math.log, types.Function(Math_log))
builtin_global(math.log1p, types.Function(Math_log1p))
builtin_global(math.log10, types.Function(Math_log10))
builtin_global(math.sin, types.Function(Math_sin))
builtin_global(math.cos, types.Function(Math_cos))
builtin_global(math.tan, types.Function(Math_tan))
builtin_global(math.sinh, types.Function(Math_sinh))
builtin_global(math.cosh, types.Function(Math_cosh))
builtin_global(math.tanh, types.Function(Math_tanh))
builtin_global(math.asin, types.Function(Math_asin))
builtin_global(math.acos, types.Function(Math_acos))
builtin_global(math.atan, types.Function(Math_atan))
builtin_global(math.atan2, types.Function(Math_atan2))
builtin_global(math.asinh, types.Function(Math_asinh))
builtin_global(math.acosh, types.Function(Math_acosh))
builtin_global(math.atanh, types.Function(Math_atanh))
builtin_global(math.hypot, types.Function(Math_hypot))
builtin_global(math.floor, types.Function(Math_floor))
builtin_global(math.ceil, types.Function(Math_ceil))
builtin_global(math.trunc, types.Function(Math_trunc))
builtin_global(math.isnan, types.Function(Math_isnan))
builtin_global(math.isinf, types.Function(Math_isinf))
builtin_global(math.degrees, types.Function(Math_degrees))
builtin_global(math.radians, types.Function(Math_radians))
########NEW FILE########
__FILENAME__ = npydecl
import numpy
from numba import types
from numba.typing.templates import (AttributeTemplate, AbstractTemplate,
Registry, signature)
registry = Registry()
builtin_global = registry.register_global
builtin_attr = registry.register_attr
@builtin_attr
class NumpyModuleAttribute(AttributeTemplate):
# note: many unary ufuncs are added later on, using setattr
key = types.Module(numpy)
def resolve_arctan2(self, mod):
return types.Function(Numpy_arctan2)
def resolve_add(self, mod):
return types.Function(Numpy_add)
def resolve_subtract(self, mod):
return types.Function(Numpy_subtract)
def resolve_multiply(self, mod):
return types.Function(Numpy_multiply)
def resolve_divide(self, mod):
return types.Function(Numpy_divide)
class Numpy_unary_ufunc(AbstractTemplate):
def generic(self, args, kws):
assert not kws
nargs = len(args)
if nargs == 2:
[inp, out] = args
if isinstance(inp, types.Array) and isinstance(out, types.Array):
return signature(out, inp, out)
elif inp in types.number_domain and isinstance(out, types.Array):
return signature(out, inp, out)
elif nargs == 1:
[inp] = args
if inp in types.number_domain:
if hasattr(self, "scalar_out_type"):
return signature(self.scalar_out_type, inp)
else:
return signature(inp, inp)
def _numpy_unary_ufunc(name):
the_key = eval("numpy."+name) # obtain the appropriate symbol for the key.
class typing_class(Numpy_unary_ufunc):
key = the_key
scalar_out_type = types.float64
# Add the resolve method to NumpyModuleAttribute
setattr(NumpyModuleAttribute, "resolve_"+name, lambda s, m: types.Function(typing_class))
builtin_global(the_key, types.Function(typing_class))
# list of unary ufuncs to register
_autoregister_unary_ufuncs = [
"sin", "cos", "tan", "arcsin", "arccos", "arctan",
"sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh",
"exp", "exp2", "expm1",
"log", "log2", "log10", "log1p",
"absolute", "negative", "floor", "ceil", "trunc", "sign",
"sqrt",
"deg2rad", "rad2deg"]
for func in _autoregister_unary_ufuncs:
_numpy_unary_ufunc(func)
del(_autoregister_unary_ufuncs)
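# After the loop above, each listed ufunc is typed both as a numpy module
# attribute and as a global, e.g. numpy.sin(scalar number) resolves to
# float64 (the scalar_out_type) and numpy.sin(array_in, array_out) resolves
# to the type of array_out (illustrative, per Numpy_unary_ufunc.generic).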
class Numpy_binary_ufunc(AbstractTemplate):
def generic(self, args, kws):
assert not kws
nargs = len(args)
if nargs == 3:
[inp1, inp2, out] = args
if isinstance(out, types.Array) and \
                (isinstance(inp1, types.Array) or inp1 in types.number_domain) and \
(isinstance(inp2, types.Array) or inp2 in types.number_domain):
return signature(out, inp1, inp2, out)
elif nargs == 2:
[inp1, inp2] = args
if inp1 in types.number_domain and inp2 in types.number_domain:
if hasattr(self, "scalar_out_type"):
return signature(self.scalar_out_type, inp1, inp2)
else:
return signature(inp1, inp1, inp2)
class Numpy_add(Numpy_binary_ufunc):
key = numpy.add
class Numpy_subtract(Numpy_binary_ufunc):
key = numpy.subtract
class Numpy_multiply(Numpy_binary_ufunc):
key = numpy.multiply
class Numpy_divide(Numpy_binary_ufunc):
key = numpy.divide
class Numpy_arctan2(Numpy_binary_ufunc):
key = numpy.arctan2
builtin_global(numpy, types.Module(numpy))
builtin_global(numpy.arctan2, types.Function(Numpy_arctan2))
builtin_global(numpy.add, types.Function(Numpy_add))
builtin_global(numpy.subtract, types.Function(Numpy_subtract))
builtin_global(numpy.multiply, types.Function(Numpy_multiply))
builtin_global(numpy.divide, types.Function(Numpy_divide))
########NEW FILE########
__FILENAME__ = templates
"""
Define typing templates
"""
from __future__ import print_function, division, absolute_import
from numba import types
class Signature(object):
__slots__ = 'return_type', 'args', 'recvr'
def __init__(self, return_type, args, recvr):
self.return_type = return_type
self.args = args
self.recvr = recvr
def __hash__(self):
return hash(self.args)
def __eq__(self, other):
if isinstance(other, Signature):
return (self.args == other.args and
self.recvr == other.recvr)
elif isinstance(other, tuple):
return (self.args == other)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s -> %s" % (self.args, self.return_type)
@property
def is_method(self):
return self.recvr is not None
def make_concrete_template(name, key, signatures):
baseclasses = (ConcreteTemplate,)
gvars = dict(key=key, cases=list(signatures))
return type(name, baseclasses, gvars)
def signature(return_type, *args, **kws):
recvr = kws.pop('recvr', None)
assert not kws
return Signature(return_type, args, recvr=recvr)
def _uses_downcast(dists):
for d in dists:
if d < 0:
return True
return False
def _sum_downcast(dists):
c = 0
for d in dists:
if d < 0:
c += abs(d)
return c
class Rating(object):
__slots__ = 'promote', 'safe_convert', "unsafe_convert"
def __init__(self):
self.promote = 0
self.safe_convert = 0
self.unsafe_convert = 0
def astuple(self):
"""Returns a tuple suitable for comparing with the worse situation
start first.
"""
return (self.unsafe_convert, self.safe_convert, self.promote)
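# The rating tuples are compared lexicographically in resolve_overload below,
# so a candidate needing fewer unsafe conversions always wins; ties are broken
# by the number of safe conversions, then by the number of promotions.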
def resolve_overload(context, key, cases, args, kws):
assert not kws, "Keyword arguments are not supported, yet"
# Rate each cases
candids = []
ratings = []
for case in cases:
if len(args) == len(case.args):
rate = Rating()
for actual, formal in zip(args, case.args):
by = context.type_compatibility(actual, formal)
if by is None:
break
if by == 'promote':
rate.promote += 1
elif by == 'safe':
rate.safe_convert += 1
elif by == 'unsafe':
rate.unsafe_convert += 1
elif by == 'exact':
pass
else:
raise Exception("unreachable", by)
else:
ratings.append(rate.astuple())
candids.append(case)
# Find the best case
ordered = sorted(zip(ratings, candids), key=lambda i: i[0])
if ordered:
if len(ordered) > 1:
(first, case1), (second, case2) = ordered[:2]
# Ambiguous overloading
if first == second:
ambiguous = []
for rate, case in ordered:
if rate == first:
ambiguous.append(case)
# Try to resolve promotion
# TODO: need to match this to the C overloading dispatcher
resolvable = resolve_ambiguous_promotions(context, ambiguous,
args)
if resolvable:
return resolvable
# Failed to resolve promotion
args = (key, args, '\n'.join(map(str, ambiguous)))
msg = "Ambiguous overloading for %s %s\n%s" % args
raise TypeError(msg)
return ordered[0][1]
class UnsafePromotionError(Exception):
pass
def safe_promotion(actual, formal):
"""
    Allow an integer to be cast to the nearest integer type of the same
    signedness.
"""
# Integers?
if actual in types.integer_domain and formal in types.integer_domain:
# Same signedness?
if actual.signed == formal.signed:
# Score by their distance
return formal.bitwidth - actual.bitwidth
raise UnsafePromotionError(actual, formal)
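# E.g. safe_promotion(int32, int64) scores 32 (both signed, widened by 32
# bits), while safe_promotion(int32, uint64) raises UnsafePromotionError
# because the signedness differs; resolve_ambiguous_promotions below picks
# the candidate with the smallest total score.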
def resolve_ambiguous_promotions(context, cases, args):
ratings = []
for case in cases:
try:
rate = _safe_promote_case(context, case, args)
except UnsafePromotionError:
# Ignore error
pass
else:
ratings.append((rate, case))
_, bestcase = min(ratings)
return bestcase
def _safe_promote_case(context, case, args):
rate = 0
for actual, formal in zip(args, case.args):
by = context.type_compatibility(actual, formal)
if by == 'promote':
rate += safe_promotion(actual, formal)
else:
raise UnsafePromotionError(actual, formal)
return rate
class FunctionTemplate(object):
def __init__(self, context):
self.context = context
def _select(self, cases, args, kws):
selected = resolve_overload(self.context, self.key, cases, args, kws)
return selected
class AbstractTemplate(FunctionTemplate):
"""
    Defines a method ``generic(self, args, kws)`` which computes a possible
    signature based on the input types.  The signature does not have to match
    the input types; it is compared against the input types afterwards.
"""
def apply(self, args, kws):
generic = getattr(self, "generic")
sig = generic(args, kws)
if sig:
cases = [sig]
return self._select(cases, args, kws)
class ConcreteTemplate(FunctionTemplate):
"""
Defines attributes "cases" as a list of signature to match against the
given input types.
"""
def apply(self, args, kws):
cases = getattr(self, 'cases')
assert cases
return self._select(cases, args, kws)
class AttributeTemplate(object):
def __init__(self, context):
self.context = context
def resolve(self, value, attr):
fn = getattr(self, "resolve_%s" % attr, None)
if fn is None:
raise NameError("Attribute '%s' of %s is not typed" % (attr,
value))
return fn(value)
class ClassAttrTemplate(AttributeTemplate):
def __init__(self, context, key, clsdict):
super(ClassAttrTemplate, self).__init__(context)
self.key = key
self.clsdict = clsdict
def resolve(self, value, attr):
return self.clsdict[attr]
class MacroTemplate(object):
pass
# -----------------------------
class Registry(object):
def __init__(self):
self.functions = []
self.attributes = []
self.globals = []
def register(self, item):
assert issubclass(item, FunctionTemplate)
self.functions.append(item)
return item
def register_attr(self, item):
assert issubclass(item, AttributeTemplate)
self.attributes.append(item)
return item
def register_global(self, v, t):
self.globals.append((v, t))
builtin_registry = Registry()
builtin = builtin_registry.register
builtin_attr = builtin_registry.register_attr
builtin_global = builtin_registry.register_global
########NEW FILE########
__FILENAME__ = unittest_support
"""
This file fixes portability issues for unittest
"""
from numba.config import PYVERSION
if PYVERSION <= (2, 6):
from unittest2 import *
else:
from unittest import *
########NEW FILE########
__FILENAME__ = utils
from __future__ import print_function, division, absolute_import
import collections
import functools
import timeit
import math
import numpy
from numba.config import PYVERSION
class ConfigOptions(object):
OPTIONS = ()
def __init__(self):
self._enabled = set()
def set(self, name):
if name not in self.OPTIONS:
raise NameError("Invalid flag: %s" % name)
self._enabled.add(name)
def unset(self, name):
if name not in self.OPTIONS:
raise NameError("Invalid flag: %s" % name)
self._enabled.discard(name)
def __getattr__(self, name):
if name not in self.OPTIONS:
raise NameError("Invalid flag: %s" % name)
return name in self._enabled
def __repr__(self):
return "Flags(%s)" % ', '.join(str(x) for x in self._enabled)
def copy(self):
copy = type(self)()
copy._enabled = set(self._enabled)
return copy
class SortedMap(collections.Mapping):
"""Immutable
"""
def __init__(self, seq):
self._values = []
self._index = {}
for i, (k, v) in enumerate(sorted(seq)):
self._index[k] = i
self._values.append((k, v))
def __getitem__(self, k):
i = self._index[k]
return self._values[i][1]
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(k for k, v in self._values)
class SortedSet(collections.Set):
def __init__(self, seq):
self._set = set(seq)
self._values = list(sorted(self._set))
def __contains__(self, item):
return item in self._set
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(self._values)
class UniqueDict(dict):
def __setitem__(self, key, value):
assert key not in self
super(UniqueDict, self).__setitem__(key, value)
# def cache(fn):
# @functools.wraps(fn)
# def cached_func(self, *args, **kws):
# if self in cached_func.cache:
# return cached_func.cache[self]
# ret = fn(self, *args, **kws)
# cached_func.cache[self] = ret
# return ret
# cached_func.cache = {}
# def invalidate(self):
# if self in cached_func.cache:
# del cached_func.cache[self]
# cached_func.invalidate = invalidate
#
# return cached_func
def runonce(fn):
@functools.wraps(fn)
def inner():
if not inner._ran:
res = fn()
inner._result = res
inner._ran = True
return inner._result
inner._ran = False
return inner
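# Minimal sketch of the runonce decorator above (names are illustrative):
#
#   @runonce
#   def init_backend():
#       print("initialising")        # printed only on the first call
#       return 42
#
#   init_backend()                   # prints, returns 42
#   init_backend()                   # silent, returns the cached 42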
def bit_length(intval):
assert isinstance(intval, int)
return len(bin(abs(intval))) - 2
class BenchmarkResult(object):
def __init__(self, func, records, loop):
self.func = func
self.loop = loop
self.records = numpy.array(records) / loop
self.best = numpy.min(self.records)
def __repr__(self):
name = getattr(self.func, "__name__", self.func)
args = (name, self.loop, self.records.size, format_time(self.best))
return "%20s: %10d loops, best of %d: %s per loop" % args
def format_time(tm):
units = "s ms us ns ps".split()
base = 1
for unit in units[:-1]:
if tm >= base:
break
base /= 1000
else:
unit = units[-1]
return "%.1f%s" % (tm / base, unit)
def benchmark(func, maxsec=1):
timer = timeit.Timer(func)
number = 1
result = timer.repeat(1, number)
# Too fast to be measured
while min(result) / number == 0:
number *= 10
result = timer.repeat(3, number)
best = min(result) / number
if best >= maxsec:
return BenchmarkResult(func, result, number)
# Scale it up to make it close the maximum time
max_per_run_time = maxsec / 3 / number
number = max(max_per_run_time / best / 3, 1)
# Round to the next power of 10
number = int(10 ** math.ceil(math.log10(number)))
records = timer.repeat(3, number)
return BenchmarkResult(func, records, number)
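# Illustrative use of the benchmark helpers above (the printed format is
# approximate, timings are machine dependent):
#
#   res = benchmark(lambda: sum(range(1000)))
#   print(res)   # e.g. "    <lambda>:  10000 loops, best of 3: 12.3us per loop"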
# Other common python2/3 adaptors
# Copied from Blaze which borrowed from six
IS_PY3 = PYVERSION >= (3, 0)
if IS_PY3:
def dict_iteritems(d):
return d.items().__iter__()
def dict_itervalues(d):
return d.values().__iter__()
def dict_values(d):
return list(d.values())
def dict_keys(d):
return list(d.keys())
def iter_next(it):
return it.__next__()
def func_globals(f):
return f.__globals__
def longint(v):
return int(v)
else:
def dict_iteritems(d):
return d.iteritems()
def dict_itervalues(d):
return d.itervalues()
def dict_values(d):
return d.values()
def dict_keys(d):
return d.keys()
def iter_next(it):
return it.next()
def func_globals(f):
return f.func_globals
def longint(v):
return long(v)
########NEW FILE########
__FILENAME__ = _version
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
GIT = "git"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print(("unable to run %s" % args[0]))
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print(("unable to run %s (error)" % args[0]))
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print(("discarding '%s', no digits" % ref))
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print(("remaining refs: %s" % ",".join(sorted(refs))))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print(("picking %s" % r))
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print(("no .git in %s" % root))
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print(("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print(("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix)))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = "numba-"
versionfile_source = "numba/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
########NEW FILE########
__FILENAME__ = ad
# -*- coding: utf-8 -*-
"""
Example of how to use a byte-code execution technique to trace accesses to numpy arrays.
This file demonstrates two applications of this technique:
* optimize numpy computations for repeated calling
* provide automatic differentiation of procedural code
"""
from __future__ import print_function, division, absolute_import
import __builtin__
import os
import sys
import inspect
import trace
import opcode
import numpy as np
import theano
from .utils import itercode
# Opcode help: http://docs.python.org/library/dis.html
# XXX: support full calling convention for named args, *args and **kwargs
class FrameVM(object):
"""
A Class for evaluating a code block of CPython bytecode,
and tracking accesses to numpy arrays.
"""
def __init__(self, watcher, func):
print('FrameVM', func)
self.watcher = watcher
self.func = func
self.fco = func.__code__
self.names = self.fco.co_names
self.varnames = self.fco.co_varnames
self.constants = self.fco.co_consts
self.costr = func.__code__.co_code
self.argnames = self.fco.co_varnames[:self.fco.co_argcount]
self.stack = []
def call(self, args, kwargs):
self.rval = None
self._myglobals = {}
for name in self.names:
#print 'name', name
try:
self._myglobals[name] = self.func.__globals__[name]
except KeyError:
try:
self._myglobals[name] = __builtin__.__getattribute__(name)
except AttributeError:
#print 'WARNING: name lookup failed', name
pass
self._locals = [None] * len(self.fco.co_varnames)
for i, name in enumerate(self.argnames):
#print 'i', args, self.argnames, self.fco.co_varnames
self._locals[i] = args[i]
self.code_iter = itercode(self.costr)
jmp = None
while True:
try:
i, op, arg = self.code_iter.send(jmp)
except StopIteration:
break
name = opcode.opname[op]
#print 'OP: ', i, name
jmp = getattr(self, 'op_' + name)(i, op, arg)
return self.rval
def op_BINARY_ADD(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 + arg2
self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 + s2
#print 'added sym'
def op_BINARY_SUBTRACT(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 - arg2
self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 - s2
def op_BINARY_MULTIPLY(self, i, op, arg):
arg2 = self.stack.pop(-1)
arg1 = self.stack.pop(-1)
r = arg1 * arg2
self.stack.append(r)
if (id(arg1) in self.watcher.svars
or id(arg2) in self.watcher.svars):
s1 = self.watcher.svars.get(id(arg1), arg1)
s2 = self.watcher.svars.get(id(arg2), arg2)
self.watcher.svars[id(r)] = s1 * s2
#print 'mul sym', id(r)
def op_CALL_FUNCTION(self, i, op, arg):
# XXX: does this work with kwargs?
args = [self.stack[-ii] for ii in range(arg, 0, -1)]
if arg > 0:
self.stack = self.stack[:-arg]
func = self.stack.pop(-1)
recurse = True
if func.__module__ and func.__module__.startswith('numpy'):
recurse = False
if 'built-in' in str(func):
recurse = False
if recurse:
vm = FrameVM(self.watcher, func)
rval = vm.call(args, {})
else:
#print 'running built-in', func, func.__name__, args
rval = func(*args)
if any(id(a) in self.watcher.svars for a in args):
sargs = [self.watcher.svars.get(id(a), a)
for a in args]
if func.__name__ == 'sum':
#print 'sym sum', sargs
self.watcher.svars[id(rval)] = theano.tensor.sum(*sargs)
else:
raise NotImplementedError(func)
self.stack.append(rval)
def op_COMPARE_OP(self, i, op, arg):
opname = opcode.cmp_op[arg]
left = self.stack.pop(-1)
right = self.stack.pop(-1)
if 0: pass
elif opname == '==': self.stack.append(left == right)
elif opname == '!=': self.stack.append(left != right)
else:
raise NotImplementedError('comparison: %s' % opname)
def op_FOR_ITER(self, i, op, arg):
# either push tos.next()
# or pop tos and send (arg)
tos = self.stack[-1]
try:
            value = next(tos)  # separate name so the builtin next() is not shadowed
            print('next', value)
            self.stack.append(value)
except StopIteration:
self.stack.pop(-1)
return ('rel', arg)
def op_JUMP_ABSOLUTE(self, i, op, arg):
print('sending', arg)
return ('abs', arg)
def op_JUMP_IF_TRUE(self, i, op, arg):
tos = self.stack[-1]
if tos:
return ('rel', arg)
def op_GET_ITER(self, i, op, arg):
# replace tos -> iter(tos)
tos = self.stack[-1]
self.stack[-1] = iter(tos)
if id(tos) in self.watcher.svars:
raise NotImplementedError('iterator of watched value')
def op_LOAD_GLOBAL(self, i, op, arg):
#print 'LOAD_GLOBAL', self.names[arg]
self.stack.append(self._myglobals[self.names[arg]])
def op_LOAD_ATTR(self, i, op, arg):
#print 'LOAD_ATTR', self.names[arg]
TOS = self.stack[-1]
self.stack[-1] = getattr(TOS, self.names[arg])
def op_LOAD_CONST(self, i, op, arg):
#print 'LOAD_CONST', self.constants[arg]
self.stack.append(self.constants[arg])
def op_LOAD_FAST(self, i, op, arg):
#print 'LOAD_FAST', self.varnames[arg]
self.stack.append(self._locals[arg])
def op_POP_BLOCK(self, i, op, arg):
print('pop block, what to do?')
def op_POP_TOP(self, i, op, arg):
self.stack.pop(-1)
def op_PRINT_ITEM(self, i, op, arg):
print(self.stack.pop(-1), end=' ')
def op_PRINT_NEWLINE(self, i, op, arg):
print('')
def op_SETUP_LOOP(self, i, op, arg):
print('SETUP_LOOP, what to do?')
def op_STORE_FAST(self, i, op, arg):
#print 'STORE_FAST', self.varnames[arg]
self._locals[arg] = self.stack.pop(-1)
def op_RAISE_VARARGS(self, i, op, arg):
if 1 <= arg:
exc = self.stack.pop(-1)
if 2 <= arg:
param = self.stack.pop(-1)
if 3 <= arg:
tb = self.stack.pop(-1)
raise NotImplementedError('exception handling')
def op_RETURN_VALUE(self, i, op, arg):
self.rval = self.stack.pop(-1)
class Watcher(object):
def __init__(self, inputs):
self.inputs = inputs
self.svars = {}
for var in inputs:
self.svars[id(var)] = theano.tensor.vector()
def call(self, fn, *args, **kwargs):
vm = FrameVM(self, fn)
return vm.call(args, kwargs)
def grad_fn(self, rval, ival):
sy = self.svars[id(rval)]
sx = self.svars[id(ival)]
dydx = theano.tensor.grad(sy, sx)
return theano.function([sx], dydx)
def recalculate_fn(self, rval, ival):
sy = self.svars[id(rval)]
sx = self.svars[id(ival)]
return theano.function([sx], sy)
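# Hedged usage sketch for the tracer above (function and shapes are
# illustrative; requires numpy and theano):
#
#   x = np.ones(4)
#   w = Watcher([x])
#   y = w.call(lambda a: sum(a * a), x)   # runs through FrameVM, building a
#                                         # matching theano expression
#   dydx_fn = w.grad_fn(y, x)             # compiled derivative dy/dx
#   recompute = w.recalculate_fn(y, x)    # recompute y for new inputs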
########NEW FILE########
__FILENAME__ = annotate
# -*- coding: UTF-8 -*-
"""
numba --annotate
"""
from __future__ import print_function, division, absolute_import
import operator
from itertools import groupby
from collections import namedtuple
# ______________________________________________________________________
Program = namedtuple("Program", ["python_source", "intermediates"])
SourceIntermediate = namedtuple("SourceIntermediate", ["name", "linenomap",
"source"])
DotIntermediate = namedtuple("DotIntermediate", ["name", "dotcode"]) # graphviz
Source = namedtuple("Source", ["linemap", "annotations"])
Annotation = namedtuple("Annotation", ["type", "value"])
# ______________________________________________________________________
def build_linemap(func):
import inspect
import textwrap
source = inspect.getsource(func)
source = textwrap.dedent(source)
lines = source.split('\n')
if lines[-1] == '':
lines = lines[0:-1]
func_code = getattr(func, 'func_code', None)
if func_code is None:
func_code = getattr(func, '__code__')
lineno = func_code.co_firstlineno
linemap = {}
for line in lines:
linemap[lineno] = line
lineno += 1
return linemap
# ______________________________________________________________________
# Annotation types
A_type = "Types"
A_c_api = "Python C API"
A_numpy = "NumPy"
A_errcheck = "Error check"
A_objcoerce = "Coercion"
A_pycall = "Python call"
A_pyattr = "Python attribute"
# Annotation formatting
def format_annotations(annotations):
adict = groupdict(annotations, 'type')
for category, annotations in adict.items():
vals = u" ".join([str(a.value) for a in annotations])
yield u"%s: %s" % (category, vals)
groupdict = lambda xs, attr: dict(
(k, list(v)) for k, v in groupby(xs, operator.attrgetter(attr)))
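# Illustrative example of the formatting helpers above (annotation values are
# hypothetical):
#
#   annots = [Annotation(A_type, ('a', 'double')),
#             Annotation(A_type, ('b', 'int'))]
#   list(format_annotations(annots))
#   # -> [u"Types: ('a', 'double') ('b', 'int')"]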
# ______________________________________________________________________
########NEW FILE########
__FILENAME__ = annotators
# -*- coding: utf-8 -*-
"""
Numba annotators.
"""
from __future__ import print_function, division, absolute_import
import re
import numba
from numba import *
from numba import nodes
from numba.annotate import annotate
from numba.annotate.annotate import Annotation, A_c_api
logger = logging.getLogger(__name__)
# Taken from Cython/Compiler/Annotate.py
py_c_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]+)\(')
py_macro_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][A-Z_]+)\(')
# ______________________________________________________________________
class AnnotateTypes(object):
def visit_Name(self, node):
if isinstance(node, nodes.ExprNode):
self.annotate(annotate.A_type, (node.id, str(node.type)))
# ______________________________________________________________________
def annotate_pyapi(llvm_intermediate, py_annotations):
"""
Produce annotations for CPython C API calls from an LLVM SourceIntermediate
"""
for py_lineno in llvm_intermediate.linenomap:
count = 0
for llvm_lineno in llvm_intermediate.linenomap[py_lineno]:
line = llvm_intermediate.source.linemap[llvm_lineno]
if re.search(py_c_api, line):
count += 1
if count:
py_annotations[py_lineno].append(Annotation(A_c_api, count))
########NEW FILE########
__FILENAME__ = htmlrender
# -*- coding: UTF-8 -*-
"""
HTML annotation rendering. Heavily based on Cython/Compiler/Annotate.py
"""
from __future__ import print_function, division, absolute_import
import sys
import cgi
import os
import re
from .annotate import format_annotations, groupdict, A_c_api
from .step import Template
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.lexers import LlvmLexer
from pygments.formatters import HtmlFormatter
pygments_installed = True
except ImportError:
pygments_installed = False
def render(annotation_blocks, emit=sys.stdout.write,
intermediate_names=(), inline=True):
"""
Render a Program as html.
"""
root = os.path.join(os.path.dirname(__file__))
if inline:
templatefile = os.path.join(root, 'annotate_inline_template.html')
else:
templatefile = os.path.join(root, 'annotate_template.html')
with open(templatefile, 'r') as f:
template = f.read()
py_c_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]+)\(')
data = {'blocks': []}
for i, block in enumerate(annotation_blocks):
python_source = block['python_source']
intermediates = block['intermediates']
data['blocks'].append({'lines':[]})
for num, source in sorted(python_source.linemap.items()):
types = {}
if num in python_source.annotations.keys():
for a in python_source.annotations[num]:
if a.type == 'Types':
name = a.value[0]
type = a.value[1]
types[name] = type
types_str = ','.join(name + ':' + type for name, type in types.items())
python_calls = 0
llvm_nums = intermediates[0].linenomap[num]
llvm_ir = ''
for llvm_num in llvm_nums:
ir = intermediates[0].source.linemap[llvm_num]
if re.search(py_c_api, ir):
python_calls += 1
if pygments_installed:
class LlvmHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
for i, t in source:
yield i, t
ir = highlight(ir, LlvmLexer(), LlvmHtmlFormatter())
llvm_ir += '<div>' + ir + '</div>'
if python_calls > 0:
tag = '*'
tag_css = 'tag'
else:
tag = ''
tag_css = ''
if num == python_source.linemap.keys()[0]:
firstlastline = 'firstline'
elif num == python_source.linemap.keys()[-1]:
firstlastline = 'lastline'
else:
firstlastline = 'innerline'
if pygments_installed:
class PythonHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
for i, t in source:
yield i, t
source = highlight(source, PythonLexer(), PythonHtmlFormatter())
data['blocks'][-1]['func_call'] = block['func_call']
data['blocks'][-1]['func_call_filename'] = block['func_call_filename']
data['blocks'][-1]['func_call_lineno'] = block['func_call_lineno']
data['blocks'][-1]['lines'].append({'id': str(i) + str(num),
'num':str(num) + tag,
'tag':tag_css,
'python_source':source,
'llvm_source':llvm_ir,
'types':types_str,
'firstlastline':firstlastline})
css_theme_file = os.path.join(root, 'jquery-ui.min.css')
with open(css_theme_file, 'r') as f:
css_theme = f.read()
data['jquery_theme'] = css_theme
jquery_lib_file = os.path.join(root, 'jquery.min.js')
with open(jquery_lib_file, 'r') as f:
jquery_lib = f.read()
data['jquery_lib'] = jquery_lib
jquery_ui_lib_file = os.path.join(root, 'jquery-ui.min.js')
with open(jquery_ui_lib_file, 'r') as f:
jquery_ui_lib = f.read()
data['jquery_ui_lib'] = jquery_ui_lib
html = Template(template).expand(data)
emit(html)
########NEW FILE########
__FILENAME__ = ir_capture
# -*- coding: UTF-8 -*-
"""
Capture IR emissions.
"""
from __future__ import print_function, division, absolute_import
import collections
from functools import partial
import llvm.core
from .annotate import SourceIntermediate, Source
# ______________________________________________________________________
class IRBuilder(object):
def __init__(self, name, builder):
self.name = name
self.builder = builder
self.captured = collections.defaultdict(list)
self.pos = -1
def update_pos(self, pos):
if pos is None:
pos = -1
self.pos = pos
def get_pos(self):
return self.pos
def __getattr__(self, attr):
m = getattr(self.builder, attr)
if not callable(m):
return m
def emit(*args, **kwargs):
result = m(*args, **kwargs)
self.captured[self.pos].append(result)
return result
return emit
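# Hedged usage sketch for IRBuilder above (builder and position values are
# illustrative): the wrapper forwards any builder method and records its
# result against the current source position, e.g.
#
#   capture = IRBuilder("llvm", llvm_builder)
#   capture.update_pos(3)                    # lowering source line 3
#   tmp = capture.add(lhs, rhs, name="tmp")  # emitted via the real builder,
#                                            # also appended to capture.captured[3]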
# ______________________________________________________________________
def get_intermediate(ir_builder):
"Get IR source from an IR builder as a SourceIntermediate"
linenomap = collections.defaultdict(list)
linemap = {}
ir_lineno = 1
filterer = filters.get(ir_builder.name, lambda x: x)
ir_builder.captured = filterer(filter_unique(ir_builder.captured))
for pos, instrs in sorted(ir_builder.captured.iteritems()):
for instr in instrs:
linenomap[pos].append(ir_lineno)
linemap[ir_lineno] = str(instr)
ir_lineno += 1
source = Source(linemap, annotations=[])
return SourceIntermediate(ir_builder.name, linenomap, source)
# ______________________________________________________________________
def filter_llvm(captured):
for values in captured.values():
fn = lambda llvm_value: isinstance(llvm_value, llvm.core.Instruction)
blocks = collections.defaultdict(list)
for llvm_value in filter(fn, values):
blocks[llvm_value.basic_block].append(llvm_value)
values[:] = order_llvm(blocks)
return captured
def filter_unique(captured):
for values in captured.values():
seen = set()
def unique(item):
found = item in seen
seen.add(item)
return not found
values[:] = filter(unique, values)
return captured
# ______________________________________________________________________
def order_llvm(blocks):
"""
Put llvm instructions and basic blocks in the right order.
:param blocks: { llvm_block : [llvm_instr] }
:return: [llvm_line]
"""
result = []
if blocks:
block, values = blocks.popitem()
blocks[block] = values
lfunc = block.function
for block in lfunc.basic_blocks:
if block in blocks:
instrs = blocks[block]
instrpos = dict(
(instr, i) for i, instr in enumerate(block.instructions))
result.append(str(block.name) + ":")
result.extend(sorted(instrs, key=instrpos.get))
return result
# ______________________________________________________________________
filters = {
"llvm": filter_llvm,
}
########NEW FILE########
__FILENAME__ = step
"""Copyright (c) 2012, Daniele Mazzocchio
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the developer nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
"""A light and fast template engine."""
import re
class Template(object):
""" """
def __init__(self, template):
"""Initialize class"""
super(Template, self).__init__()
self.template = template
self.options = {"strip": False}
def expand(self, namespace={}, **kw):
"""Return the expanded template string"""
namespace.update(kw)
output = []
# Builtins
namespace["echo"] = lambda s: output.append(s)
namespace["isdef"] = lambda v: v in namespace
namespace["setopt"] = lambda k, v: self.options.update({k: v})
code = self._process(self._preprocess(self.template))
eval(compile(code, "<string>", "exec"), namespace)
return self._postprocess("".join(map(str, output)))
def _preprocess(self, template):
"""Modify template string before code conversion"""
# Replace inline ('%') blocks for easier parsing
o = re.compile("(?m)^[ \t]*%((if|for|while|try).+:)")
c = re.compile("(?m)^[ \t]*%(((else|elif|except|finally).*:)|(end\w+))")
template = c.sub(r"<%:\g<1>%>", o.sub(r"<%\g<1>%>", template))
# Replace (${x}) variables with '<%echo(x)%>'
v = re.compile("\${([a-zA-Z0-9[\].\"\'_]+)}")
template = v.sub(r"<%echo(\g<1>)%>\n", template)
return template
def _process(self, template):
"""Return the code generated from the template string"""
code_blk = re.compile(r"<%(.*?)%>\n?", re.DOTALL)
indent = 0
code = []
for n, blk in enumerate(code_blk.split(template)):
# Replace '<\%' and '%\>' escapes
blk = re.sub(r"<\\%", "<%", re.sub(r"%\\>", "%>", blk))
# Unescape '%{}' characters
blk = re.sub(r"\\(%|{|})", "\g<1>", blk)
if not (n % 2):
# Escape double-quote characters
blk = re.sub(r"\"", "\\\"", blk)
blk = (" " * (indent*4)) + 'echo("""{}""")'.format(blk)
else:
blk = blk.rstrip()
if blk.lstrip().startswith(":"):
if not indent:
err = "unexpected block ending"
raise SyntaxError("Line {}: {}".format(n, err))
indent -= 1
if blk.startswith(":end"):
continue
blk = blk.lstrip()[1:]
blk = re.sub("(?m)^", " " * (indent * 4), blk)
if blk.endswith(":"):
indent += 1
code.append(blk)
if indent:
err = "Reached EOF before closing block"
raise EOFError("Line {}: {}".format(n, err))
return "\n".join(code)
def _postprocess(self, output):
"""Modify output string after variables and code evaluation"""
if self.options["strip"]:
output = re.sub("(?m)(^[ \t]+|[ \t]+$|(?<=[ \t])[ \t]+|^\n)", "",
output)
return output
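# Hedged usage sketch of the template engine above:
#
#   Template("Hello ${name}!").expand(name="world")   # -> "Hello world!"
#
# Lines starting with '%' (e.g. "%for x in items:" ... "%endfor") are treated
# as inline code blocks by _preprocess/_process.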
########NEW FILE########
__FILENAME__ = test_rendering
# -*- coding: UTF-8 -*-
from __future__ import print_function, division, absolute_import
from io import StringIO
from numba import config
from numba.annotate import render_text
from numba.annotate.annotate import (Source, Annotation, SourceIntermediate,
Program, A_type)
# ______________________________________________________________________
py_source = Source(
linemap={ 1: u'def foo(a, b):',
2: u' print a * b',
3: u' a / b',
4: u' return a - b' },
annotations={ 2: [Annotation(A_type, (u'a', u'double')),
Annotation(A_type, (u'b', u'double'))] }
)
linenomap = { 1: [0], 2: [1, 2, 3], 4: [5], }
llvm_linemap = {
0: u'call @printf(%a, %b)',
1: u'%0 = load a',
2: u'%1 = load b',
3: u'%2 = fadd %0 %1',
4: u'%3 = fdiv %a %b',
5: u'ret something',
}
annotations = {
3: [Annotation(A_type, (u'%0', u'double')),
Annotation(A_type, (u'%1', u'double'))],
}
llvm_intermediate = SourceIntermediate("llvm", linenomap,
Source(llvm_linemap, annotations))
# p = Program(py_source, [llvm_intermediate])
p = [{'python_source': py_source, 'intermediates': [llvm_intermediate]}]
# ______________________________________________________________________
config.config.colour = False # Disable lexing for tests
def run_render_text():
f = StringIO()
render_text(p, emit=f.write)
src = f.getvalue()
assert 'def foo(a, b):' in src, src
assert 'print a * b' in src
assert 'return a - b' in src
assert 'double' in src
# print(src)
def run_render_text_inline():
f = StringIO()
render_text(p, emit=f.write, intermediate_names=["llvm"])
src = f.getvalue()
assert 'def foo(a, b):' in src
assert '____llvm____' in src
assert '%0 = load a' in src
# print(src)
def run_render_text_outline():
f = StringIO()
render_text(p, emit=f.write, inline=False, intermediate_names=["llvm"])
src = f.getvalue()
assert 'def foo(a, b):' in src
assert "====llvm====" in src
assert '%0 = load a' in src
# print(src)
run_render_text()
run_render_text_inline()
run_render_text_outline()
########NEW FILE########
__FILENAME__ = textrender
# -*- coding: UTF-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from functools import partial
from itertools import chain
from collections import namedtuple
from numba.lexing import lex_source
from .annotate import format_annotations
WIDTH = 40
ANNOT_SEP = "-"
Emitter = namedtuple("Emitter", ["emit", "emitline"])
lex = partial(lex_source, output="console")
# ______________________________________________________________________
def render(annotation_blocks, emit=sys.stdout.write,
intermediate_names=(), inline=True):
"""
Render a Program as text.
:param intermediate_names: [intermediate_name], e.g. ["llvm"]
:param inline: whether to display intermediate code inline
"""
indent = 8
emitline = lambda indent, s: emit(u" " * indent + s + u"\n")
emitter = Emitter(emit, emitline)
for i, block in enumerate(annotation_blocks):
python_source = block['python_source']
intermediates = block['intermediates']
if intermediates:
irs = [i for i in intermediates if i.name in intermediate_names]
else:
irs = None
# Render main source
render_source(python_source, emitter, indent,
irs if inline and irs else [])
if not inline and irs:
            # Render IRs separately
for irname, linenomap, ir_source in irs:
emitter.emitline(0, irname.center(80, "="))
render_source(ir_source, emitter, indent, [], linenomap)
emitter.emitline(0, "=" * 80)
def render_source(source, emitter, indent, intermediates, linenomap=None):
"""
Print a Source and its annotations. Print any given Intermediates inline.
"""
if linenomap:
indent += 8
headers = {}
for py_lineno, ir_linenos in linenomap.items():
for ir_lineno in ir_linenos:
headers[ir_lineno] = u"%4d | " % py_lineno
header = lambda lineno: headers.get(lineno, u" | ")
else:
header = lambda lineno: u""
_render_source(source, emitter, indent, intermediates, header)
def _render_source(source, emitter, indent, intermediates, header=None):
for lineno in sorted(source.linemap.iterkeys()):
if header:
emitter.emit(header(lineno))
line = lex(source.linemap[lineno])
emitter.emitline(0, u"%4d %s" % (lineno, line))
annots = format_annotations(source.annotations.get(lineno, []))
irs = _gather_text_intermediates(intermediates, lineno)
lines = list(chain(annots, irs))
if not lines:
continue
# Print out annotations
linestart = indent + len(source.linemap[lineno]) - len(source.linemap[lineno].lstrip())
emitter.emitline(linestart + 2, u"||".center(WIDTH, ANNOT_SEP))
for line in lines:
emitter.emitline(linestart + 2, line)
emitter.emitline(linestart + 2, u"||".center(WIDTH, ANNOT_SEP))
def _gather_text_intermediates(intermediates, lineno):
for irname, linenomap, ir_source in intermediates:
ir_linenos = linenomap.get(lineno, [])
if not ir_linenos:
continue
yield irname.center(WIDTH, "_")
for ir_lineno in ir_linenos:
yield lex(ir_source.linemap[ir_lineno], irname)
########NEW FILE########
__FILENAME__ = array_expressions
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba.templating import temp_name
from numba import error, pipeline, nodes, ufunc_builder
from numba.minivect import specializers, miniast, miniutils, minitypes
from numba import utils, functions
from numba import typesystem
from numba import visitors
from numba.support.numpy_support import slicenodes
from numba.vectorize import basic
import llvm.core
print_ufunc = False
# ______________________________________________________________________
def is_elementwise_assignment(assmnt_node):
target_type = assmnt_node.targets[0].type
value_type = assmnt_node.value.type
if target_type.is_array:
# Allow arrays and scalars
return value_type.is_array or not value_type.is_object
return False
# ______________________________________________________________________
def get_py_ufunc_ast(env, lhs, node):
if lhs is not None:
lhs.ctx = ast.Load()
builder = ufunc_builder.UFuncConverter(env)
tree = builder.visit(node)
ufunc_ast = builder.build_ufunc_ast(tree)
if print_ufunc:
from meta import asttools
module = ast.Module(body=[ufunc_ast])
print((asttools.python_source(module)))
# Vectorize Python function
if lhs is None:
restype = node.type
else:
restype = lhs.type.dtype
argtypes = [op.type.dtype if op.type.is_array else op.type
for op in builder.operands]
signature = restype(*argtypes)
return ufunc_ast, signature, builder
def get_py_ufunc(env, lhs, node):
ufunc_ast, signature, ufunc_builder = get_py_ufunc_ast(env, lhs, node)
py_ufunc = ufunc_builder.compile_to_pyfunc(ufunc_ast)
return py_ufunc, signature, ufunc_builder
# ______________________________________________________________________
class ArrayExpressionRewrite(visitors.NumbaTransformer):
"""
    Find element-wise expressions and run ElementalMapper to turn them into
    a minivect AST or a ufunc.
"""
nesting_level = 0
elementwise = False
is_slice_assign = False
def register_array_expression(self, node, lhs=None):
"""
Start the mapping process for the outermost node in the array expression.
"""
self.elementwise = False
def visit_elementwise(self, elementwise, node):
if elementwise and self.nesting_level == 0:
return self.register_array_expression(node)
self.nesting_level += 1
self.generic_visit(node)
self.nesting_level -= 1
self.elementwise = elementwise
return node
def visit_Assign(self, node):
self.is_slice_assign = False
self.visitlist(node.targets)
target_node = node.targets[0]
is_slice_assign = self.is_slice_assign
self.nesting_level = self.is_slice_assign
node.value = self.visit(node.value)
self.nesting_level = 0
elementwise = self.elementwise
if (len(node.targets) == 1 and is_slice_assign and
is_elementwise_assignment(node)): # and elementwise):
target_node = slicenodes.rewrite_slice(target_node,
self.nopython)
return self.register_array_expression(node.value, lhs=target_node)
return node
def visit_Subscript(self, node):
# print ast.dump(node)
self.generic_visit(node)
is_store = isinstance(node.ctx, ast.Store)
self.is_slice_assign = is_store and node.type.is_array
if is_store:
if nodes.is_ellipsis(node.slice):
return node.value
elif node.value.type.is_array and node.type.is_array:
node = slicenodes.rewrite_slice(node, self.nopython)
return node
def visit_Call(self, node):
if self.query(node, 'is_math'):
elementwise = node.type.is_array
return self.visit_elementwise(elementwise, node)
self.visitchildren(node)
return node
def visit_BinOp(self, node):
elementwise = node.type.is_array
return self.visit_elementwise(elementwise, node)
visit_UnaryOp = visit_BinOp
# ______________________________________________________________________
class ArrayExpressionRewriteUfunc(ArrayExpressionRewrite):
"""
Compile array expressions to ufuncs. Then call the ufunc with the array
arguments.
vectorizer_cls: the ufunc vectorizer to use
CANNOT be used in a nopython context
"""
def __init__(self, context, func, ast, vectorizer_cls=None):
super(ArrayExpressionRewriteUfunc, self).__init__(context, func, ast)
self.vectorizer_cls = vectorizer_cls or basic.BasicASTVectorize
def register_array_expression(self, node, lhs=None):
super(ArrayExpressionRewriteUfunc, self).register_array_expression(node,
lhs)
py_ufunc, signature, ufunc_builder = self.get_py_ufunc(lhs, node)
# Vectorize Python function
vectorizer = self.vectorizer_cls(py_ufunc)
vectorizer.add(restype=signature.return_type, argtypes=signature.args)
ufunc = vectorizer.build_ufunc()
# Call ufunc
args = ufunc_builder.operands
if lhs is None:
keywords = None
else:
keywords = [ast.keyword('out', lhs)]
func = nodes.ObjectInjectNode(ufunc)
call_ufunc = nodes.ObjectCallNode(signature=None, func=func, args=args,
keywords=keywords, py_func=ufunc)
return nodes.ObjectTempNode(call_ufunc)
# ______________________________________________________________________
class NumbaStaticArgsContext(utils.NumbaContext):
"Use a static argument list: shape, data1, strides1, data2, strides2, ..."
astbuilder_cls = miniast.ASTBuilder
optimize_llvm = False
optimize_broadcasting = False
# debug = True
# debug_elements = True
def init(self):
self.astbuilder = self.astbuilder_cls(self)
self.typemapper = minitypes.TypeMapper(self)
# def promote_types(self, t1, t2):
# return typesystem.promote(t1, t2)
#
def to_llvm(self, type):
if type.is_object:
return typesystem.object_.to_llvm(self)
return NotImplementedError("to_llvm", type)
# ______________________________________________________________________
class ArrayExpressionRewriteNative(ArrayExpressionRewrite):
"""
Compile array expressions to a minivect kernel that calls a Numba
scalar kernel with scalar inputs:
a[:, :] = b[:, :] * c[:, :]
becomes
tmp_a = slice(a)
tmp_b = slice(b)
tmp_c = slice(c)
shape = broadcast(tmp_a, tmp_b, tmp_c)
call minikernel(shape, tmp_a.data, tmp_a.strides,
tmp_b.data, tmp_b.strides,
tmp_c.data, tmp_c.strides)
with
def numba_kernel(b, c):
return b * c
def minikernel(...):
for (...)
for(...)
a[i, j] = numba_kernel(b[i, j], c[i, j])
CAN be used in a nopython context
"""
expr_count = 0
def array_attr(self, node, attr):
# Perform a low-level bitcast from object to an array type
# array = nodes.CoercionNode(node, float_[:])
array = node
return nodes.ArrayAttributeNode(attr, array)
def register_array_expression(self, node, lhs=None):
super(ArrayExpressionRewriteNative, self).register_array_expression(
node, lhs)
# llvm_module = llvm.core.Module.new(temp_name("array_expression_module"))
# llvm_module = self.env.llvm_context.module
lhs_type = lhs.type if lhs else node.type
is_expr = lhs is None
if node.type.is_array and lhs_type.ndim < node.type.ndim:
# TODO: this is valid in NumPy if the leading dimensions of the
# TODO: RHS have extent 1
raise error.NumbaError(
node, "Right hand side must have a "
"dimensionality <= %d" % lhs_type.ndim)
# Create ufunc scalar kernel
ufunc_ast, signature, ufunc_builder = get_py_ufunc_ast(self.env, lhs, node)
# Compile ufunc scalar kernel with numba
ast.fix_missing_locations(ufunc_ast)
# func_env = self.env.crnt.inherit(
# func=None, ast=ufunc_ast, func_signature=signature,
# wrap=False, #link=False, #llvm_module=llvm_module,
# )
# pipeline.run_env(self.env, func_env) #, pipeline_name='codegen')
func_env, (_, _, _) = pipeline.run_pipeline2(
self.env, None, ufunc_ast, signature,
function_globals=self.env.crnt.function_globals,
wrap=False, link=False, nopython=True,
#llvm_module=llvm_module, # pipeline_name='codegen',
)
llvm_module = func_env.llvm_module
operands = ufunc_builder.operands
operands = [nodes.CloneableNode(operand) for operand in operands]
if lhs is not None:
lhs = nodes.CloneableNode(lhs)
broadcast_operands = [lhs] + operands
lhs = lhs.clone
else:
broadcast_operands = operands[:]
shape = slicenodes.BroadcastNode(lhs_type, broadcast_operands)
operands = [op.clone for op in operands]
if lhs is None and self.nopython:
raise error.NumbaError(
node, "Cannot allocate new memory in nopython context")
elif lhs is None:
# TODO: determine best output order at runtime
shape = shape.cloneable
lhs = nodes.ArrayNewEmptyNode(lhs_type, shape.clone,
lhs_type.is_f_contig).cloneable
# Build minivect wrapper kernel
context = NumbaStaticArgsContext()
context.llvm_module = llvm_module
# context.llvm_ee = self.env.llvm_context.execution_engine
b = context.astbuilder
variables = [b.variable(name_node.type, "op%d" % i)
for i, name_node in enumerate([lhs] + operands)]
miniargs = [b.funcarg(variable) for variable in variables]
body = miniutils.build_kernel_call(func_env.lfunc.name, signature,
miniargs, b)
minikernel = b.function_from_numpy(
temp_name("array_expression"), body, miniargs)
lminikernel, = context.run_simple(minikernel,
specializers.StridedSpecializer)
# lminikernel.linkage = llvm.core.LINKAGE_LINKONCE_ODR
# pipeline.run_env(self.env, func_env, pipeline_name='post_codegen')
# llvm_module.verify()
del func_env
assert lminikernel.module is llvm_module
# print("---------")
# print(llvm_module)
# print("~~~~~~~~~~~~")
lminikernel = self.env.llvm_context.link(lminikernel)
# Build call to minivect kernel
operands.insert(0, lhs)
args = [shape]
scalar_args = []
for operand in operands:
if operand.type.is_array:
data_p = self.array_attr(operand, 'data')
data_p = nodes.CoercionNode(data_p,
operand.type.dtype.pointer())
if not isinstance(operand, nodes.CloneNode):
operand = nodes.CloneNode(operand)
strides_p = self.array_attr(operand, 'strides')
args.extend((data_p, strides_p))
else:
scalar_args.append(operand)
args.extend(scalar_args)
result = nodes.NativeCallNode(minikernel.type, args, lminikernel)
# Use native slicing in array expressions
slicenodes.mark_nopython(ast.Suite(body=result.args))
if not is_expr:
# a[:] = b[:] * c[:]
return result
# b[:] * c[:], return new array as expression
return nodes.ExpressionNode(stmts=[result], expr=lhs.clone)
########NEW FILE########
__FILENAME__ = array_validation
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba import error
from numba.ndarray_helpers import NumpyArray
is_object = lambda t: t.is_object and not t.is_array
class ArrayValidator(ast.NodeVisitor):
"""
Validate array usage, depending on array representation
(i.e. numpy vs. LLArray)
"""
def __init__(self, env):
self.env = env
self.foreign = not issubclass(env.crnt.array, NumpyArray)
def visit_CoercionNode(self, node):
t1, t2 = node.type, node.node.type
if self.foreign and t1.is_array ^ t2.is_array:
raise error.NumbaError(node, "Cannot coerce non-numpy array %s" %
self.env.crnt.array)
visit_CoerceToObject = visit_CoercionNode
visit_CoerceToNative = visit_CoercionNode
def visit_MultiArrayAPINode(self, node):
if self.foreign:
signature = node.signature
types = (signature.return_type,) + signature.argtypes
for ty in types:
if not ty.is_array:
raise TypeError("Cannot pass array %s as NumPy array" %
self.env.crnt.array)
# TODO: Calling other numba functions with different array
# TODO: representations may corrupt things
########NEW FILE########
__FILENAME__ = asdl
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys, os.path
_importlist = "VisitorBase parse check".split()
root = os.path.join(os.path.dirname(__file__))
common_path = os.path.join(root, 'common')
#------------------------------------------------------------------------
# ASDL Processing
#------------------------------------------------------------------------
class ASDLProcessor(object):
"""
Allow pre-processing of ASDL source (str) and post-processing of the
resulting ASDL tree.
"""
def preprocess(self, asdl_source):
return asdl_source
def postprocess(self, asdl_tree):
return asdl_tree
#------------------------------------------------------------------------
# Parse ASDL Schemas
#------------------------------------------------------------------------
class ASDLParser(object):
"""
ASDL parser that accepts string inputs. Defers to the given ASDL module
implementation.
"""
def __init__(self, asdlmod, asdl_processor):
self.asdlmod = asdlmod
self.asdl_processor = asdl_processor
def parse(self, buf):
"""
Parse an ASDL string.
"""
buf = self.asdl_processor.preprocess(buf)
scanner = self.asdlmod.ASDLScanner()
parser = self.asdlmod.ASDLParser()
tokens = scanner.tokenize(buf)
try:
asdl_tree = parser.parse(tokens)
except self.asdlmod.ASDLSyntaxError as err:
raise ValueError("Error while parsing schema: %s" % (err,))
# print(err)
# lines = buf.split("\n")
# print((lines[err.lineno - 1])) # lines starts at 0, files at 1
asdl_tree = self.asdl_processor.postprocess(asdl_tree)
return asdl_tree
def check(self, mod, schema_name):
"""
Check the validity of an ASDL parse.
"""
v = self.asdlmod.Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in self.asdlmod.builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
print(("Undefined type %s, used in %s" % (t, uses)))
if v.errors:
raise ValueError(
"Errors found while checking ASDL schema %r: %s" % (
schema_name, v.errors))
#------------------------------------------------------------------------
# Load ASDL Schemas
#------------------------------------------------------------------------
class ASDLLoader(object):
"""
Load a schema given an ASDLParser and an ASDL schema as a string.
"""
def __init__(self, parser, schema_str, schema_name):
self.parser = parser
self.schema_str = schema_str
self.schema_name = schema_name
def load(self):
asdl = self.parser.parse(self.schema_str)
self.parser.check(asdl, self.schema_name)
return asdl
def load(schema_name, schema_str, asdlmod, asdl_processor=None):
asdl_processor = asdl_processor or ASDLProcessor()
parser = ASDLParser(asdlmod, asdl_processor)
loader = ASDLLoader(parser, schema_str, schema_name)
return parser, loader
#------------------------------------------------------------------------
# Get ASDL implementation
#------------------------------------------------------------------------
def get_asdl_pydir():
major, minor = sys.version_info[0], sys.version_info[1]
# Assumes that specific-path and common-path are a subdirectory
# Build an absolute module path.
prefix = __name__.rsplit('.', 1)
# The else-case is for running tests in the current directory
base = (prefix[0] + '.') if len(prefix) > 1 else ''
dir = 'py%d_%d' % (major, minor)
return base, dir
def _get_asdl_depending_on_version():
"""
Return Python ASDL implementation depending on the Python version.
"""
use_abs_import = 0
base, dir = get_asdl_pydir()
modname = base + dir + '.asdl'
try:
# try to import from version specific directory
mod = __import__(modname, fromlist=_importlist, level=use_abs_import)
except ImportError:
# fallback to import from common directory
dir = 'common'
modname = base + dir + '.asdl'
mod = __import__(modname, fromlist=_importlist)
return mod
def load_pyschema(filename):
"""
Load ASDL from the version-specific directory if the schema exists in
there, otherwise from the 'common' subpackage.
Returns a two-tuple (ASDLParser, ASDLLoader).
"""
base, dir = get_asdl_pydir()
version_specific_path = os.path.join(root, dir)
srcfile = os.path.join(version_specific_path, filename)
if not os.path.exists(srcfile):
srcfile = os.path.join(common_path, filename)
from numba.asdl.common import asdl as asdlmod
else:
asdlmod = _get_asdl_depending_on_version()
asdl_str = open(srcfile).read()
return load(filename, asdl_str, asdlmod)
#------------------------------------------------------------------------
# Globals
#------------------------------------------------------------------------
python_parser, python_loader = load_pyschema("Python.asdl")
# Python version-specific parsed ASDL
python_asdl = python_loader.load()
# Python version-specific asdl implementation
pyasdl = python_parser.asdlmod
########NEW FILE########
__FILENAME__ = asdl
# -*- coding: utf-8 -*-
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
from __future__ import print_function, division, absolute_import
import os
import traceback
from . import spark
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, xxx_todo_changeme):
" module ::= Id Id version { } "
(module, name, version, _0, _1) = xxx_todo_changeme
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, xxx_todo_changeme1):
" module ::= Id Id version { definitions } "
(module, name, version, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_module_1(self, xxx_todo_changeme1):
" module ::= Id Id { } "
(module, name, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, None)
def p_module_2(self, xxx_todo_changeme1):
" module ::= Id Id { definitions } "
(module, name, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, None)
def p_version(self, xxx_todo_changeme2):
"version ::= Id String"
(version, V) = xxx_todo_changeme2
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, xxx_todo_changeme3):
" definitions ::= definition "
(definition,) = xxx_todo_changeme3
return definition
def p_definition_1(self, xxx_todo_changeme4):
" definitions ::= definition definitions "
(definitions, definition) = xxx_todo_changeme4
return definitions + definition
def p_definition(self, xxx_todo_changeme5):
" definition ::= Id = type "
(id, _, type) = xxx_todo_changeme5
return [Type(id, type)]
def p_type_0(self, xxx_todo_changeme6):
" type ::= product "
(product,) = xxx_todo_changeme6
return product
def p_type_1(self, xxx_todo_changeme7):
" type ::= sum "
(sum,) = xxx_todo_changeme7
return Sum(sum)
def p_type_2(self, xxx_todo_changeme8):
" type ::= sum Id ( fields ) "
(sum, id, _0, attributes, _1) = xxx_todo_changeme8
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, xxx_todo_changeme9):
" product ::= ( fields ) "
(_0, fields, _1) = xxx_todo_changeme9
fields.reverse()
return Product(fields)
def p_sum_0(self, xxx_todo_changeme10):
" sum ::= constructor "
(constructor,) = xxx_todo_changeme10
return [constructor]
def p_sum_1(self, xxx_todo_changeme11):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme11
return [constructor] + sum
def p_sum_2(self, xxx_todo_changeme12):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme12
return [constructor] + sum
def p_constructor_0(self, xxx_todo_changeme13):
" constructor ::= Id "
(id,) = xxx_todo_changeme13
return Constructor(id)
def p_constructor_1(self, xxx_todo_changeme14):
" constructor ::= Id ( fields ) "
(id, _0, fields, _1) = xxx_todo_changeme14
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, xxx_todo_changeme15):
" fields ::= field "
(field,) = xxx_todo_changeme15
return [field]
def p_fields_1(self, xxx_todo_changeme16):
" fields ::= field , fields "
(field, _, fields) = xxx_todo_changeme16
return fields + [field]
def p_field_0(self, xxx_todo_changeme17):
" field ::= Id "
(type,) = xxx_todo_changeme17
return Field(type)
def p_field_1(self, xxx_todo_changeme18):
" field ::= Id Id "
(type, name) = xxx_todo_changeme18
return Field(type, name)
def p_field_2(self, xxx_todo_changeme19):
" field ::= Id * Id "
(type, _, name) = xxx_todo_changeme19
return Field(type, name, seq=True)
def p_field_3(self, xxx_todo_changeme20):
" field ::= Id ? Id "
(type, _, name) = xxx_todo_changeme20
return Field(type, name, opt=True)
def p_field_4(self, xxx_todo_changeme21):
" field ::= Id * "
(type, _) = xxx_todo_changeme21
return Field(type, seq=True)
def p_field_5(self, xxx_todo_changeme22):
" field ::= Id ? "
(type, _) = xxx_todo_changeme22
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object", "bytes")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
pass # a marker class
class Module(AST):
def __init__(self, name, dfns, version):
self.name = name
self.dfns = dfns
self.version = version
self.types = {} # maps type name to value (from dfns)
for type in dfns:
self.types[type.name.value] = type.value
def __repr__(self):
return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
def __init__(self, name, fields=None):
self.name = name
self.fields = fields or []
def __repr__(self):
return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def __repr__(self):
if self.seq:
extra = ", seq=True"
elif self.opt:
extra = ", opt=True"
else:
extra = ""
if self.name is None:
return "Field(%s%s)" % (self.type, extra)
else:
return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Sum(%s)" % self.types
else:
return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
def __init__(self, fields):
self.fields = fields
def __repr__(self):
return "Product(%s)" % self.fields
class VisitorBase(object):
def __init__(self, skip=False):
self.cache = {}
self.skip = skip
def visit(self, object, *args):
meth = self._dispatch(object)
if meth is None:
return
try:
meth(object, *args)
except Exception as err:
print(("Error visiting", repr(object)))
print(err)
traceback.print_exc()
# XXX hack
if hasattr(self, 'file'):
self.file.flush()
os._exit(1)
def _dispatch(self, object):
assert isinstance(object, AST), repr(object)
klass = object.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
if self.skip:
meth = getattr(self, methname, None)
else:
meth = getattr(self, methname)
self.cache[klass] = meth
return meth
class Check(VisitorBase):
def __init__(self):
super(Check, self).__init__(skip=True)
self.cons = {}
self.errors = 0
self.types = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
key = str(cons.name)
conflict = self.cons.get(key)
if conflict is None:
self.cons[key] = name
else:
print(("Redefinition of constructor %s" % key))
print(("Defined in %s and %s" % (conflict, name)))
self.errors += 1
for f in cons.fields:
self.visit(f, key)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
def check(mod):
v = Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
print(("Undefined type %s, used in %s" % (t, uses)))
return not v.errors
def parse(file):
scanner = ASDLScanner()
parser = ASDLParser()
buf = open(file).read()
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
except ASDLSyntaxError as err:
print(err)
lines = buf.split("\n")
print((lines[err.lineno - 1])) # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print(file)
mod = parse(file)
print(("module", mod.name))
print((len(mod.dfns), "definitions"))
if not check(mod):
print("Check failed")
else:
for dfn in mod.dfns:
print(dfn.type)
########NEW FILE########
__FILENAME__ = spark
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Copyright (c) 1998-2002 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = 'SPARK-0.7 (pre-alpha-5)'
import re
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in c.__dict__.keys():
if name not in namedict:
namelist.append(name)
namedict[name] = 1
return namelist
class GenericScanner:
def __init__(self, flags=0):
pattern = self.reflect()
self.re = re.compile(pattern, re.VERBOSE|flags)
self.index2func = {}
for name, number in self.re.groupindex.items():
self.index2func[number-1] = getattr(self, 't_' + name)
def makeRE(self, name):
doc = getattr(self, name).__doc__
rv = '(?P<%s>%s)' % (name[2:], doc)
return rv
def reflect(self):
rv = []
for name in _namelist(self):
if name[:2] == 't_' and name != 't_default':
rv.append(self.makeRE(name))
rv.append(self.makeRE('t_default'))
return '|'.join(rv)
def error(self, s, pos):
print("Lexical error at position %s" % pos)
raise SystemExit
def tokenize(self, s):
pos = 0
n = len(s)
while pos < n:
m = self.re.match(s, pos)
if m is None:
self.error(s, pos)
groups = m.groups()
for i in range(len(groups)):
if groups[i] and i in self.index2func:
self.index2func[i](groups[i])
pos = m.end()
def t_default(self, s):
r'( . | \n )+'
print("Specification error: unmatched input")
raise SystemExit
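# ----------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original SPARK
# code): a minimal GenericScanner subclass. Token rules are the regular
# expressions in the docstrings of the t_* methods (combined under
# re.VERBOSE); tokenize() dispatches every match to its handler. The
# class name and token kinds below are made up for the example.
# ----------------------------------------------------------------------
class _ExampleCalcScanner(GenericScanner):
    def tokenize(self, s):
        # collect (kind, text) pairs produced by the t_* handlers
        self.rv = []
        GenericScanner.tokenize(self, s)
        return self.rv
    def t_whitespace(self, s):
        r' \s+ '
        pass  # blanks separate tokens and are otherwise ignored
    def t_number(self, s):
        r' \d+ '
        self.rv.append(('number', s))
    def t_plus(self, s):
        r' \+ '
        self.rv.append(('+', s))
# e.g. _ExampleCalcScanner().tokenize("1 + 23") should yield
# [('number', '1'), ('+', '+'), ('number', '23')]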
#
# Extracted from GenericParser and made global so that [un]picking works.
#
class _State:
def __init__(self, stateno, items):
self.T, self.complete, self.items = [], [], items
self.stateno = stateno
class GenericParser:
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968. New formulation of
# the parser according to J. Aycock, "Practical Earley Parsing
# and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
# 2001, and J. Aycock and R. N. Horspool, "Practical Earley
# Parsing", unpublished paper, 2001.
#
def __init__(self, start):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
self.augment(start)
self.ruleschanged = 1
_NULLABLE = '\e_'
_START = 'START'
_BOF = '|-'
#
# When pickling, take the time to generate the full state machine;
# some information is then extraneous, too. Unfortunately we
# can't save the rule2func map.
#
def __getstate__(self):
if self.ruleschanged:
#
# XXX - duplicated from parse()
#
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
#
# XXX - should find a better way to do this..
#
changes = 1
while changes:
changes = 0
for k, v in self.edges.items():
if v is None:
state, sym = k
if state in self.states:
self.goto(state, sym)
changes = 1
rv = self.__dict__.copy()
for s in self.states.values():
del s.items
del rv['rule2func']
del rv['nullable']
del rv['cores']
return rv
def __setstate__(self, D):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
start = D['rules'][self._START][0][1][1] # Blech.
self.augment(start)
D['rule2func'] = self.rule2func
D['makeSet'] = self.makeSet_fast
self.__dict__ = D
#
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
# thee not with this; nor shall thee toucheth the _preprocess
# argument to addRule.
#
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func, _preprocess=1):
fn = func
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
if _preprocess:
rule, fn = self.preprocess(rule, func)
if lhs in self.rules:
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = 1
def collectRules(self):
for name in _namelist(self):
if name[:2] == 'p_':
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
rule = '%s ::= %s %s' % (self._START, self._BOF, start)
self.addRule(rule, lambda args: args[1], 0)
def computeNull(self):
self.nullable = {}
tbd = []
for rulelist in self.rules.values():
lhs = rulelist[0][0]
self.nullable[lhs] = 0
for rule in rulelist:
rhs = rule[1]
if len(rhs) == 0:
self.nullable[lhs] = 1
continue
#
# We only need to consider rules which
# consist entirely of nonterminal symbols.
# This should be a savings on typical
# grammars.
#
for sym in rhs:
if sym not in self.rules:
break
else:
tbd.append(rule)
changes = 1
while changes:
changes = 0
for lhs, rhs in tbd:
if self.nullable[lhs]:
continue
for sym in rhs:
if not self.nullable[sym]:
break
else:
self.nullable[lhs] = 1
changes = 1
def makeState0(self):
s0 = _State(0, [])
for rule in self.newrules[self._START]:
s0.items.append((rule, 0))
return s0
def finalState(self, tokens):
#
# Yuck.
#
if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
return 1
start = self.rules[self._START][0][1][1]
return self.goto(1, start)
def makeNewRules(self):
worklist = []
for rulelist in self.rules.values():
for rule in rulelist:
worklist.append((rule, 0, 1, rule))
for rule, i, candidate, oldrule in worklist:
lhs, rhs = rule
n = len(rhs)
while i < n:
sym = rhs[i]
if sym not in self.rules or \
not self.nullable[sym]:
candidate = 0
i = i + 1
continue
newrhs = list(rhs)
newrhs[i] = self._NULLABLE+sym
newrule = (lhs, tuple(newrhs))
worklist.append((newrule, i+1,
candidate, oldrule))
candidate = 0
i = i + 1
else:
if candidate:
lhs = self._NULLABLE+lhs
rule = (lhs, rhs)
if lhs in self.newrules:
self.newrules[lhs].append(rule)
else:
self.newrules[lhs] = [ rule ]
self.new2old[rule] = oldrule
def typestring(self, token):
return None
def error(self, token):
print("Syntax error at or near '%s' token" % token)
raise SystemExit
def parse(self, tokens):
sets = [ [(1,0), (2,0)] ]
self.links = {}
if self.ruleschanged:
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
        for i in range(len(tokens)):
sets.append([])
if sets[i] == []:
break
self.makeSet(tokens[i], sets, i)
else:
sets.append([])
self.makeSet(None, sets, len(tokens))
#_dump(tokens, sets, self.states)
finalitem = (self.finalState(tokens), 0)
if finalitem not in sets[-2]:
if len(tokens) > 0:
self.error(tokens[i-1])
else:
self.error(None)
return self.buildTree(self._START, finalitem,
tokens, len(sets)-2)
def isnullable(self, sym):
#
# For symbols in G_e only. If we weren't supporting 1.5,
# could just use sym.startswith().
#
return self._NULLABLE == sym[0:len(self._NULLABLE)]
def skip(self, xxx_todo_changeme, pos=0):
(lhs, rhs) = xxx_todo_changeme
n = len(rhs)
while pos < n:
if not self.isnullable(rhs[pos]):
break
pos = pos + 1
return pos
def makeState(self, state, sym):
assert sym is not None
#
# Compute \epsilon-kernel state's core and see if
# it exists already.
#
kitems = []
for rule, pos in self.states[state].items:
lhs, rhs = rule
if rhs[pos:pos+1] == (sym,):
kitems.append((rule, self.skip(rule, pos+1)))
core = sorted(kitems)
tcore = tuple(core)
if tcore in self.cores:
return self.cores[tcore]
#
# Nope, doesn't exist. Compute it and the associated
# \epsilon-nonkernel state together; we'll need it right away.
#
k = self.cores[tcore] = len(self.states)
K, NK = _State(k, kitems), _State(k+1, [])
self.states[k] = K
predicted = {}
edges = self.edges
rules = self.newrules
for X in K, NK:
worklist = X.items
for item in worklist:
rule, pos = item
lhs, rhs = rule
if pos == len(rhs):
X.complete.append(rule)
continue
nextSym = rhs[pos]
key = (X.stateno, nextSym)
if nextSym not in rules:
if key not in edges:
edges[key] = None
X.T.append(nextSym)
else:
edges[key] = None
if nextSym not in predicted:
predicted[nextSym] = 1
for prule in rules[nextSym]:
ppos = self.skip(prule)
new = (prule, ppos)
NK.items.append(new)
#
# Problem: we know K needs generating, but we
# don't yet know about NK. Can't commit anything
# regarding NK to self.edges until we're sure. Should
# we delay committing on both K and NK to avoid this
# hacky code? This creates other problems..
#
if X is K:
edges = {}
if NK.items == []:
return k
#
# Check for \epsilon-nonkernel's core. Unfortunately we
# need to know the entire set of predicted nonterminals
# to do this without accidentally duplicating states.
#
        core = sorted(predicted.keys())
tcore = tuple(core)
if tcore in self.cores:
self.edges[(k, None)] = self.cores[tcore]
return k
nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
self.edges.update(edges)
self.states[nk] = NK
return k
def goto(self, state, sym):
key = (state, sym)
if key not in self.edges:
#
# No transitions from state on sym.
#
return None
rv = self.edges[key]
if rv is None:
#
# Target state isn't generated yet. Remedy this.
#
rv = self.makeState(state, sym)
self.edges[key] = rv
return rv
def gotoT(self, state, t):
return [self.goto(state, t)]
def gotoST(self, state, st):
rv = []
for t in self.states[state].T:
if st == t:
rv.append(self.goto(state, t))
return rv
def add(self, set, item, i=None, predecessor=None, causal=None):
if predecessor is None:
if item not in set:
set.append(item)
else:
key = (item, i)
if item not in set:
self.links[key] = []
set.append(item)
self.links[key].append((predecessor, causal))
def makeSet(self, token, sets, i):
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
if ttype is not None:
fn, arg = self.gotoT, ttype
else:
fn, arg = self.gotoST, token
for item in cur:
ptr = (item, i)
state, parent = item
add = fn(state, arg)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
nk = self.goto(k, None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
k = self.goto(pstate, lhs)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
self.add(cur, (k, pparent),
i, pptr, why)
nk = self.goto(k, None)
if nk is not None:
self.add(cur, (nk, i))
def makeSet_fast(self, token, sets, i):
#
# Call *only* when the entire state machine has been built!
# It relies on self.edges being filled in completely, and
# then duplicates and inlines code to boost speed at the
# cost of extreme ugliness.
#
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
for item in cur:
ptr = (item, i)
state, parent = item
if ttype is not None:
k = self.edges.get((state, ttype), None)
if k is not None:
#self.add(next, (k, parent), i+1, ptr)
#INLINED --v
new = (k, parent)
key = (new, i+1)
if new not in next:
self.links[key] = []
next.append(new)
self.links[key].append((ptr, None))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(next, (nk, i+1))
#INLINED --v
new = (nk, i+1)
if new not in next:
next.append(new)
#INLINED --^
else:
add = self.gotoST(state, token)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
#k = self.goto(pstate, lhs)
k = self.edges.get((pstate, lhs), None)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
#self.add(cur, (k, pparent),
# i, pptr, why)
#INLINED --v
new = (k, pparent)
key = (new, i)
if new not in cur:
self.links[key] = []
cur.append(new)
self.links[key].append((pptr, why))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(cur, (nk, i))
#INLINED --v
new = (nk, i)
if new not in cur:
cur.append(new)
#INLINED --^
def predecessor(self, key, causal):
for p, c in self.links[key]:
if c == causal:
return p
assert 0
def causal(self, key):
links = self.links[key]
if len(links) == 1:
return links[0][1]
choices = []
rule2cause = {}
for p, c in links:
rule = c[2]
choices.append(rule)
rule2cause[rule] = c
return rule2cause[self.ambiguity(choices)]
def deriveEpsilon(self, nt):
if len(self.newrules[nt]) > 1:
rule = self.ambiguity(self.newrules[nt])
else:
rule = self.newrules[nt][0]
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
attr[i] = self.deriveEpsilon(rhs[i])
return self.rule2func[self.new2old[rule]](attr)
def buildTree(self, nt, item, tokens, k):
state, parent = item
choices = []
for rule in self.states[state].complete:
if rule[0] == nt:
choices.append(rule)
rule = choices[0]
if len(choices) > 1:
rule = self.ambiguity(choices)
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
sym = rhs[i]
if sym not in self.newrules:
if sym != self._BOF:
attr[i] = tokens[k-1]
key = (item, k)
item, k = self.predecessor(key, None)
#elif self.isnullable(sym):
elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
attr[i] = self.deriveEpsilon(sym)
else:
key = (item, k)
why = self.causal(key)
attr[i] = self.buildTree(sym, why[0],
tokens, why[1])
item, k = self.predecessor(key, why)
return self.rule2func[self.new2old[rule]](attr)
def ambiguity(self, rules):
#
# XXX - problem here and in collectRules() if the same rule
# appears in >1 method. Also undefined results if rules
# causing the ambiguity appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(rules)):
lhs, rhs = rule = rules[i]
name = self.rule2name[self.new2old[rule]]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list_ = [x[1] for x in sortlist]
return rules[name2index[self.resolve(list_)]]
def resolve(self, list):
#
# Resolve ambiguity in favor of the shortest RHS.
# Since we walk the tree from the top down, this
# should effectively resolve in favor of a "shift".
#
return list[0]
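# ----------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original SPARK
# code): a minimal GenericParser subclass. Grammar rules live in the
# docstrings of the p_* methods; each method is the semantic action for
# its rule and receives the attribute values of the right-hand side.
# Tokens here are plain strings, so the default typestring() (which
# returns None) makes the parser compare tokens directly against the
# terminal symbols. All names below are made up for the example.
# ----------------------------------------------------------------------
class _ExampleSumParser(GenericParser):
    def __init__(self):
        GenericParser.__init__(self, 'expr')
    def p_expr_plus(self, args):
        ' expr ::= expr + term '
        # args = [value of expr, the raw '+' token, value of term]
        return args[0] + args[2]
    def p_expr_term(self, args):
        ' expr ::= term '
        return args[0]
    def p_term(self, args):
        ' term ::= n '
        # terminals come back as the raw tokens; count every 'n' as 1
        return 1
# e.g. _ExampleSumParser().parse(['n', '+', 'n', '+', 'n']) should
# evaluate to 3.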
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start):
GenericParser.__init__(self, start)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: \
lambda args, lhs=lhs, self=self: \
self.buildASTNode(args, lhs)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token): return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[:len(args)] = args
return rv
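# ----------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original SPARK
# code): using GenericASTBuilder. The AST argument must be a class whose
# instances support len() and slice assignment -- a list subclass is the
# simplest choice. The node and builder names are made up.
# ----------------------------------------------------------------------
class _ExampleNode(list):
    # a node is a list of children tagged with the grammar symbol
    def __init__(self, type):
        list.__init__(self)
        self.type = type
class _ExampleTreeBuilder(GenericASTBuilder):
    def __init__(self):
        GenericASTBuilder.__init__(self, _ExampleNode, 'expr')
    def p_rules(self, args):
        '''
            expr ::= expr + term
            expr ::= term
            term ::= n
        '''
        # the body of a p_* method is ignored here: preprocess() rebinds
        # every rule to buildASTNode(), which assembles _ExampleNode trees
# e.g. _ExampleTreeBuilder().parse(['n', '+', 'n']) should return an
# _ExampleNode of type 'expr' whose children are the subtrees plus the
# raw '+' token.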
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
pass
class GenericASTTraversal:
def __init__(self, ast):
self.ast = ast
def typestring(self, node):
return node.type
def prune(self):
raise GenericASTTraversalPruningException
def preorder(self, node=None):
if node is None:
node = self.ast
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
for kid in node:
self.preorder(kid)
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
def postorder(self, node=None):
if node is None:
node = self.ast
for kid in node:
self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def default(self, node):
pass
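# ----------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original SPARK
# code): a GenericASTTraversal visitor. Nodes only need a .type attribute
# and must be iterable over their children; the traversal dispatches to
# n_<type> methods and falls back to default(). Names are made up.
# ----------------------------------------------------------------------
class _ExampleTreeNode:
    # minimal node: a .type tag plus a list of child nodes
    def __init__(self, type, kids=()):
        self.type = type
        self.kids = list(kids)
    def __iter__(self):
        return iter(self.kids)
class _ExampleLeafCounter(GenericASTTraversal):
    def __init__(self, ast):
        GenericASTTraversal.__init__(self, ast)
        self.count = 0
        self.preorder()
    def n_leaf(self, node):
        # called once for every node whose typestring() is 'leaf'
        self.count = self.count + 1
# e.g. for tree = _ExampleTreeNode('pair', [_ExampleTreeNode('leaf'),
# _ExampleTreeNode('leaf')]), _ExampleLeafCounter(tree).count should be 2.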
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
def __init__(self, start, ast):
GenericParser.__init__(self, start)
self.ast = ast
def preprocess(self, rule, func):
rebind = lambda func, self=self: \
lambda args, func=func, self=self: \
self.foundMatch(args, func)
lhs, rhs = rule
rhslist = list(rhs)
rhslist.reverse()
return (lhs, tuple(rhslist)), rebind(func)
def foundMatch(self, args, func):
func(args[-1])
return args[-1]
def match_r(self, node):
self.input.insert(0, node)
children = 0
for child in node:
if children == 0:
self.input.insert(0, '(')
children = children + 1
self.match_r(child)
if children > 0:
self.input.insert(0, ')')
def match(self, ast=None):
if ast is None:
ast = self.ast
self.input = []
self.match_r(ast)
self.parse(self.input)
def resolve(self, list):
#
# Resolve ambiguity in favor of the longest RHS.
#
return list[-1]
def _dump(tokens, sets, states):
for i in range(len(sets)):
print('set', i)
for item in sets[i]:
print('\t', item)
for (lhs, rhs), pos in states[item[0]].items:
print('\t\t', lhs, '::=', end=' ')
print(' '.join(rhs[:pos]), end=' ')
print('.', end=' ')
print(' '.join(rhs[pos:]))
if i < len(tokens):
print()
print('token', str(tokens[i]))
print()
########NEW FILE########
__FILENAME__ = processor
# -*- coding: utf-8 -*-
"""
ASDL processor that handles imports. This should be part of the parser,
but we have multiple copies of the parser around...
"""
from __future__ import print_function, division, absolute_import
import os
import re
import tokenize
from numba.asdl.asdl import ASDLProcessor
#------------------------------------------------------------------------
# Process ASDL imports
#------------------------------------------------------------------------
class ImportProcessor(ASDLProcessor):
def __init__(self, asdlmod, import_path):
self.asdlmod = asdlmod
self.import_path = import_path
def preprocess(self, asdl_source):
# Find and save import statements. Remove imports from source
self.imports, source = find_imports(asdl_source)
return source
def postprocess(self, asdl_tree):
# Import subtrees and types
subtrees, types = apply_imports(self.asdlmod,
self.imports,
self.import_path)
# Merge imported subtrees and types
asdl_tree.dfns.extend(subtrees)
asdl_tree.types.update(types)
return asdl_tree
#------------------------------------------------------------------------
# Find Source Imports
#------------------------------------------------------------------------
pattern = "^\s*from (\w+) import (\*|\w+(?:, \w+)*)$"
def find_imports(asdl_source):
"""
Find imports in the given ASDL source.
:return: Two-tuple of (imports, source) where source has the imports
removed and imports is a list of two-tuples (modname, (names))
([(str, (str,))], str)
"""
lines = asdl_source.splitlines()
imports = []
source_lines = []
for line in lines:
m = re.match(pattern, line)
if m is not None:
module_name, names = m.groups()
if names == '*':
import_tuple = (module_name, (names,))
else:
import_tuple = (module_name, tuple(names.split(", ")))
imports.append(import_tuple)
else:
source_lines.append(line)
return imports, "\n".join(source_lines)
#------------------------------------------------------------------------
# Find Imported Terms
#------------------------------------------------------------------------
def apply_imports(asdlmod, imports, import_path):
"""
Import ASDL subtrees from another schema along the given import path.
"""
from . import asdl
subtrees = []
types = {}
for modname, import_names in imports:
# Find module file
for path in import_path:
fname = os.path.join(path, modname + '.asdl')
if os.path.exists(fname):
# Load ASDL tree
parser, loader = asdl.load(modname, open(fname).read(), asdlmod)
tree = loader.load()
# Collect subtrees and types by name
handle_import(tree, import_names, subtrees, types)
break
else:
raise ImportError("No module named %r" % (modname + '.asdl',))
return subtrees, types
def handle_import(tree, import_names, subtrees, types):
for import_name in import_names:
dfns = from_import(tree, import_name)
subtrees.extend(dfns)
for dfn in dfns:
            # print(dfn.name, tree.types.keys())  # debugging aid, disabled
            types[str(dfn.name)] = tree.types[str(dfn.name)]
def from_import(tree, import_name):
if import_name == '*':
return tree.dfns #[dfn for dfn in tree.dfns if dfn.name in tree.types]
for definition in tree.dfns:
if str(definition.name) == import_name:
return [definition]
raise ImportError("Module %r has no rule %r" % (tree.name, import_name))
########NEW FILE########
__FILENAME__ = asdl
# -*- coding: utf-8 -*-
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
from __future__ import print_function, division, absolute_import
import os
import traceback
from . import spark
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, xxx_todo_changeme):
" module ::= Id Id version { } "
(module, name, version, _0, _1) = xxx_todo_changeme
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, xxx_todo_changeme1):
" module ::= Id Id version { definitions } "
(module, name, version, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_version(self, xxx_todo_changeme2):
"version ::= Id String"
(version, V) = xxx_todo_changeme2
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, xxx_todo_changeme3):
" definitions ::= definition "
(definition,) = xxx_todo_changeme3
return definition
def p_definition_1(self, xxx_todo_changeme4):
" definitions ::= definition definitions "
(definitions, definition) = xxx_todo_changeme4
return definitions + definition
def p_definition(self, xxx_todo_changeme5):
" definition ::= Id = type "
(id, _, type) = xxx_todo_changeme5
return [Type(id, type)]
def p_type_0(self, xxx_todo_changeme6):
" type ::= product "
(product,) = xxx_todo_changeme6
return product
def p_type_1(self, xxx_todo_changeme7):
" type ::= sum "
(sum,) = xxx_todo_changeme7
return Sum(sum)
def p_type_2(self, xxx_todo_changeme8):
" type ::= sum Id ( fields ) "
(sum, id, _0, attributes, _1) = xxx_todo_changeme8
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, xxx_todo_changeme9):
" product ::= ( fields ) "
(_0, fields, _1) = xxx_todo_changeme9
fields.reverse()
return Product(fields)
def p_sum_0(self, xxx_todo_changeme10):
" sum ::= constructor "
(constructor,) = xxx_todo_changeme10
return [constructor]
def p_sum_1(self, xxx_todo_changeme11):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme11
return [constructor] + sum
def p_sum_2(self, xxx_todo_changeme12):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme12
return [constructor] + sum
def p_constructor_0(self, xxx_todo_changeme13):
" constructor ::= Id "
(id,) = xxx_todo_changeme13
return Constructor(id)
def p_constructor_1(self, xxx_todo_changeme14):
" constructor ::= Id ( fields ) "
(id, _0, fields, _1) = xxx_todo_changeme14
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, xxx_todo_changeme15):
" fields ::= field "
(field,) = xxx_todo_changeme15
return [field]
def p_fields_1(self, xxx_todo_changeme16):
" fields ::= field , fields "
(field, _, fields) = xxx_todo_changeme16
return fields + [field]
def p_field_0(self, xxx_todo_changeme17):
" field ::= Id "
(type,) = xxx_todo_changeme17
return Field(type)
def p_field_1(self, xxx_todo_changeme18):
" field ::= Id Id "
(type, name) = xxx_todo_changeme18
return Field(type, name)
def p_field_2(self, xxx_todo_changeme19):
" field ::= Id * Id "
(type, _, name) = xxx_todo_changeme19
return Field(type, name, seq=True)
def p_field_3(self, xxx_todo_changeme20):
" field ::= Id ? Id "
(type, _, name) = xxx_todo_changeme20
return Field(type, name, opt=True)
def p_field_4(self, xxx_todo_changeme21):
" field ::= Id * "
(type, _) = xxx_todo_changeme21
return Field(type, seq=True)
def p_field_5(self, xxx_todo_changeme22):
" field ::= Id ? "
(type, _) = xxx_todo_changeme22
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
pass # a marker class
class Module(AST):
def __init__(self, name, dfns, version):
self.name = name
self.dfns = dfns
self.version = version
self.types = {} # maps type name to value (from dfns)
for type in dfns:
self.types[type.name.value] = type.value
def __repr__(self):
return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
def __init__(self, name, fields=None):
self.name = name
self.fields = fields or []
def __repr__(self):
return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def __repr__(self):
if self.seq:
extra = ", seq=True"
elif self.opt:
extra = ", opt=True"
else:
extra = ""
if self.name is None:
return "Field(%s%s)" % (self.type, extra)
else:
return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Sum(%s)" % self.types
else:
return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
def __init__(self, fields):
self.fields = fields
def __repr__(self):
return "Product(%s)" % self.fields
class VisitorBase(object):
def __init__(self, skip=False):
self.cache = {}
self.skip = skip
def visit(self, object, *args):
meth = self._dispatch(object)
if meth is None:
return
try:
meth(object, *args)
except Exception as err:
print(("Error visiting", repr(object)))
print(err)
traceback.print_exc()
# XXX hack
if hasattr(self, 'file'):
self.file.flush()
os._exit(1)
def _dispatch(self, object):
assert isinstance(object, AST), repr(object)
klass = object.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
if self.skip:
meth = getattr(self, methname, None)
else:
meth = getattr(self, methname)
self.cache[klass] = meth
return meth
class Check(VisitorBase):
def __init__(self):
super(Check, self).__init__(skip=True)
self.cons = {}
self.errors = 0
self.types = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
key = str(cons.name)
conflict = self.cons.get(key)
if conflict is None:
self.cons[key] = name
else:
print(("Redefinition of constructor %s" % key))
print(("Defined in %s and %s" % (conflict, name)))
self.errors += 1
for f in cons.fields:
self.visit(f, key)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
def check(mod):
v = Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
print(("Undefined type %s, used in %s" % (t, uses)))
return not v.errors
def parse(file):
scanner = ASDLScanner()
parser = ASDLParser()
buf = open(file).read()
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
except ASDLSyntaxError as err:
print(err)
lines = buf.split("\n")
print((lines[err.lineno - 1])) # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print(file)
mod = parse(file)
print(("module", mod.name))
print((len(mod.dfns), "definitions"))
if not check(mod):
print("Check failed")
else:
for dfn in mod.dfns:
print((dfn.type))
########NEW FILE########
__FILENAME__ = spark
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Copyright (c) 1998-2002 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__version__ = 'SPARK-0.7 (pre-alpha-5)'
import re
import string
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in c.__dict__.keys():
if name not in namedict:
namelist.append(name)
namedict[name] = 1
return namelist
class GenericScanner:
def __init__(self, flags=0):
pattern = self.reflect()
self.re = re.compile(pattern, re.VERBOSE|flags)
self.index2func = {}
for name, number in self.re.groupindex.items():
self.index2func[number-1] = getattr(self, 't_' + name)
def makeRE(self, name):
doc = getattr(self, name).__doc__
rv = '(?P<%s>%s)' % (name[2:], doc)
return rv
def reflect(self):
rv = []
for name in _namelist(self):
if name[:2] == 't_' and name != 't_default':
rv.append(self.makeRE(name))
rv.append(self.makeRE('t_default'))
return string.join(rv, '|')
def error(self, s, pos):
print("Lexical error at position %s" % pos)
raise SystemExit
def tokenize(self, s):
pos = 0
n = len(s)
while pos < n:
m = self.re.match(s, pos)
if m is None:
self.error(s, pos)
groups = m.groups()
for i in range(len(groups)):
if groups[i] and i in self.index2func:
self.index2func[i](groups[i])
pos = m.end()
def t_default(self, s):
r'( . | \n )+'
print("Specification error: unmatched input")
raise SystemExit
#
# Extracted from GenericParser and made global so that [un]picking works.
#
class _State:
def __init__(self, stateno, items):
self.T, self.complete, self.items = [], [], items
self.stateno = stateno
class GenericParser:
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968. New formulation of
# the parser according to J. Aycock, "Practical Earley Parsing
# and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
# 2001, and J. Aycock and R. N. Horspool, "Practical Earley
# Parsing", unpublished paper, 2001.
#
def __init__(self, start):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
self.augment(start)
self.ruleschanged = 1
_NULLABLE = '\e_'
_START = 'START'
_BOF = '|-'
#
# When pickling, take the time to generate the full state machine;
# some information is then extraneous, too. Unfortunately we
# can't save the rule2func map.
#
def __getstate__(self):
if self.ruleschanged:
#
# XXX - duplicated from parse()
#
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
#
# XXX - should find a better way to do this..
#
changes = 1
while changes:
changes = 0
for k, v in self.edges.items():
if v is None:
state, sym = k
if state in self.states:
self.goto(state, sym)
changes = 1
rv = self.__dict__.copy()
for s in self.states.values():
del s.items
del rv['rule2func']
del rv['nullable']
del rv['cores']
return rv
def __setstate__(self, D):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
start = D['rules'][self._START][0][1][1] # Blech.
self.augment(start)
D['rule2func'] = self.rule2func
D['makeSet'] = self.makeSet_fast
self.__dict__ = D
#
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
# thee not with this; nor shall thee toucheth the _preprocess
# argument to addRule.
#
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func, _preprocess=1):
fn = func
rules = string.split(doc)
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
if _preprocess:
rule, fn = self.preprocess(rule, func)
if lhs in self.rules:
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = 1
def collectRules(self):
for name in _namelist(self):
if name[:2] == 'p_':
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
rule = '%s ::= %s %s' % (self._START, self._BOF, start)
self.addRule(rule, lambda args: args[1], 0)
def computeNull(self):
self.nullable = {}
tbd = []
for rulelist in self.rules.values():
lhs = rulelist[0][0]
self.nullable[lhs] = 0
for rule in rulelist:
rhs = rule[1]
if len(rhs) == 0:
self.nullable[lhs] = 1
continue
#
# We only need to consider rules which
# consist entirely of nonterminal symbols.
# This should be a savings on typical
# grammars.
#
for sym in rhs:
if sym not in self.rules:
break
else:
tbd.append(rule)
changes = 1
while changes:
changes = 0
for lhs, rhs in tbd:
if self.nullable[lhs]:
continue
for sym in rhs:
if not self.nullable[sym]:
break
else:
self.nullable[lhs] = 1
changes = 1
def makeState0(self):
s0 = _State(0, [])
for rule in self.newrules[self._START]:
s0.items.append((rule, 0))
return s0
def finalState(self, tokens):
#
# Yuck.
#
if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
return 1
start = self.rules[self._START][0][1][1]
return self.goto(1, start)
def makeNewRules(self):
worklist = []
for rulelist in self.rules.values():
for rule in rulelist:
worklist.append((rule, 0, 1, rule))
for rule, i, candidate, oldrule in worklist:
lhs, rhs = rule
n = len(rhs)
while i < n:
sym = rhs[i]
if sym not in self.rules or \
not self.nullable[sym]:
candidate = 0
i = i + 1
continue
newrhs = list(rhs)
newrhs[i] = self._NULLABLE+sym
newrule = (lhs, tuple(newrhs))
worklist.append((newrule, i+1,
candidate, oldrule))
candidate = 0
i = i + 1
else:
if candidate:
lhs = self._NULLABLE+lhs
rule = (lhs, rhs)
if lhs in self.newrules:
self.newrules[lhs].append(rule)
else:
self.newrules[lhs] = [ rule ]
self.new2old[rule] = oldrule
def typestring(self, token):
return None
def error(self, token):
print("Syntax error at or near '%s' token" % token)
raise SystemExit
def parse(self, tokens):
sets = [ [(1,0), (2,0)] ]
self.links = {}
if self.ruleschanged:
self.computeNull()
self.newrules = {}
self.new2old = {}
self.makeNewRules()
self.ruleschanged = 0
self.edges, self.cores = {}, {}
self.states = { 0: self.makeState0() }
self.makeState(0, self._BOF)
for i in xrange(len(tokens)):
sets.append([])
if sets[i] == []:
break
self.makeSet(tokens[i], sets, i)
else:
sets.append([])
self.makeSet(None, sets, len(tokens))
#_dump(tokens, sets, self.states)
finalitem = (self.finalState(tokens), 0)
if finalitem not in sets[-2]:
if len(tokens) > 0:
self.error(tokens[i-1])
else:
self.error(None)
return self.buildTree(self._START, finalitem,
tokens, len(sets)-2)
def isnullable(self, sym):
#
# For symbols in G_e only. If we weren't supporting 1.5,
# could just use sym.startswith().
#
return self._NULLABLE == sym[0:len(self._NULLABLE)]
def skip(self, xxx_todo_changeme, pos=0):
(lhs, rhs) = xxx_todo_changeme
n = len(rhs)
while pos < n:
if not self.isnullable(rhs[pos]):
break
pos = pos + 1
return pos
def makeState(self, state, sym):
assert sym is not None
#
# Compute \epsilon-kernel state's core and see if
# it exists already.
#
kitems = []
for rule, pos in self.states[state].items:
lhs, rhs = rule
if rhs[pos:pos+1] == (sym,):
kitems.append((rule, self.skip(rule, pos+1)))
core = sorted(kitems)
tcore = tuple(core)
if tcore in self.cores:
return self.cores[tcore]
#
# Nope, doesn't exist. Compute it and the associated
# \epsilon-nonkernel state together; we'll need it right away.
#
k = self.cores[tcore] = len(self.states)
K, NK = _State(k, kitems), _State(k+1, [])
self.states[k] = K
predicted = {}
edges = self.edges
rules = self.newrules
for X in K, NK:
worklist = X.items
for item in worklist:
rule, pos = item
lhs, rhs = rule
if pos == len(rhs):
X.complete.append(rule)
continue
nextSym = rhs[pos]
key = (X.stateno, nextSym)
if nextSym not in rules:
if key not in edges:
edges[key] = None
X.T.append(nextSym)
else:
edges[key] = None
if nextSym not in predicted:
predicted[nextSym] = 1
for prule in rules[nextSym]:
ppos = self.skip(prule)
new = (prule, ppos)
NK.items.append(new)
#
# Problem: we know K needs generating, but we
# don't yet know about NK. Can't commit anything
# regarding NK to self.edges until we're sure. Should
# we delay committing on both K and NK to avoid this
# hacky code? This creates other problems..
#
if X is K:
edges = {}
if NK.items == []:
return k
#
# Check for \epsilon-nonkernel's core. Unfortunately we
# need to know the entire set of predicted nonterminals
# to do this without accidentally duplicating states.
#
core = predicted.keys()
core.sort()
tcore = tuple(core)
if tcore in self.cores:
self.edges[(k, None)] = self.cores[tcore]
return k
nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
self.edges.update(edges)
self.states[nk] = NK
return k
def goto(self, state, sym):
key = (state, sym)
if key not in self.edges:
#
# No transitions from state on sym.
#
return None
rv = self.edges[key]
if rv is None:
#
# Target state isn't generated yet. Remedy this.
#
rv = self.makeState(state, sym)
self.edges[key] = rv
return rv
def gotoT(self, state, t):
return [self.goto(state, t)]
def gotoST(self, state, st):
rv = []
for t in self.states[state].T:
if st == t:
rv.append(self.goto(state, t))
return rv
def add(self, set, item, i=None, predecessor=None, causal=None):
if predecessor is None:
if item not in set:
set.append(item)
else:
key = (item, i)
if item not in set:
self.links[key] = []
set.append(item)
self.links[key].append((predecessor, causal))
def makeSet(self, token, sets, i):
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
if ttype is not None:
fn, arg = self.gotoT, ttype
else:
fn, arg = self.gotoST, token
for item in cur:
ptr = (item, i)
state, parent = item
add = fn(state, arg)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
nk = self.goto(k, None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
k = self.goto(pstate, lhs)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
self.add(cur, (k, pparent),
i, pptr, why)
nk = self.goto(k, None)
if nk is not None:
self.add(cur, (nk, i))
def makeSet_fast(self, token, sets, i):
#
# Call *only* when the entire state machine has been built!
# It relies on self.edges being filled in completely, and
# then duplicates and inlines code to boost speed at the
# cost of extreme ugliness.
#
cur, next = sets[i], sets[i+1]
ttype = token is not None and self.typestring(token) or None
for item in cur:
ptr = (item, i)
state, parent = item
if ttype is not None:
k = self.edges.get((state, ttype), None)
if k is not None:
#self.add(next, (k, parent), i+1, ptr)
#INLINED --v
new = (k, parent)
key = (new, i+1)
if new not in next:
self.links[key] = []
next.append(new)
self.links[key].append((ptr, None))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(next, (nk, i+1))
#INLINED --v
new = (nk, i+1)
if new not in next:
next.append(new)
#INLINED --^
else:
add = self.gotoST(state, token)
for k in add:
if k is not None:
self.add(next, (k, parent), i+1, ptr)
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
self.add(next, (nk, i+1))
if parent == i:
continue
for rule in self.states[state].complete:
lhs, rhs = rule
for pitem in sets[parent]:
pstate, pparent = pitem
#k = self.goto(pstate, lhs)
k = self.edges.get((pstate, lhs), None)
if k is not None:
why = (item, i, rule)
pptr = (pitem, parent)
#self.add(cur, (k, pparent),
# i, pptr, why)
#INLINED --v
new = (k, pparent)
key = (new, i)
if new not in cur:
self.links[key] = []
cur.append(new)
self.links[key].append((pptr, why))
#INLINED --^
#nk = self.goto(k, None)
nk = self.edges.get((k, None), None)
if nk is not None:
#self.add(cur, (nk, i))
#INLINED --v
new = (nk, i)
if new not in cur:
cur.append(new)
#INLINED --^
def predecessor(self, key, causal):
for p, c in self.links[key]:
if c == causal:
return p
assert 0
def causal(self, key):
links = self.links[key]
if len(links) == 1:
return links[0][1]
choices = []
rule2cause = {}
for p, c in links:
rule = c[2]
choices.append(rule)
rule2cause[rule] = c
return rule2cause[self.ambiguity(choices)]
def deriveEpsilon(self, nt):
if len(self.newrules[nt]) > 1:
rule = self.ambiguity(self.newrules[nt])
else:
rule = self.newrules[nt][0]
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
attr[i] = self.deriveEpsilon(rhs[i])
return self.rule2func[self.new2old[rule]](attr)
def buildTree(self, nt, item, tokens, k):
state, parent = item
choices = []
for rule in self.states[state].complete:
if rule[0] == nt:
choices.append(rule)
rule = choices[0]
if len(choices) > 1:
rule = self.ambiguity(choices)
#print rule
rhs = rule[1]
attr = [None] * len(rhs)
for i in range(len(rhs)-1, -1, -1):
sym = rhs[i]
if sym not in self.newrules:
if sym != self._BOF:
attr[i] = tokens[k-1]
key = (item, k)
item, k = self.predecessor(key, None)
#elif self.isnullable(sym):
elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
attr[i] = self.deriveEpsilon(sym)
else:
key = (item, k)
why = self.causal(key)
attr[i] = self.buildTree(sym, why[0],
tokens, why[1])
item, k = self.predecessor(key, why)
return self.rule2func[self.new2old[rule]](attr)
def ambiguity(self, rules):
#
# XXX - problem here and in collectRules() if the same rule
# appears in >1 method. Also undefined results if rules
# causing the ambiguity appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(rules)):
lhs, rhs = rule = rules[i]
name = self.rule2name[self.new2old[rule]]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list = map(lambda a_b: a_b[1], sortlist)
return rules[name2index[self.resolve(list)]]
def resolve(self, list):
#
# Resolve ambiguity in favor of the shortest RHS.
# Since we walk the tree from the top down, this
# should effectively resolve in favor of a "shift".
#
return list[0]
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start):
GenericParser.__init__(self, start)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: \
lambda args, lhs=lhs, self=self: \
self.buildASTNode(args, lhs)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token): return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[:len(args)] = args
return rv
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
pass
class GenericASTTraversal:
def __init__(self, ast):
self.ast = ast
def typestring(self, node):
return node.type
def prune(self):
raise GenericASTTraversalPruningException
def preorder(self, node=None):
if node is None:
node = self.ast
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
for kid in node:
self.preorder(kid)
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
def postorder(self, node=None):
if node is None:
node = self.ast
for kid in node:
self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def default(self, node):
pass
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
def __init__(self, start, ast):
GenericParser.__init__(self, start)
self.ast = ast
def preprocess(self, rule, func):
rebind = lambda func, self=self: \
lambda args, func=func, self=self: \
self.foundMatch(args, func)
lhs, rhs = rule
rhslist = list(rhs)
rhslist.reverse()
return (lhs, tuple(rhslist)), rebind(func)
def foundMatch(self, args, func):
func(args[-1])
return args[-1]
def match_r(self, node):
self.input.insert(0, node)
children = 0
for child in node:
if children == 0:
self.input.insert(0, '(')
children = children + 1
self.match_r(child)
if children > 0:
self.input.insert(0, ')')
def match(self, ast=None):
if ast is None:
ast = self.ast
self.input = []
self.match_r(ast)
self.parse(self.input)
def resolve(self, list):
#
# Resolve ambiguity in favor of the longest RHS.
#
return list[-1]
def _dump(tokens, sets, states):
for i in range(len(sets)):
print('set', i)
for item in sets[i]:
print('\t', item)
for (lhs, rhs), pos in states[item[0]].items:
print('\t\t', lhs, '::=', end=' ')
print(string.join(rhs[:pos]), end=' ')
print('.', end=' ')
print(string.join(rhs[pos:]))
if i < len(tokens):
print()
print('token', str(tokens[i]))
print()
########NEW FILE########
__FILENAME__ = schema
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import keyword
from collections import defaultdict, namedtuple
import contextlib
from . import asdl
from .asdl import pyasdl
def load(name):
    '''Load an ASDL schema by name; e.g. "Python.asdl".
    Returns a Schema object.
    This tries to load from the version-specific directory first.
    If that fails, it loads from the common directory.
'''
parser, loader = asdl.load_pyschema(name)
python_asdl = loader.load()
return build_schema(python_asdl)
def build_schema(asdl_tree):
schblr = SchemaBuilder()
schblr.visit(asdl_tree)
return schblr.schema
class _rule(namedtuple('rule', ['kind', 'fields'])):
__slots__ = ()
SUM = 0 # sums, e.g. Foo | Bar
    PROD = 1 # products, e.g. (Foo, Bar)
@classmethod
def sum(cls, fields):
return cls(kind=cls.SUM, fields=fields)
@classmethod
def product(cls, fields):
return cls(kind=cls.PROD, fields=fields)
@property
def is_sum(self):
return self.kind == self.SUM
@property
def is_product(self):
return self.kind == self.PROD
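# Illustrative note (added for clarity; not part of the original module):
# a sum definition in the ASDL, e.g. "expr = Name | Num", is recorded by
# SchemaBuilder as _rule.sum(['Name', 'Num']), keyed by the definition
# name, while a product definition such as "arguments = (identifier name)"
# is presumably recorded via _rule.product(...) with the product's fields.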
class _debuginfo_t(namedtuple("_debuginfo_t", ['node', 'field', 'offset'])):
__slots__ = ()
def __str__(self):
        '''Create a string representation to be used in SchemaError
'''
if self.field is not None:
if self.offset is not None:
return "At %s.%s[%d]" % (self.node, self.field, self.offset)
else:
return "At %s.%s" % (self.node, self.field)
else:
return "At %s" % (self.node)
def _debuginfo(node, field=None, offset=None):
assert field or not offset
return _debuginfo_t(node=node, field=field, offset=offset)
class SchemaError(Exception):
def __init__(self, ctxt, msg):
super(SchemaError, self).__init__("%s: %s" % (ctxt, msg))
class Schema(object):
    '''A Schema object that is used to verify an AST.
    It is built by SchemaBuilder.
Usage:
schema.verify(ast)
schema.verify(ast, context=SchemaContext())
'''
def __init__(self, name):
# name of the asdl module
self.name = name
# a dictionary of {type name -> fields}
self.types = defaultdict(list)
# a dictionary of {definition name -> _rule}
self.dfns = {}
# { type name -> fields }
self.attributes = defaultdict(list)
def verify(self, ast, context=None):
        '''Check an AST against the schema; raises SchemaError on failure.
ast --- The ast being verified
context --- [optional] a SchemaContext.
'''
context = context if context is not None else SchemaContext()
return SchemaVerifier(self, context).visit(ast)
def __str__(self):
return "%s(name=%s, types=%s, rules=%s, attributes=%s)" % (
type(self).__name__,
self.name,
self.types,
self.dfns,
self.attributes)
def debug(self):
print(("Schema %s" % self.name))
for k, fs in self.types.items():
print((' --', k, fs))
for k, fs in self.dfns.items():
            if fs.is_sum:
print((' ', k))
for f in fs.fields:
print((' |', f))
else:
print((' ', k, '=', ', '.join(map(str, fs.fields))))
class SchemaContext(object):
'''Keep information about context:
- builtin type handlers
    Users may extend the builtin type handlers; see `builtin_handlers`.
'''
def __init__(self):
self.__builtins = {}
self._add_default_handler()
def _add_default_handler(self):
self.builtin_handlers['identifier'] = _verify_identifier
self.builtin_handlers['int'] = _verify_int
self.builtin_handlers['string'] = _verify_string
self.builtin_handlers['object'] = _verify_object
self.builtin_handlers['bool'] = _verify_bool
@property
def builtin_handlers(self):
'''A dictionary of type name -> handler
A handler is just a callable like the following:
def handler(value):
return is_value_valid(value) # returns boolean
'''
return self.__builtins
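# Illustrative sketch (added for clarity; not part of the original module):
# extending a context with an extra builtin handler. The 'float' type name
# is hypothetical.
def _example_context():
    ctx = SchemaContext()
    ctx.builtin_handlers['float'] = lambda value: isinstance(value, float)
    return ctx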
class SchemaVerifier(ast.NodeVisitor):
    '''An internal class that implements the verification logic.
'''
def __init__(self, schema, context):
'''
schema --- a Schema object that defines a valid AST.
        context --- a SchemaContext object.
'''
self.schema = schema
self.context = context
self._debug_context = None
def visit(self, node):
'''Start verification at the node.
        Verification can begin at any AST node and will recursively
        verify each subtree.
'''
current = self._get_type(node)
self._visit(node, current)
def _visit(self, node, current):
nodename = type(node).__name__
# traverse the children
for field in current:
with self._new_debug_context(node=nodename, field=field.name):
value = getattr(node, str(field.name), None)
if getattr(field, 'seq', False):
# is sequence?
children = getattr(node, str(field.name), None)
if children is None:
raise SchemaError(self._debug_context, "Missing field")
elif not _is_iterable(children):
raise SchemaError(self._debug_context,
"Field must be iterable")
for offset, child in enumerate(children):
with self._new_debug_context(node=nodename,
field=field.name,
offset=offset):
self._sentry_dfn(field.type, child)
elif value is None:
if not getattr(field, 'opt', False):
raise SchemaError(self._debug_context, "Missing field")
else:
pass
elif value is not None:
self._sentry_dfn(field.type, value)
else:
assert False, field
@contextlib.contextmanager
def _new_debug_context(self, **kws):
# push
old = self._debug_context
self._debug_context = _debuginfo(**kws)
yield
# pop
self._debug_context = old
def _sentry_child(self, child, subtypes):
childname = type(child).__name__
if childname not in subtypes:
raise SchemaError(self._debug_context, "Cannot be a %s" % childname)
def _sentry_dfn(self, name, value):
name = str(name)
if name in self.context.builtin_handlers:
# is a builtin type?
handler = self.context.builtin_handlers[name]
if not handler(value):
raise SchemaError(self._debug_context,
"Expected %s but got %s" % \
(name, type(value)))
else:
# not a builtin type?
rule = self._get_dfn(name)
if rule.is_sum:
self._sentry_child(value, rule.fields)
self.visit(value)
else:
assert rule.is_product
name0 = type(value).__name__
if name0 != name:
raise SchemaError(self._debug_context,
"Expecting %s but got %s" % \
(name, name0))
self._visit(value, rule.fields)
def _get_dfn(self, name):
ret = self.schema.dfns.get(str(name))
if ret is None:
raise SchemaError(self._debug_context,
"Missing definition for '%s' in the ASDL" % name)
return ret
def _get_type(self, cls_or_name):
name = (cls_or_name
if isinstance(cls_or_name, str)
else type(cls_or_name).__name__)
fields = self.schema.types.get(name)
attrs = self.schema.attributes.get(name)
if fields is None or attrs is None:
raise SchemaError(self._debug_context,
"Unknown AST node type: %s" % name)
return fields + attrs
class SchemaBuilder(pyasdl.VisitorBase):
    '''A single instance of SchemaBuilder can be used to build different
    Schemas from different ASDLs.
Usage:
schblr = SchemaBuilder()
schblr.visit(some_asdl)
schema = schblr.schema # get the schema object
NOTE:
    - It ignores the attributes of the nodes.
'''
def __init__(self):
super(SchemaBuilder, self).__init__()
def visitModule(self, mod):
self._schema = Schema(str(mod.name))
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
self.schema.dfns[str(name)] = _rule.sum([str(t.name)
for t in sum.types])
self.schema.attributes[name].extend(sum.attributes)
for t in sum.types:
self.visit(t, name, sum.attributes)
def visitConstructor(self, cons, name, attrs=[]):
typename = str(cons.name)
self.schema.types[typename].extend(cons.fields)
self.schema.attributes[typename].extend(attrs)
def visitField(self, field, name):
key = str(field.type)
def visitProduct(self, prod, name):
assert not hasattr(prod, 'attributes')
self.schema.dfns[str(name)] = _rule.product(prod.fields)
for f in prod.fields:
self.visit(f, name)
@property
def schema(self):
return self._schema
#
# Builtin types handler
#
def _verify_identifier(value):
return isinstance(value, str)
def _verify_int(value):
return isinstance(value, int) or isinstance(value, long)
def _verify_string(value):
return isinstance(value, str)
def _verify_object(value):
return isinstance(value, object)
def _verify_bool(value):
return isinstance(value, bool)
#
# Utilities
#
def _is_iterable(value):
try:
iter(value)
except TypeError:
return False
else:
return True
def verify_names(names):
for name in names:
if keyword.iskeyword(name):
raise ValueError("%r is a keyword" % (name,))
def verify_schema_keywords(schema):
"""
Verify schema, checking for the use of any Python keywords.
"""
verify_names(schema.dfns)
verify_names(schema.types)
verify_names([field.name for fields in schema.types.itervalues()
for field in fields])
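if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: load the bundled
    # Python.asdl schema and verify a small AST, mirroring the tests below.
    # It assumes the package is importable so the relative imports above
    # resolve (e.g. run as `python -m numba.asdl.schema`); whether a freshly
    # parsed tree verifies also depends on the running interpreter's AST
    # matching the bundled ASDL.
    py_schema = load('Python.asdl')
    py_schema.verify(ast.parse("x = 1 + 2"))    # a well-formed module passes
    try:
        py_schema.verify(ast.Module())          # missing the 'body' field
    except SchemaError as exc:
        print("caught:", exc)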
########NEW FILE########
__FILENAME__ = support
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from numba.asdl import schema
def build_schema():
'''Build a schema from Python.asdl
'''
return schema.load('Python.asdl')
class SchemaTestCase(unittest.TestCase):
'''A base class for test cases that use the Python.asdl
'''
schema = build_schema()
def capture_error(self):
return self.assertRaises(schema.SchemaError)
########NEW FILE########
__FILENAME__ = test_bad
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Use Python.asdl to test bad ast.
import unittest, ast, sys
from numba.asdl.tests import support
class TestBad(support.SchemaTestCase):
def make_verification_callable(self, the_ast):
def _verify():
self.schema.verify(the_ast)
return _verify
def test_module_missing_body(self):
the_ast = ast.Module()
if sys.version_info < (2,7):
self.assertRaises(support.schema.SchemaError,
self.make_verification_callable(the_ast))
else:
with self.capture_error() as cap:
self.schema.verify(the_ast)
self.assertEqual(str(cap.exception),
"At Module.body: Missing field")
def test_missing_expr_context(self):
a_name = ast.Name(id='x') # missing expr_context
a_binop = ast.BinOp(left=a_name, right =a_name, op=ast.Add())
the_ast = ast.Expr(value=a_binop)
if sys.version_info < (2,7):
self.assertRaises(support.schema.SchemaError,
self.make_verification_callable(the_ast))
else:
with self.capture_error() as cap:
self.schema.verify(the_ast)
self.assertEqual(str(cap.exception),
"At Name.ctx: Missing field")
def test_wrong_arg(self):
bad = ast.Raise() # doesn't matter what is inside
args = ast.arguments(args=[bad], defaults=[])
the_ast = ast.FunctionDef(name="haha",
args=args,
body=[],
decorator_list=[])
if sys.version_info < (2,7):
self.assertRaises(support.schema.SchemaError,
self.make_verification_callable(the_ast))
else:
with self.capture_error() as cap:
self.schema.verify(the_ast)
self.assertIn(
str(cap.exception),
["At arguments.args[0]: Cannot be a Raise",
"At arguments.args[0]: Expecting arg but got Raise"])
def test_return_missing_lineno(self):
the_ast = ast.Return(col_offset=0)
if sys.version_info < (2,7):
self.assertRaises(support.schema.SchemaError,
self.make_verification_callable(the_ast))
else:
with self.capture_error() as cap:
self.schema.verify(the_ast)
self.assertEqual(str(cap.exception),
"At Return.lineno: Missing field")
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_good
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Uses Python.asdl to test against some python script
# and some manually built ast
import unittest
import ast, os, inspect
from numba.asdl.tests import support
class TestGood(support.SchemaTestCase):
def test_self(self):
# Cannot use __file__ because it may get the .pyc file instead
srcfile = inspect.getsourcefile(type(self))
self._test_script(srcfile)
def test_schema_dot_py(self):
self._test_script('../schema.py')
def test_return_optional_value(self):
the_ast = ast.Return(lineno=0,
col_offset=0)
self.schema.verify(the_ast)
the_ast = ast.Return(value=None,
lineno=0,
col_offset=0)
self.schema.verify(the_ast)
the_ast = ast.Return(value=ast.Name(id='x',
ctx=ast.Load(),
lineno=0,
col_offset=0),
lineno=0, col_offset=0)
self.schema.verify(the_ast) # should not raise
def _test_script(self, path):
if path.startswith('.'):
path = os.path.join(os.path.dirname(__file__), path)
with open(path) as the_file:
the_script = the_file.read()
the_ast = ast.parse(the_script)
self.schema.verify(the_ast) # should not raise
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = astsix
import sys
import ast
import pprint
from numba import error
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
class With(ast.stmt):
"""
Node for AST compatibility with python 3.3.
"""
_fields = ['context_expr', 'optional_vars', 'body']
class Raise(ast.stmt):
"""Py2 compatible Raise node"""
_fields = ['type', 'inst', 'tback']
class AST3to2(ast.NodeTransformer):
def _visit_list(self, alist):
new_values = []
for value in alist:
if isinstance(value, ast.AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, ast.AST):
new_values.extend(value)
continue
new_values.append(value)
return new_values
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
old_value[:] = self._visit_list(old_value)
elif isinstance(old_value, ast.AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def __visit_FunctionDef(self, node):
new_node = ast.FunctionDef(args=self.visit_arguments(node.args),
body=self._visit_list(node.body),
decorator_list=self._visit_list(node.decorator_list),
name=node.name)
ast.copy_location(new_node, node)
return new_node
def visit_Index(self, node):
if isinstance(node.value, ast.Ellipsis):
return node.value
return node
def visit_arguments(self, node):
ret = []
for arg_node in node.args:
if isinstance(arg_node, ast.arg):
new_node = ast.Name(ctx=ast.Param(), id=arg_node.arg)
ret.append(new_node)
elif isinstance(arg_node, ast.Name):
ret.append(arg_node)
else:
raise TypeError('Cannot transform node %r' % arg_node)
return ast.arguments(args=ret, defaults=node.defaults,
kwarg=node.kwarg, vararg=node.vararg)
def visit_With(self, node):
"""
Rewrite the With statement.
Python < 3.3:
With(expr context_expr, expr? optional_vars, stmt* body)
Python 3.3:
With(withitem* items, stmt* body)
withitem = (expr context_expr, expr? optional_vars)
"""
if sys.version_info[:2] >= (3, 3):
if len(node.items) > 1:
raise error.NumbaError(node,
"Only one 'with' context is support")
withitem = node.items[0]
new_node = With()
new_node.context_expr = withitem.context_expr
new_node.optional_vars = withitem.optional_vars
new_node.body = node.body
node = ast.copy_location(new_node, node)
self.generic_visit(node)
return node
def visit_Raise(self, node):
if node.cause:
raise error.NumbaError(node, "Cause to 'raise' not supported")
newnode = Raise(type=node.exc, inst=None, tback=None)
return ast.copy_location(newnode, node)
########NEW FILE########
__FILENAME__ = ast_constant_folding
# -*- coding: utf-8 -*-
'''
This module implements constant folding on the AST. It handles simple cases
such as
* 1 + 2 -> 3
* 2 ** 10 -> 1024
* N=1; N + 1 -> 2 (for N is assigned as global variable or a variable
that's only assigned once)
'''
from __future__ import print_function, division, absolute_import
import operator, ast
from functools import reduce
from . import visitors
# shamelessly copied from Cython
compile_time_binary_operators = {
'<' : operator.lt,
'<=' : operator.le,
'==' : operator.eq,
'!=' : operator.ne,
'>=' : operator.ge,
'>' : operator.gt,
'is' : operator.is_,
'is_not': operator.is_not,
'+' : operator.add,
'&' : operator.and_,
'/' : operator.truediv,
'//' : operator.floordiv,
'<<' : operator.lshift,
'%' : operator.mod,
'*' : operator.mul,
'|' : operator.or_,
'**' : operator.pow,
'>>' : operator.rshift,
'-' : operator.sub,
'^' : operator.xor,
'in' : operator.contains,
'not_in': lambda x, y: not operator.contains(x, y),
'and' : operator.and_,
'or' : operator.or_,
}
# shamelessly copied from Cython
compile_time_unary_operators = {
'not' : operator.not_,
'~' : operator.inv,
'-' : operator.neg,
'+' : operator.pos,
}
ast_to_binary_operator = {
ast.Add : '+',
ast.Sub : '-',
ast.Mult : '*',
ast.Div : '/',
ast.FloorDiv: '//',
ast.Pow : '**',
ast.LShift : '<<',
ast.RShift : '>>',
ast.BitOr : '|',
ast.BitAnd : '&',
ast.BitXor : '^',
ast.Lt : '<',
ast.LtE : '<=',
ast.Gt : '>',
ast.GtE : '>=',
ast.Eq : '==',
ast.NotEq : '!=',
ast.Is : 'is',
ast.IsNot : 'is_not',
ast.In : 'in',
ast.NotIn : 'not_in',
ast.And : 'and',
ast.Or : 'or',
}
ast_to_unary_operator = {
ast.Not : 'not',
ast.Invert : '~',
ast.USub : '-',
ast.UAdd : '+',
}
class NotConstExprError(Exception):
pass
class ConstantExprRecognizer(ast.NodeVisitor):
def __init__(self, const_name_set):
self.const_name_set = const_name_set
def visit_BinOp(self, node):
if not(self.visit(node.left) and self.visit(node.right)):
raise NotConstExprError
def visit_BoolOp(self, node):
if not all(self.visit(x) for x in node.values):
raise NotConstExprError
def visit_Compare(self, node):
if not(node.left and all(self.visit(x) for x in node.comparators)):
raise NotConstExprError
def generic_visit(self, node):
if not is_constant(node, self.const_name_set):
raise NotConstExprError
def __call__(self, node):
try:
self.visit(node)
except NotConstExprError as e:
return False
else:
return True
class ConstantMarker(visitors.NumbaVisitor):
'''A conservative constant marker. Conservative because we handle the
simplest cases only.
'''
def __init__(self, *args, **kws):
super(ConstantMarker, self).__init__(*args, **kws)
self._candidates = {} # variable name -> value (rhs) node
self._invalidated = set()
def visit_Assign(self, node):
targets = []
for target in node.targets:
targets.extend(self._flatten_aggregate(target))
# targets contains tuple/list
not_handled = len(node.targets) != len(targets)
for target in targets:
try:
name = target.id
except AttributeError:
# Only for assignment into simple name on the LHS
pass
else:
if name not in self._invalidated:
if not_handled or name in self._candidates:
self._invalidate(name)
else:
self._candidates[name] = node.value
def visit_AugAssign(self, node):
try:
name = node.target.id
except AttributeError:
# Only for assignment into simple name on the LHS
pass
else:
if name in self._candidates:
self._invalidate(name)
def visit_For(self, node):
targets = self._flatten_aggregate(node.target)
for t in targets:
self._invalidate(t.id)
for instr in node.body:
self.visit(instr)
def _flatten_aggregate(self, node):
assert isinstance(node.ctx, ast.Store)
if isinstance(node, ast.Tuple) or isinstance(node, ast.List):
ret = []
for i in node.elts:
ret.extend(self._flatten_aggregate(i))
return ret
else:
return [node]
def _invalidate(self, name):
self._invalidated.add(name)
try:
del self._candidates[name]
except KeyError:
pass
def get_constants(self):
'''Return a set of constant variable names
'''
const_names = set(self.varnames).difference(self._invalidated)
const_names |= set(self.func_globals)
constexpr_recognizer = ConstantExprRecognizer(const_names)
retvals = []
for k, v in self._candidates.items():
if constexpr_recognizer(v):
retvals.append(k)
return set(retvals)
class ConstantFolder(visitors.NumbaTransformer):
'''Perform constant folding on AST.
NOTE: Forbids assignment to True, False, None.
'''
def __init__(self, *args, **kws):
assert not hasattr(self, 'constvars') # not overwriting
assert not hasattr(self, 'constvalues') # not overwriting
self.constvars = kws.pop('constvars')
self.constvalues = {}
super(ConstantFolder, self).__init__(*args, **kws)
def visit_BinOp(self, node):
lval = node.left = self.visit(node.left)
rval = node.right = self.visit(node.right)
if self.is_constant(lval) and self.is_constant(rval):
return self.eval_binary_operation(node.op, lval, rval)
else:
return node
def visit_BoolOp(self, node):
values = node.values = [self.visit(nd) for nd in node.values]
if all(self.is_constant(v) for v in values):
operation = lambda x, y: self.eval_binary_operation(node.op, x, y)
return reduce(operation, values)
else:
return node
def visit_Compare(self, node):
left = node.left = self.visit(node.left)
comparators = node.comparators = [self.visit(nd)
for nd in node.comparators]
operands = [left] + comparators
operators = iter(reversed(node.ops))
def operation(x, y):
op = next(operators)
return self.eval_binary_operation(op, x, y)
if all(self.is_constant(nd) for nd in operands):
return reduce(operation, operands)
else:
return node
def visit_Assign(self, node):
'''Store the rhs value so we can inline them in future reference.
TODO: Remove assignment of constant
'''
# FIXME: does not handle assign to tuple
names = []
value = node.value = self.visit(node.value)
for left in node.targets:
try:
name = left.id
except AttributeError:
return node # escape
else:
names.append(name)
ct = 0
for name in names:
if name in self.constvars: # is known constant
assert name not in self.constvalues
self.constvalues[name] = value
self.constvars.remove(name)
ct += 1
return node
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load) and self.is_constant(node):
try:
return self.constvalues[node.id]
except KeyError:
val = self.func_globals.get(node.id)
if val and is_simple_value(val):
if isinstance(val, (int, long, float)):
return ast.Num(n=val)
elif isinstance(val, bool):
name = 'True' if val else 'False'
return ast.Name(id=name, ctx=ast.Load())
return node
def eval_binary_operation(self, op, left, right):
        '''Evaluate the constant expression and return an ast.Num instance
containing the result.
'''
operator = ast_to_binary_operator[type(op)]
func = compile_time_binary_operators[operator]
ret = func(self.valueof(left), self.valueof(right))
if ret is True:
node = ast.Name(id='True', ctx=ast.Load())
elif ret is False:
node = ast.Name(id='False', ctx=ast.Load())
elif ret is None:
node = ast.Name(id='None', ctx=ast.Load())
else:
node = ast.Num(n=ret)
return ast.copy_location(node, left)
def valueof(self, node):
'''Get constant value from AST node.
'''
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
if node.id == 'True':
return True
elif node.id == 'False':
return False
elif node.id == 'None':
return None
elif node.id in self.constvalues:
return self.valueof(self.constvalues[node.id])
else:
value = self.func_globals[node.id]
if not is_simple_value(value):
raise ValueError("%s is not a simple value.")
return value
raise ValueError("node %s is not a has constant value" % node)
def is_constant(self, node):
globals = set(self.func_globals).difference(self.local_names)
return is_constant(node, globals | set(self.constvalues))
def is_constant(node, constants=set()):
if isinstance(node, ast.Num):
return True
elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
if node.id in ['True', 'False', 'None']:
return True
elif node.id in constants:
return True
return False
def is_simple_value(value):
return ( isinstance(value, int)
or isinstance(value, long)
or isinstance(value, float)
or value is True
or value is False
or value is None)
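if __name__ == '__main__':
    # Hedged sketch, not part of the original module: fold a single constant
    # BinOp by hand with the lookup tables defined above. It assumes the
    # module is importable (the relative `visitors` import must resolve).
    expr = ast.parse('2 ** 10', mode='eval').body       # BinOp(2, Pow, 10)
    op_symbol = ast_to_binary_operator[type(expr.op)]   # '**'
    fold = compile_time_binary_operators[op_symbol]     # operator.pow
    print(fold(2, 10))                                  # -> 1024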
########NEW FILE########
__FILENAME__ = closures
# -*- coding: utf-8 -*-
"""
This module provides support for closures and inner functions.
@autojit
def outer():
a = 10 # this is a cellvar
@jit('void()')
def inner():
print a # this is a freevar
inner()
a = 12
return inner
The 'inner' function closes over the outer scope. Each function with
cellvars packs them into a heap-allocated structure, the closure scope.
The closure scope is passed into 'inner' when called from within outer.
The execution of 'def' creates a NumbaFunction, which has itself as the
m_self attribute. So when 'inner' is invoked from Python, the numba
wrapper function gets called with NumbaFunction object and the args
tuple. The closure scope is then set in NumbaFunction.func_closure.
The closure scope is an extension type with the cellvars as attributes.
Closure scopes are chained together, since multiple inner scopes may need
to share a single outer scope. E.g.
def outer(a):
def inner(b):
def inner_inner():
print a, b
return inner_inner
return inner(1), inner(2)
We have three live closure scopes here:
scope_outer = { 'a': a } # call to 'outer'
scope_inner_1 = { 'scope_outer': scope_outer, 'b': 1 } # call to 'inner' with b=1
scope_inner_2 = { 'scope_outer': scope_outer, 'b': 2 } # call to 'inner' with b=2
Function 'inner_inner' defines no new scope, since it contains no cellvars.
But it does contain a freevar from scope_outer and scope_inner, so it gets
scope_inner passed as first argument. scope_inner has a reference to scope
outer, so all variables can be resolved.
These scopes are instances of a numba extension class.
"""
from __future__ import print_function, division, absolute_import
import ast
import ctypes
import logging
import numba.decorators
from numba import *
from numba import error
from numba import visitors
from numba import nodes
from numba import typesystem
from numba import typedefs
from numba import numbawrapper
from numba.exttypes import extension_types
from numba import utils
from numba.type_inference import module_type_inference
from numba.symtab import Variable
from numba.exttypes import attributetable
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def is_closure_signature(func_signature):
return (func_signature is not None and
func_signature.args and
func_signature.args[0].is_closure_scope)
#------------------------------------------------------------------------
# Closure Signature Validation (type inference of outer function)
#------------------------------------------------------------------------
# Handle closures during type inference. Mostly performs error checking
# for closure signatures.
def err_decorator(decorator):
raise error.NumbaError(
decorator, "Only @jit and @autojit and signature decorators "
"are supported")
def check_valid_argtype(argtype_node, argtype):
if not isinstance(argtype, typesystem.Type):
raise error.NumbaError(argtype_node, "Invalid type: %r" % (argtype,))
def assert_constant(visit_func, decorator, result_node):
result = visit_func(result_node)
if not result.variable.is_constant:
raise error.NumbaError(decorator, "Expected a constant")
return result.variable.constant_value
def parse_argtypes(visit_func, decorator, func_def, jit_args):
argtypes_node = jit_args['argtypes']
if argtypes_node is None:
raise error.NumbaError(func_def.args[0],
"Expected an argument type")
argtypes = assert_constant(visit_func, decorator, argtypes_node)
if not isinstance(argtypes, (list, tuple)):
raise error.NumbaError(argtypes_node,
'Invalid argument for argtypes')
for argtype in argtypes:
check_valid_argtype(argtypes_node, argtype)
return argtypes
def parse_restype(visit_func, decorator, jit_args):
restype_node = jit_args['restype']
if restype_node is not None:
restype = assert_constant(visit_func, decorator, restype_node)
if isinstance(restype, (str, unicode)):
signature = utils.process_signature(restype)
restype = signature.return_type
argtypes = signature.args
check_valid_argtype(restype_node, restype)
for argtype in argtypes:
check_valid_argtype(restype_node, argtype)
restype = restype(*argtypes)
else:
check_valid_argtype(restype_node, restype)
else:
raise error.NumbaError(restype_node, "Return type expected")
return restype
def handle_jit_decorator(visit_func, func_def, decorator):
jit_args = module_type_inference.parse_args(
decorator, ['restype', 'argtypes', 'backend',
'target', 'nopython'])
if decorator.args or decorator.keywords:
restype = parse_restype(visit_func, decorator, jit_args)
if restype is not None and restype.is_function:
signature = restype
else:
argtypes = parse_argtypes(visit_func, decorator, func_def, jit_args)
signature = typesystem.function(restype, argtypes,
name=func_def.name)
else: #elif func_def.args:
raise error.NumbaError(decorator,
"The argument types and return type "
"need to be specified")
# TODO: Analyse closure at call or outer function return time to
# TODO: infer return type
# TODO: parse out nopython argument
return signature
def check_signature_decorator(visit_func, decorator):
dec = visit_func(decorator)
type = dec.variable.type
if type.is_cast and type.dst_type.is_function:
return type.dst_type
else:
err_decorator(decorator)
def process_decorators(env, visit_func, node):
if not node.decorator_list:
func_env = env.translation.get_env(node)
if func_env:
return func_env.func_signature
raise error.NumbaError(
node, "Closure must be decorated with 'jit' or 'autojit'")
if len(node.decorator_list) > 1:
raise error.NumbaError(
node, "Only one decorator may be specified for "
"closure (@jit/@autojit)")
decorator, = node.decorator_list
if isinstance(decorator, ast.Name):
decorator_name = decorator.id
elif (not isinstance(decorator, ast.Call) or not
isinstance(decorator.func, ast.Name)):
err_decorator(decorator)
else:
decorator_name = decorator.func.id
if decorator_name not in ('jit', 'autojit'):
signature = check_signature_decorator(visit_func, decorator)
else:
if decorator_name == 'autojit':
raise error.NumbaError(
decorator, "Dynamic closures not yet supported, use @jit")
signature = handle_jit_decorator(visit_func, node, decorator)
del node.decorator_list[:]
if len(signature.args) != len(node.args.args):
raise error.NumbaError(
decorator,
"Expected %d arguments type(s), got %d" % (
len(signature.args), len(node.args.args)))
return signature
#------------------------------------------------------------------------
# Closure Type Inference
#------------------------------------------------------------------------
def outer_scope_field(scope_type):
return scope_type.attribute_table.to_struct().fields[0]
def lookup_scope_attribute(cur_scope, var_name, ctx=None):
"""
Look up a variable in the closure scope
"""
ctx = ctx or ast.Load()
scope_type = cur_scope.type
outer_scope_name, outer_scope_type = outer_scope_field(scope_type)
if var_name in scope_type.attributedict:
return nodes.ExtTypeAttribute.from_known_attribute(
value=cur_scope, attr=var_name, ctx=ctx, ext_type=scope_type)
elif outer_scope_type.is_closure_scope:
scope = nodes.ExtTypeAttribute.from_known_attribute(
value=cur_scope, attr=outer_scope_name, ctx=ctx, ext_type=scope_type)
try:
return lookup_scope_attribute(scope, var_name, ctx)
except error.InternalError as e:
# Re-raise with full scope type
pass
# This indicates a bug
raise error.InternalError(
scope_type, "Unable to look up attribute", var_name)
CLOSURE_SCOPE_ARG_NAME = '__numba_closure_scope'
class ClosureTransformer(visitors.NumbaTransformer):
@property
def outer_scope(self):
outer_scope = None
if CLOSURE_SCOPE_ARG_NAME in self.symtab:
outer_scope = ast.Name(id=CLOSURE_SCOPE_ARG_NAME, ctx=ast.Load())
outer_scope.variable = self.symtab[CLOSURE_SCOPE_ARG_NAME]
outer_scope.type = outer_scope.variable.type
return outer_scope
class ClosureTypeInferer(ClosureTransformer):
"""
    Runs just after type inference, once the types of the outer variables
    are resolved.
1) run type inferencer on inner functions
2) build scope extension types pre-order
3) generate nodes to instantiate scope extension type at call time
"""
def __init__(self, *args, **kwargs):
super(ClosureTypeInferer, self).__init__(*args, **kwargs)
self.warn = kwargs["warn"]
def visit_FunctionDef(self, node):
if node.closure_scope is None:
# Process inner functions and determine cellvars and freevars
# codes = [c for c in self.constants
# if isinstance(c, types.CodeType)]
process_closures(self.env, node, self.symtab,
func_globals=self.func_globals,
closures=self.closures,
warn=self.warn)
# cellvars are the variables we own
cellvars = dict((name, var) for name, var in self.symtab.iteritems()
if var.is_cellvar)
node.cellvars = cellvars
logger.debug("Cellvars in function %s: %s", node.name, cellvars)
outer_scope = self.outer_scope
if outer_scope:
outer_scope_type = outer_scope.type
else:
outer_scope_type = None
if not cellvars:
# No cellvars, so use parent closure scope if this is a closure
if outer_scope:
self.update_closures(node, outer_scope_type, None)
return self.visit_func_children(node)
# Create closure scope extension type
cellvar_fields = [(name, var.type)
for name, var in cellvars.iteritems()]
fields = numba.struct(cellvar_fields).fields
if outer_scope:
fields.insert(0, ('__numba_base_scope', outer_scope_type))
class py_class(object):
pass
func_name = self.func_name
py_class.__name__ = '%s_scope' % func_name
scope_type = typesystem.ClosureScopeType(py_class, outer_scope_type)
scope_type.unmangled_symtab = dict(fields)
AttrTable = attributetable.AttributeTable
scope_type.attribute_table = AttrTable.from_list(py_class=None,
attributes=fields)
ext_type = extension_types.create_new_extension_type(
type, func_name , (object,), {}, scope_type, None)
# Instantiate closure scope
logger.debug("Generate closure %s %s %s", node.name, scope_type,
outer_scope)
cellvar_scope = nodes.InstantiateClosureScope(
node, ext_type, scope_type, outer_scope)
node.body.insert(0, cellvar_scope)
self.update_closures(node, scope_type, ext_type)
return self.visit_func_children(node)
def update_closures(self, func_def, scope_type, ext_type):
"""
Patch closures to get the closure scope as the first argument.
"""
for closure in func_def.closures:
# closure.scope_type = scope_type
closure.func_def.scope_type = scope_type
closure.ext_type = ext_type
# patch function parameters
param = ast.Name(id=CLOSURE_SCOPE_ARG_NAME, ctx=ast.Param())
param.variable = Variable(scope_type, is_local=True)
param.type = param.variable.type
closure.symtab[CLOSURE_SCOPE_ARG_NAME] = param.variable
closure.func_def.args.args.insert(0, param)
closure.need_closure_scope = True
# patch closure signature
closure.type.add_scope_arg(scope_type)
closure.func_env.func_signature = closure.type.signature
def get_locals(symtab):
return dict((name, var) for name, var in symtab.iteritems()
if var.is_local)
def process_closures(env, outer_func_def, outer_symtab, **kwds):
"""
Process closures recursively and for each variable in each function
determine whether it is a freevar, a cellvar, a local or otherwise.
"""
import numba.pipeline
outer_symtab = get_locals(outer_symtab)
# closure_scope is set on the FunctionDef by TypeInferer
if outer_func_def.closure_scope is not None:
closure_scope = dict(outer_func_def.closure_scope, **outer_symtab)
else:
closure_scope = outer_symtab
for closure in outer_func_def.closures:
logger.debug("process closures: %s %s", outer_func_def.name,
closure.func_def.name)
closure_py_func = None # closure.py_func
func_env, _ = numba.pipeline.run_pipeline2(
env,
closure_py_func,
closure.func_def,
closure.type.signature,
closure_scope=closure_scope,
function_globals=env.translation.crnt.function_globals,
pipeline_name='type_infer',
is_closure=True,
**kwds)
closure.func_env = func_env
closure.symtab = func_env.symtab
env.translation.push_env(func_env)
process_closures(env, closure.func_def, func_env.symtab, **kwds)
env.translation.pop()
#------------------------------------------------------------------------
# Closure Lowering
#------------------------------------------------------------------------
class ClosureSpecializer(ClosureTransformer):
"""
Lowering of closure instantiation and calling.
- Instantiates the closure scope and makes the necessary assignments
- Rewrites local variable accesses to accesses on the instantiated scope
- Instantiate function with closure scope
Also rewrite calls to closures.
"""
def __init__(self, *args, **kwargs):
super(ClosureSpecializer, self).__init__(*args, **kwargs)
if not self.ast.cellvars:
self.ast.cur_scope = self.outer_scope
def _load_name(self, var_name, is_cellvar=False):
src = ast.Name(var_name, ast.Load())
src.variable = Variable.from_variable(self.symtab[var_name])
src.variable.is_cellvar = is_cellvar
src.type = src.variable.type
return src
def visit_InstantiateClosureScope(self, node):
"""
Instantiate a closure scope.
After instantiation, assign the parent scope and all function
arguments that belong in the scope to the scope.
"""
ctor = nodes.objconst(node.ext_type.__new__)
ext_type_arg = nodes.objconst(node.ext_type)
create_scope = nodes.ObjectCallNode(
signature=node.scope_type(object_), func=ctor,
args=[ext_type_arg])
create_scope = create_scope.cloneable
scope = create_scope.clone
stats = [create_scope]
# Chain outer scope - if present - to current scope
outer_scope = self.outer_scope
if outer_scope:
outer_scope_name, outer_scope_type = outer_scope_field(scope.type)
dst = lookup_scope_attribute(scope, outer_scope_name,
ctx=ast.Store())
assmt = ast.Assign(targets=[dst], value=outer_scope)
stats.append(assmt)
# Assign function arguments that are cellvars
for arg in self.ast.args.args:
name = arg.id
if name in node.scope_type.unmangled_symtab:
dst = lookup_scope_attribute(scope, name, ast.Store())
src = self._load_name(name)
src.variable.assign_in_closure_scope = True
assmt = ast.Assign(targets=[dst], value=src)
stats.append(assmt)
logger.debug("instantiating %s", scope.type)
self.ast.cur_scope = scope
return self.visit(nodes.ExpressionNode(stmts=stats, expr=scope))
def get_qname(self, closure_node):
ns = '.'.join([self.module_name, self.func_name])
closure_name = closure_node.name
qname = "%s.__closure__.%s" % (ns, closure_name)
return qname
def visit_ClosureNode(self, node):
"""
Compile the inner function.
"""
# Compile closure, skip CFA and type inference
node.func_env.qualified_name = self.get_qname(node)
numba.pipeline.run_env(self.env, node.func_env,
pipeline_name='compile')
translator = node.func_env.translator
# translator.link()
node.lfunc = translator.lfunc
node.lfunc_pointer = translator.lfunc_pointer
if node.need_numba_func:
return self.create_numba_function(node, node.func_env)
else:
func_name = node.func_def.name
self.symtab[func_name] = Variable(name=func_name, type=node.type,
is_local=True)
# return nodes.LLVMValueRefNode(node.type, node.lfunc)
# TODO: Remove assignment altogether!
# return nodes.NoneNode()
return nodes.ObjectInjectNode(None, type=object_)
def create_numba_function(self, node, func_env):
from numba.codegen import llvmwrapper
closure_scope = self.ast.cur_scope
if closure_scope is None:
closure_scope = nodes.NULL
scope_type = void.pointer()
else:
assert node.func_def.args.args[0].variable.type
scope_type = closure_scope.type
self.env.translation.push_env(func_env)
try:
node.wrapper_func, node.wrapper_lfunc, methoddef = (
llvmwrapper.build_wrapper_function(self.env))
finally:
self.env.translation.pop()
# Keep methoddef alive
# assert methoddef in node.py_func.live_objects
modname = self.module_name
self.keep_alive(modname)
# Create function signature with closure scope at runtime
create_numbafunc_signature = node.type(
void.pointer(), # PyMethodDef *ml
object_, # PyObject *module
void.pointer(), # PyObject *code
scope_type, # PyObject *closure
void.pointer(), # void *native_func
object_, # PyObject *native_signature
object_, # PyObject *keep_alive
)
# Create function with closure scope at runtime
create_numbafunc = nodes.ptrfromint(
numbawrapper.NumbaFunction_NewEx_pointer,
create_numbafunc_signature.pointer())
methoddef_p = ctypes.cast(ctypes.byref(methoddef),
ctypes.c_void_p).value
args = [
nodes.const(methoddef_p, void.pointer()),
nodes.const(modname, object_),
nodes.NULL,
closure_scope,
nodes.const(node.lfunc_pointer, void.pointer()),
nodes.const(node.type.signature, object_),
nodes.NULL, # nodes.const(node.py_func, object_),
]
func_call = nodes.NativeFunctionCallNode(
signature=create_numbafunc_signature,
function_node=create_numbafunc,
args=args)
result = func_call
#stats = [nodes.inject_print(nodes.const("calling...", c_string_type)),
# result]
#result = ast.Suite(body=stats)
result = self.visit(result)
return result
def visit_Name(self, node):
"Resolve cellvars and freevars"
is_param = isinstance(node.ctx, ast.Param)
if not is_param and (node.variable.is_cellvar or
node.variable.is_freevar):
logger.debug("Function %s, lookup %s in scope %s: %s",
self.ast.name, node.id, self.ast.cur_scope.type,
self.ast.cur_scope.type.attribute_table)
attr = lookup_scope_attribute(self.ast.cur_scope,
var_name=node.id, ctx=node.ctx)
return self.visit(attr)
else:
return node
def retrieve_closure_from_numbafunc(self, node):
"""
Retrieve the closure scope from ((NumbaFunctionObject *)
numba_func).func_closure
"""
# TODO: use llvmwrapper.get_closure_scope()
pointer = nodes.ptrfromobj(node.func)
type = typedefs.NumbaFunctionObject.ref()
closure_obj_struct = nodes.CoercionNode(pointer, type)
cur_scope = nodes.StructAttribute(closure_obj_struct, 'func_closure',
ctx=ast.Load(), type=type)
return cur_scope
def visit_ClosureCallNode(self, node):
if node.closure_type.closure.need_closure_scope:
if (self.ast.cur_scope is None or
self.ast.cur_scope.type != node.closure_type):
# Call to closure from outside outer function
# TODO: optimize calling a closure from an inner function, e.g.
# def outer():
# a = 1
# def inner1(): print a
# def inner2(): inner1()
cur_scope = self.retrieve_closure_from_numbafunc(node)
else:
# Call to closure from within outer function
cur_scope = self.ast.cur_scope
# node.args[0] = cur_scope
node.args.insert(0, cur_scope)
self.generic_visit(node)
return node
def visit_ClosureScopeLoadNode(self, node):
return self.ast.cur_scope or nodes.NULL_obj
########NEW FILE########
__FILENAME__ = codeutils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm
def llvm_alloca(lfunc, builder, ltype, name='', change_bb=True):
"Use alloca only at the entry bock of the function"
if change_bb:
bb = builder.basic_block
builder.position_at_beginning(lfunc.get_entry_basic_block())
lstackvar = builder.alloca(ltype, name=name)
if change_bb:
builder.position_at_end(bb)
return lstackvar
def if_badval(translator, llvm_result, badval, callback,
cmp=llvm.core.ICMP_EQ, name='cleanup'):
# Use llvm_cbuilder :(
b = translator.builder
bb_true = translator.append_basic_block('%s.if.true' % name)
bb_endif = translator.append_basic_block('%s.if.end' % name)
test = b.icmp(cmp, llvm_result, badval)
b.cbranch(test, bb_true, bb_endif)
b.position_at_end(bb_true)
callback(b, bb_true, bb_endif)
# b.branch(bb_endif)
b.position_at_end(bb_endif)
return llvm_result
########NEW FILE########
__FILENAME__ = coerce
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm
from numba import *
from numba import string_ as c_string_type
from numba import nodes
from numba.typesystem import is_obj, promote_to_native
from numba.codegen.codeutils import llvm_alloca, if_badval
from numba.codegen import debug
class ObjectCoercer(object):
"""
Object that knows how to convert to/from objects using Py_BuildValue
and PyArg_ParseTuple.
"""
# TODO: do all of this in a specializer
type_to_buildvalue_str = {
char: "c",
short: "h",
int_: "i",
long_: "l",
longlong: "L",
Py_ssize_t: "n",
npy_intp: "n", # ?
size_t: "n", # ?
uchar: "B",
ushort: "H",
uint: "I",
ulong: "k",
ulonglong: "K",
float_: "f",
double: "d",
complex128: "D",
object_: "O",
bool_: "b", # ?
char.pointer(): "s",
char.pointer() : "s",
c_string_type: "s",
}
def __init__(self, translator):
self.context = translator.context
self.translator = translator
self.builder = translator.builder
self.llvm_module = self.builder.basic_block.function.module
sig, self.py_buildvalue = self.context.external_library.declare(
self.llvm_module, 'Py_BuildValue')
sig, self.pyarg_parsetuple = self.context.external_library.declare(
self.llvm_module, 'PyArg_ParseTuple')
sig, self.pyerr_clear = self.context.external_library.declare(
self.llvm_module, 'PyErr_Clear')
self.function_cache = translator.function_cache
self.NULL = self.translator.visit(nodes.NULL_obj)
def check_err(self, llvm_result, callback=None, cmp=llvm.core.ICMP_EQ,
pos_node=None):
"""
        Check for errors. If the result is NULL, an error should have been set.
Jumps to translator.error_label if an exception occurred.
"""
assert llvm_result.type.kind == llvm.core.TYPE_POINTER, llvm_result.type
int_result = self.translator.builder.ptrtoint(llvm_result,
llvm_types._intp)
NULL = llvm.core.Constant.int(int_result.type, 0)
if callback:
if_badval(self.translator, int_result, NULL,
callback=callback or default_callback, cmp=cmp)
else:
test = self.builder.icmp(cmp, int_result, NULL)
name = 'no_error'
if hasattr(pos_node, 'lineno'):
name = 'no_error_%s' % error.format_pos(pos_node).rstrip(": ")
bb = self.translator.append_basic_block(name)
self.builder.cbranch(test, self.translator.error_label, bb)
self.builder.position_at_end(bb)
return llvm_result
def check_err_int(self, llvm_result, badval):
llvm_badval = llvm.core.Constant.int(llvm_result.type, badval)
if_badval(self.translator, llvm_result, llvm_badval,
callback=lambda b, *args: b.branch(self.translator.error_label))
def _create_llvm_string(self, str):
return self.translator.visit(nodes.ConstNode(str, char.pointer()))
def lstr(self, types, fmt=None):
"Get an llvm format string for the given types"
typestrs = []
result_types = []
for type in types:
if is_obj(type):
type = object_
elif type.is_int:
type = promote_to_native(type)
result_types.append(type)
typestrs.append(self.type_to_buildvalue_str[type])
str = "".join(typestrs)
if fmt is not None:
str = fmt % str
if debug.debug_conversion:
self.translator.puts("fmt: %s" % str)
result = self._create_llvm_string(str)
return result_types, result
def buildvalue(self, types, *largs, **kwds):
# The caller should check for errors using check_err or by wrapping
# its node in an ObjectTempNode
name = kwds.get('name', '')
fmt = kwds.get('fmt', None)
types, lstr = self.lstr(types, fmt)
largs = (lstr,) + largs
if debug.debug_conversion:
self.translator.puts("building... %s" % name)
# func_type = object_(*types).pointer()
# py_buildvalue = self.builder.bitcast(
# self.py_buildvalue, func_type.to_llvm(self.context))
py_buildvalue = self.py_buildvalue
result = self.builder.call(py_buildvalue, largs, name=name)
if debug.debug_conversion:
self.translator.puts("done building... %s" % name)
nodes.print_llvm(self.translator.env, object_, result)
self.translator.puts("--------------------------")
return result
def npy_intp_to_py_ssize_t(self, llvm_result, type):
return llvm_result, type
def py_ssize_t_to_npy_intp(self, llvm_result, type):
return llvm_result, type
def convert_single_struct(self, llvm_result, type):
types = []
largs = []
for i, (field_name, field_type) in enumerate(type.fields):
types.extend((c_string_type, field_type))
largs.append(self._create_llvm_string(field_name))
struct_attr = self.builder.extract_value(llvm_result, i)
largs.append(struct_attr)
return self.buildvalue(types, *largs, name='struct', fmt="{%s}")
def convert_single(self, type, llvm_result, name=''):
"Generate code to convert an LLVM value to a Python object"
llvm_result, type = self.npy_intp_to_py_ssize_t(llvm_result, type)
if type.is_struct:
return self.convert_single_struct(llvm_result, type)
elif type.is_complex:
# We have a Py_complex value, construct a Py_complex * temporary
new_result = llvm_alloca(self.translator.lfunc, self.builder,
llvm_result.type, name='complex_temp')
self.builder.store(llvm_result, new_result)
llvm_result = new_result
return self.buildvalue([type], llvm_result, name=name)
def build_tuple(self, types, llvm_values):
"Build a tuple from a bunch of LLVM values"
assert len(types) == len(llvm_values)
        return self.buildvalue(types, *llvm_values, name='tuple', fmt="(%s)")
def build_list(self, types, llvm_values):
"Build a tuple from a bunch of LLVM values"
assert len(types) == len(llvm_values)
return self.buildvalue(types, *llvm_values, name='list', fmt="[%s]")
def build_dict(self, key_types, value_types, llvm_keys, llvm_values):
"Build a dict from a bunch of LLVM values"
types = []
largs = []
for k, v, llvm_key, llvm_value in zip(key_types, value_types,
llvm_keys, llvm_values):
types.append(k)
types.append(v)
largs.append(llvm_key)
largs.append(llvm_value)
return self.buildvalue(types, *largs, name='dict', fmt="{%s}")
def parse_tuple(self, lstr, llvm_tuple, types, name=''):
"Unpack a Python tuple into typed llvm variables"
lresults = []
for i, type in enumerate(types):
var = llvm_alloca(self.translator.lfunc, self.builder,
type.to_llvm(self.context),
name=name and "%s%d" % (name, i))
lresults.append(var)
largs = [llvm_tuple, lstr] + lresults
if debug.debug_conversion:
self.translator.puts("parsing tuple... %s" % (types,))
nodes.print_llvm(self.translator.env, object_, llvm_tuple)
parse_result = self.builder.call(self.pyarg_parsetuple, largs)
self.check_err_int(parse_result, 0)
# Some conversion functions don't reset the exception state...
# self.builder.call(self.pyerr_clear, [])
if debug.debug_conversion:
self.translator.puts("successfully parsed tuple...")
return [self.builder.load(result) for result in lresults]
def to_native(self, type, llvm_tuple, name=''):
"Generate code to convert a Python object to an LLVM value"
types, lstr = self.lstr([type])
lresult, = self.parse_tuple(lstr, llvm_tuple, [type], name=name)
return lresult
########NEW FILE########
__FILENAME__ = complexsupport
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from numba import *
class ComplexSupportMixin(object):
"Support for complex numbers"
def _generate_complex_op(self, op, arg1, arg2):
(r1, i1), (r2, i2) = self._extract(arg1), self._extract(arg2)
real, imag = op(r1, i1, r2, i2)
return self._create_complex(real, imag)
def _extract(self, value):
"Extract the real and imaginary parts of the complex value"
return (self.builder.extract_value(value, 0),
self.builder.extract_value(value, 1))
def _create_complex(self, real, imag):
assert real.type == imag.type, (str(real.type), str(imag.type))
complex = llvm.core.Constant.undef(llvm.core.Type.struct([real.type,
real.type]))
complex = self.builder.insert_value(complex, real, 0)
complex = self.builder.insert_value(complex, imag, 1)
return complex
def _promote_complex(self, src_type, dst_type, value):
"Promote a complex value to value with a larger or smaller complex type"
real, imag = self._extract(value)
if dst_type.is_complex:
dst_type = dst_type.base_type
dst_ltype = dst_type.to_llvm(self.context)
real = self.caster.cast(real, dst_ltype)
imag = self.caster.cast(imag, dst_ltype)
return self._create_complex(real, imag)
def _complex_add(self, arg1r, arg1i, arg2r, arg2i):
return (self.builder.fadd(arg1r, arg2r),
self.builder.fadd(arg1i, arg2i))
def _complex_sub(self, arg1r, arg1i, arg2r, arg2i):
return (self.builder.fsub(arg1r, arg2r),
self.builder.fsub(arg1i, arg2i))
def _complex_mul(self, arg1r, arg1i, arg2r, arg2i):
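        # (r1 + i1*j) * (r2 + i2*j) = (r1*r2 - i1*i2) + (i1*r2 + r1*i2)*j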
return (self.builder.fsub(self.builder.fmul(arg1r, arg2r),
self.builder.fmul(arg1i, arg2i)),
self.builder.fadd(self.builder.fmul(arg1i, arg2r),
self.builder.fmul(arg1r, arg2i)))
def _complex_div(self, arg1r, arg1i, arg2r, arg2i):
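        # (r1 + i1*j) / (r2 + i2*j)
        #     = ((r1*r2 + i1*i2) + (i1*r2 - r1*i2)*j) / (r2*r2 + i2*i2)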
divisor = self.builder.fadd(self.builder.fmul(arg2r, arg2r),
self.builder.fmul(arg2i, arg2i))
return (self.builder.fdiv(
self.builder.fadd(self.builder.fmul(arg1r, arg2r),
self.builder.fmul(arg1i, arg2i)),
divisor),
self.builder.fdiv(
self.builder.fsub(self.builder.fmul(arg1i, arg2r),
self.builder.fmul(arg1r, arg2i)),
divisor))
def _complex_floordiv(self, arg1r, arg1i, arg2r, arg2i):
real, imag = self._complex_div(arg1r, arg1i, arg2r, arg2i)
long_type = long_.to_llvm(self.context)
real = self.caster.cast(real, long_type, unsigned=False)
imag = self.caster.cast(imag, long_type, unsigned=False)
real = self.caster.cast(real, arg1r.type, unsigned=False)
imag = self.caster.cast(imag, arg1r.type, unsigned=False)
return real, imag
########NEW FILE########
__FILENAME__ = datetimesupport
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from numba import *
class DateTimeSupportMixin(object):
"Support for datetimes"
def _generate_datetime_op(self, op, arg1, arg2):
(timestamp1, units1), (timestamp2, units2) = \
self._extract_datetime(arg1), self._extract_datetime(arg2)
timestamp, units = op(timestamp1, units1, timestamp2, units2)
return self._create_datetime(timestamp, units)
def _extract_datetime(self, value):
"Extract the parts of the datetime"
return (self.builder.extract_value(value, 0),
self.builder.extract_value(value, 1))
def _extract_timedelta(self, value):
return (self.builder.extract_value(value, 0),
self.builder.extract_value(value, 1))
def _promote_datetime(self, src_type, dst_type, value):
"Promote a datetime value to value with a larger or smaller datetime type"
timestamp, units = self._extract_datetime(value)
dst_ltype = dst_type.to_llvm(self.context)
timestamp = self.caster.cast(timestamp, dst_type.subtypes[0])
units = self.caster.cast(units, dst_type.subtypes[1])
return self._create_datetime(timestamp, units)
def _promote_timedelta(self, src_type, dst_type, value):
diff, units = self._extract_timedelta(value)
dst_ltype = dst_type.to_llvm(self.context)
diff = self.caster.cast(diff, dst_type.subtypes[0])
units = self.caster.cast(units, dst_type.subtypes[1])
return self._create_timedelta(diff, units)
def _create_datetime(self, timestamp, units):
datetime = llvm.core.Constant.undef(llvm.core.Type.struct([timestamp.type,
units.type]))
datetime = self.builder.insert_value(datetime, timestamp, 0)
datetime = self.builder.insert_value(datetime, units, 1)
return datetime
def _create_timedelta(self, diff, units):
timedelta_struct = llvm.core.Constant.undef(llvm.core.Type.struct([diff.type,
units.type]))
timedelta_struct = self.builder.insert_value(timedelta_struct, diff, 0)
timedelta_struct = self.builder.insert_value(timedelta_struct, units, 1)
return timedelta_struct
########NEW FILE########
__FILENAME__ = debug
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import logging
logger = logging.getLogger(__name__)
debug_conversion = False
#logger.setLevel(logging.DEBUG)
#debug_conversion = True
########NEW FILE########
__FILENAME__ = globalconstants
import llvm.core
class LLVMConstantsManager(object):
"""
Manage global constants. The result should be linked into the consumer
LLVM module.
"""
def __init__(self):
self.module = llvm.core.Module.new("numba_constants")
# py_constant -> llvm value
self.constant_values = {}
def link(self, dst_module):
dst_module.link_in(self.module, preserve=True)
def get_string_constant(self, const_str):
if const_str in self.constant_values:
ret_val = self.constant_values[const_str]
else:
lconst_str = llvm.core.Constant.stringz(const_str)
ret_val = self.module.add_global_variable(
lconst_str.type, "__STR_%d" % (len(self.constant_values),))
ret_val.linkage = llvm.core.LINKAGE_LINKONCE_ODR
ret_val.initializer = lconst_str
ret_val.global_constant = True
self.constant_values[const_str] = ret_val
return ret_val
########NEW FILE########
__FILENAME__ = llvmcontext
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm
import llvm.core as lc
import llvm.passes as lp
import llvm.ee as le
from numba import *
from numba import nodes
from numba.typesystem import is_obj, promote_to_native
from numba.codegen.codeutils import llvm_alloca, if_badval
from numba.codegen.debug import *
class LLVMContextManager(object):
'''TODO: Make this class not a singleton.
    A possible design is to let each Numba Context own an
    LLVMContextManager.
'''
__singleton = None
def __new__(cls, opt=3, cg=3, inline=1000):
'''
opt --- Optimization level for LLVM optimization pass [0 - 3].
cg --- Optimization level for code generator [0 - 3].
Use `3` for SSE support on Intel.
inline --- Inliner threshold.
'''
inst = cls.__singleton
if not inst:
inst = object.__new__(cls)
inst.__initialize(opt, cg, inline)
cls.__singleton = inst
return inst
def __initialize(self, opt, cg, inline):
assert self.__singleton is None
m = self.__module = lc.Module.new("numba_executable_module")
# Create the TargetMachine
features = ''
# try:
# from llvm.workaround.avx_support import detect_avx_support
# if not detect_avx_support():
# features = '-avx'
# except ImportError:
# # Old llvm, disable AVX for all
features = '-avx'
tm = self.__machine = le.TargetMachine.new(opt=cg, cm=le.CM_JITDEFAULT,
features=features)
        # Create the ExecutionEngine
self.__engine = le.EngineBuilder.new(m).create(tm)
        # Build a PassManager which will be used for every module.
has_loop_vectorizer = llvm.version >= (3, 2)
passmanagers = lp.build_pass_managers(tm, opt=opt,
inline_threshold=inline,
loop_vectorize=has_loop_vectorizer,
fpm=False)
self.__pm = passmanagers.pm
self.__string_constants = {}
@property
def module(self):
return self.__module
@property
def execution_engine(self):
return self.__engine
@property
def pass_manager(self):
return self.__pm
@property
def target_machine(self):
return self.__machine
def link(self, lfunc):
if lfunc.module is not self.module:
# optimize
self.pass_manager.run(lfunc.module)
# link module
func_name = lfunc.name
#
# print 'lfunc.module'.center(80, '-')
# print lfunc.module
#
# print 'self.module'.center(80, '-')
# print self.module
# XXX: Better safe than sorry.
# Check duplicated function definitions and remove them.
            # This problem should not exist.
def is_duplicated_function(f):
if f.is_declaration:
return False
try:
self.module.get_function_named(f.name)
except llvm.LLVMException as e:
return False
else:
return True
lfunc_module = lfunc.module
#print "LINKING", lfunc.name, lfunc.module.id, "in", self.module.id
#print [f.name for f in lfunc.module.functions]
#print '-----'
for func in lfunc_module.functions:
if is_duplicated_function(func):
import warnings
if func is lfunc:
# If the duplicated function is the currently compiling
# function, rename it.
ct = 0
while is_duplicated_function(func):
func.name = "%s_duplicated%d" % (func_name, ct)
ct += 1
warnings.warn("Renamed duplicated function %s to %s" %
(func_name, func.name))
func_name = func.name
else:
# If the duplicated function is not the currently
# compiling function, ignore it.
# We assume this is a utility function.
assert func.linkage == lc.LINKAGE_LINKONCE_ODR, func.name
link_module(self.execution_engine, lfunc_module, self.module)
lfunc = self.module.get_function_named(func_name)
assert lfunc.module is self.module
self.verify(lfunc)
# print lfunc
return lfunc
def get_pointer_to_function(self, lfunc):
return self.execution_engine.get_pointer_to_function(lfunc)
def verify(self, lfunc):
lfunc.module.verify()
# XXX: remove the following extra checking before release
for bb in lfunc.basic_blocks:
for instr in bb.instructions:
if isinstance(instr, lc.CallOrInvokeInstruction):
callee = instr.called_function
if callee is not None:
assert callee.module is lfunc.module,\
"Inter module call for call to %s" % callee.name
# ______________________________________________________________________
handle = lambda llvm_value: llvm_value._ptr
def link_module(engine, src_module, dst_module, preserve=False):
"""
Link a source module into a destination module while preserving the
execution engine's global mapping of pointers.
"""
dst_module.link_in(src_module, preserve=preserve)
ptr = lambda gv: handle(engine).getPointerToGlobalIfAvailable(handle(gv))
def update_gv(src_gv, dst_gv):
if ptr(src_gv) != 0 and ptr(dst_gv) == 0:
engine.add_global_mapping(dst_gv, ptr(src_gv))
# Update function mapping
for function in src_module.functions:
dst_lfunc = dst_module.get_function_named(function.name)
update_gv(function, dst_lfunc)
# Update other global symbols' mapping
for src_gv in src_module.global_variables:
dst_gv = dst_module.get_global_variable_named(src_gv.name)
update_gv(src_gv, dst_gv)
########NEW FILE########
__FILENAME__ = llvmwrapper
# -*- coding: utf-8 -*-
"""
Module that creates wrappers around LLVM functions. The wrapper is callable
from Python.
"""
from __future__ import print_function, division, absolute_import
import logging
logger = logging.getLogger(__name__)
import ast
import ctypes
import llvm.core
from numba import *
from numba import nodes
from numba import closures
from numba import typesystem
from numba import numbawrapper
from numba.functions import keep_alive
from numba.symtab import Variable
from numba.typesystem import is_obj
#------------------------------------------------------------------------
# Create a NumbaFunction (numbafunction.c)
#------------------------------------------------------------------------
def _create_methoddef(py_func, func_name, func_doc, func_pointer):
"""
Create a PyMethodDef ctypes struct.
struct PyMethodDef {
const char *ml_name; /* The name of the built-in function/method */
PyCFunction ml_meth; /* The C function that implements it */
int ml_flags; /* Combination of METH_xxx flags, which mostly
describe the args expected by the C func */
const char *ml_doc; /* The __doc__ attribute, or NULL */
};
"""
PyMethodDef = struct([('name', char.pointer()),
('method', void.pointer()),
('flags', int_),
('doc', char.pointer())])
c_PyMethodDef = PyMethodDef.to_ctypes()
PyCFunction_NewEx = ctypes.pythonapi.PyCFunction_NewEx
PyCFunction_NewEx.argtypes = [ctypes.POINTER(c_PyMethodDef),
ctypes.py_object,
ctypes.c_void_p]
PyCFunction_NewEx.restype = ctypes.py_object
# It is paramount to put these into variables first, since every
# access may return a new string object!
keep_alive(py_func, func_name)
keep_alive(py_func, func_doc)
methoddef = c_PyMethodDef()
if PY3:
if func_name is not None:
func_name = func_name.encode('utf-8')
if func_doc is not None:
func_doc = func_doc.encode('utf-8')
methoddef.name = func_name
methoddef.doc = func_doc
methoddef.method = ctypes.c_void_p(func_pointer)
methoddef.flags = 1 # METH_VARARGS
return methoddef
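
# --- Illustrative sketch (ctypes only, independent of numba's own struct
# type): the same PyMethodDef layout that _create_methoddef() mirrors above.
# It only declares the structure and fills in example values; it does not
# call PyCFunction_NewEx. The name _PyMethodDefSketch is ours, not CPython's.
class _PyMethodDefSketch(ctypes.Structure):
    _fields_ = [
        ("ml_name", ctypes.c_char_p),   # name of the built-in function/method
        ("ml_meth", ctypes.c_void_p),   # C function pointer that implements it
        ("ml_flags", ctypes.c_int),     # METH_xxx flags; 1 == METH_VARARGS
        ("ml_doc", ctypes.c_char_p),    # __doc__ string, or NULL
    ]

def _sketch_methoddef():
    md = _PyMethodDefSketch(b"example", None, 1, b"example docstring")
    return ctypes.sizeof(_PyMethodDefSketch), md.ml_name, md.ml_flags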
def numbafunction_new(py_func, func_name, func_doc, module_name, func_pointer,
wrapped_lfunc_pointer, wrapped_signature):
"Create a NumbaFunction (numbafunction.c)"
methoddef = _create_methoddef(py_func, func_name, func_doc, func_pointer)
keep_alive(py_func, methoddef)
keep_alive(py_func, module_name)
wrapper = numbawrapper.create_function(methoddef, py_func,
wrapped_lfunc_pointer,
wrapped_signature, module_name)
return methoddef, wrapper
#------------------------------------------------------------------------
# Ctypes wrapping
#------------------------------------------------------------------------
def get_ctypes_func(self, llvm=True):
import ctypes
sig = self.func_signature
restype = typesystem.convert_to_ctypes(sig.return_type)
# FIXME: Switch to PYFUNCTYPE so it does not release the GIL.
#
# prototype = ctypes.CFUNCTYPE(restype,
# *[_types.convert_to_ctypes(x)
# for x in sig.args])
prototype = ctypes.PYFUNCTYPE(restype,
*[typesystem.convert_to_ctypes(x)
for x in sig.args])
if hasattr(restype, 'make_ctypes_prototype_wrapper'):
# See numba.utils.ComplexMixin for an example of
# make_ctypes_prototype_wrapper().
prototype = restype.make_ctypes_prototype_wrapper(prototype)
if llvm:
# July 10, 2012: PY_CALL_TO_LLVM_CALL_MAP is removed recent commit.
#
# PY_CALL_TO_LLVM_CALL_MAP[self.func] = \
# self.build_call_to_translated_function
return prototype(self.lfunc_pointer)
else:
return prototype(self.func)
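
# --- Illustrative sketch (not part of numba): re-wrap a raw function address
# with a ctypes prototype, mirroring prototype(self.lfunc_pointer) above. It
# borrows labs() from the C runtime purely as a stand-in for a JIT-compiled
# pointer; find_library("c") may return None on some platforms (e.g. Windows),
# in which case the sketch does not apply.
def _sketch_prototype_from_address():
    import ctypes.util
    libc = ctypes.CDLL(ctypes.util.find_library("c"))
    address = ctypes.cast(libc.labs, ctypes.c_void_p).value    # raw pointer
    prototype = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_long)
    labs_again = prototype(address)                            # callable again
    return labs_again(-42)                                     # -> 42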
#------------------------------------------------------------------------
# NumbaFunction Wrapping
#------------------------------------------------------------------------
def fake_pyfunc(self, args):
"PyObject *(*)(PyObject *self, PyObject *args)"
pass
def get_closure_scope(func_signature, func_obj):
"""
Retrieve the closure from the NumbaFunction from the func_closure
attribute.
func_signature:
signature of closure function
func_obj:
LLVM Value referencing the closure function as a Python object
"""
closure_scope_type = func_signature.args[0]
offset = numbawrapper.numbafunc_closure_field_offset
closure = nodes.LLVMValueRefNode(void.pointer(), func_obj)
closure = nodes.CoercionNode(closure, char.pointer())
closure_field = nodes.pointer_add(closure, nodes.const(offset, size_t))
closure_field = nodes.CoercionNode(closure_field,
closure_scope_type.pointer())
closure_scope = nodes.DereferenceNode(closure_field)
return closure_scope
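
# --- Illustrative sketch (CPython-specific, standard non-debug builds only):
# reading a field at a byte offset from an object's address with ctypes,
# mirroring the pointer_add / cast / dereference chain built above for the
# closure scope. The offset 0 (ob_refcnt) is an example, not numba's layout.
def _sketch_field_at_offset():
    import sys
    obj = object()
    base = id(obj)            # in CPython, id() is the object's address
    offset = 0                # ob_refcnt sits at the start of PyObject
    refcnt = ctypes.c_ssize_t.from_address(base + offset).value
    return refcnt, sys.getrefcount(obj)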
def build_wrapper_function_ast(env, wrapper_lfunc, llvm_module):
"""
Build AST for LLVM function wrapper.
lfunc: LLVM function to wrap
llvm_module: module the wrapper is being defined in
The resulting AST has a NativeCallNode to the wrapped function. The
arguments are LLVMValueRefNode nodes which still need their llvm_value
set to the object from the tuple. This happens in visit_FunctionWrapperNode
during codegen.
"""
func = env.crnt.func
func_signature = env.crnt.func_signature
func_name = env.crnt.func_name
# Insert external declaration
lfunc = llvm_module.get_or_insert_function(
func_signature.to_llvm(env.context),
env.crnt.lfunc.name)
# Build AST
wrapper = nodes.FunctionWrapperNode(lfunc,
func_signature,
func,
fake_pyfunc,
func_name)
error_return = ast.Return(nodes.CoercionNode(nodes.NULL_obj,
object_))
is_closure = bool(closures.is_closure_signature(func_signature))
nargs = len(func_signature.args) - is_closure
# Call wrapped function with unpacked object arguments
# (delay actual arguments)
args = [nodes.LLVMValueRefNode(object_, None)
for i in range(nargs)]
if is_closure:
# Insert m_self as scope argument type
closure_scope = get_closure_scope(func_signature, wrapper_lfunc.args[0])
args.insert(0, closure_scope)
func_call = nodes.NativeCallNode(func_signature, args, lfunc)
if not is_obj(func_signature.return_type):
# Check for error using PyErr_Occurred()
func_call = nodes.PyErr_OccurredNode(func_call)
# Coerce and return result
if func_signature.return_type.is_void:
wrapper.body = func_call
result_node = nodes.ObjectInjectNode(None)
else:
wrapper.body = None
result_node = func_call
wrapper.return_result = ast.Return(value=nodes.CoercionNode(result_node,
object_))
# Update wrapper
wrapper.error_return = error_return
wrapper.cellvars = []
wrapper.wrapped_nargs = nargs
wrapper.wrapped_args = args[is_closure:]
return wrapper
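
# --- Illustrative sketch (stdlib only, not part of numba's pipeline): build
# and compile a trivial argument-unpacking wrapper with the ast module, as a
# much-simplified analogue of the FunctionWrapperNode assembled above. The
# wrapped callable "f" is a stand-in, not a compiled numba function.
def _sketch_ast_wrapper():
    source = "def wrapper(args):\n    return f(args[0], args[1])"
    code = compile(ast.parse(source), "<wrapper>", "exec")
    namespace = {"f": lambda a, b: a + b}
    exec(code, namespace)
    return namespace["wrapper"]((3, 4))     # -> 7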
def build_wrapper_translation(env, llvm_module=None):
"""
Generate a wrapper function in the given llvm module.
"""
from numba import pipeline
if llvm_module:
wrapper_module = llvm_module
else:
wrapper_module = env.llvm_context.module
# Create wrapper code generator and wrapper AST
func_name = '__numba_wrapper_%s' % env.crnt.func_name
signature = object_(void.pointer(), object_)
symtab = dict(self=Variable(object_, is_local=True),
args=Variable(object_, is_local=True))
func_env = env.crnt.inherit(
func=fake_pyfunc,
name=func_name,
mangled_name=None, # Force FunctionEnvironment.init()
# to generate a new mangled name.
func_signature=signature,
locals={},
symtab=symtab,
refcount_args=False,
llvm_module=wrapper_module)
# Create wrapper LLVM function
func_env.lfunc = pipeline.get_lfunc(env, func_env)
# Build wrapper ast
wrapper_node = build_wrapper_function_ast(env,
wrapper_lfunc=func_env.lfunc,
llvm_module=wrapper_module)
func_env.ast = wrapper_node
# Specialize and compile wrapper
pipeline.run_env(env, func_env, pipeline_name='late_translate')
keep_alive(fake_pyfunc, func_env.lfunc)
return func_env.translator # TODO: Amend callers to eat func_env
def build_wrapper_function(env):
'''
Build a wrapper function for the currently translated function.
Return the interpreter-level wrapper function, the LLVM wrapper function,
and the method definition record.
'''
t = build_wrapper_translation(env)
# Return a PyCFunctionObject holding the wrapper
func_pointer = t.lfunc_pointer
methoddef, wrapper = numbafunction_new(
env.crnt.func,
env.crnt.func_name,
env.crnt.func_doc,
env.crnt.module_name,
func_pointer, # Wrapper
env.crnt.translator.lfunc_pointer, # Wrapped
env.crnt.func_signature)
return wrapper, t.lfunc, methoddef
def build_wrapper_module(env):
'''
Build a wrapper function for the currently translated
function, and return a tuple containing the separate LLVM
module, and the LLVM wrapper function.
'''
llvm_module = llvm.core.Module.new('%s_wrapper_module' % env.crnt.mangled_name)
t = build_wrapper_translation(env, llvm_module=llvm_module)
logger.debug('Wrapper module: %s' % llvm_module)
return llvm_module, t.lfunc
########NEW FILE########
__FILENAME__ = refcounting
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from numba import *
from numba.utility.cbuilder import refcounting
class RefcountingMixin(object):
def refcount(self, func, value):
"Refcount a value with a refcounting function"
assert not self.nopython
refcounter = self.context.cbuilder_library.declare(func, self.env,
self.llvm_module)
object_ltype = object_.to_llvm(self.context)
b = self.builder
return b.call(refcounter, [b.bitcast(value, object_ltype)])
def decref(self, value):
"Py_DECREF a value"
return self.refcount(refcounting.Py_DECREF, value)
def incref(self, value):
"Py_INCREF a value"
return self.refcount(refcounting.Py_INCREF, value)
def xdecref(self, value):
"Py_XDECREF a value"
return self.refcount(refcounting.Py_XDECREF, value)
def xincref(self, value):
"Py_XINCREF a value"
return self.refcount(refcounting.Py_XINCREF, value)
def xdecref_temp(self, temp):
"Py_XDECREF a temporary"
return self.xdecref(self.load_tbaa(temp, object_))
def xincref_temp(self, temp):
"Py_XINCREF a temporary"
return self.xincref(self.load_tbaa(temp, object_))
def xdecref_temp_cleanup(self, temp):
"""
Cleanup a temp at the end of the function:
* Save current basic block
* Generate code at cleanup path
* Restore basic block
"""
assert not self.nopython
bb = self.builder.basic_block
self.builder.position_at_end(self.current_cleanup_bb)
self.xdecref_temp(temp)
self.current_cleanup_bb = self.builder.basic_block
self.builder.position_at_end(bb)
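
# --- Illustrative sketch (CPython only, not used by the mixin above): the
# same Py_INCREF/Py_DECREF effect driven through ctypes.pythonapi, observable
# with sys.getrefcount(). Call _sketch_refcount() by hand to try it.
def _sketch_refcount():
    import ctypes
    import sys
    ctypes.pythonapi.Py_IncRef.argtypes = [ctypes.py_object]
    ctypes.pythonapi.Py_IncRef.restype = None
    ctypes.pythonapi.Py_DecRef.argtypes = [ctypes.py_object]
    ctypes.pythonapi.Py_DecRef.restype = None
    obj = object()
    before = sys.getrefcount(obj)
    ctypes.pythonapi.Py_IncRef(obj)      # like self.incref(value)
    after_inc = sys.getrefcount(obj)
    ctypes.pythonapi.Py_DecRef(obj)      # like self.decref(value)
    after_dec = sys.getrefcount(obj)
    return before, after_inc, after_dec  # e.g. (2, 3, 2)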
########NEW FILE########
__FILENAME__ = translate
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast, collections
import logging
import llvm
import llvm.core as lc
import llvm.core
from numba.llvm_types import _int1, _int32, _LLVMCaster
from numba.multiarray_api import MultiarrayAPI  # instantiated as a class attribute below
from numba.typesystem import llvmtypes
from numba import typesystem
from numba import *
from numba.codegen import debug
from numba.codegen.debug import logger
from numba.codegen.codeutils import llvm_alloca
from numba.codegen import coerce, complexsupport, refcounting, datetimesupport
from numba.codegen.llvmcontext import LLVMContextManager
from numba import visitors, nodes, llvm_types, utils, function_util
from numba.minivect import minitypes, llvm_codegen
from numba import ndarray_helpers, error
from numba.utils import dump
from numba import metadata
from numba.control_flow import ssa
from numba.support.numpy_support import sliceutils
from numba.nodes import constnodes
from numba.typesystem import llvm_typesystem as lts
from numba.annotate.annotate import Annotation, A_type
from numba.annotate.ir_capture import IRBuilder, get_intermediate
from llvm_cbuilder import shortnames as C
_int32_zero = lc.Constant.int(_int32, 0)
_compare_mapping_float = {'>':lc.FCMP_OGT,
'<':lc.FCMP_OLT,
'==':lc.FCMP_OEQ, # (nan == nan) is False
'>=':lc.FCMP_OGE,
'<=':lc.FCMP_OLE,
'!=':lc.FCMP_UNE}
_compare_mapping_sint = {'>':lc.ICMP_SGT,
'<':lc.ICMP_SLT,
'==':lc.ICMP_EQ,
'>=':lc.ICMP_SGE,
'<=':lc.ICMP_SLE,
'!=':lc.ICMP_NE}
_compare_mapping_uint = {'>':lc.ICMP_UGT,
'<':lc.ICMP_ULT,
'==':lc.ICMP_EQ,
'>=':lc.ICMP_UGE,
'<=':lc.ICMP_ULE,
'!=':lc.ICMP_NE}
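
# --- Illustrative sketch of how visit_Compare() further below chooses an LLVM
# predicate: the operator text indexes the float-ordered, signed-int or
# unsigned-int table defined just above. Pure dictionary lookups, no LLVM
# calls; it only assumes llvm.core (imported above as lc) is available, as
# this module already requires.
def _sketch_pick_predicate(op, is_float, is_signed):
    if is_float:
        return _compare_mapping_float[op]   # ordered float comparison
    elif is_signed:
        return _compare_mapping_sint[op]    # signed integer comparison
    else:
        return _compare_mapping_uint[op]    # unsigned integer comparison

# _sketch_pick_predicate('<', False, True) == lc.ICMP_SLT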
# TODO: use composition instead of mixins
class LLVMCodeGenerator(visitors.NumbaVisitor,
complexsupport.ComplexSupportMixin,
refcounting.RefcountingMixin,
visitors.NoPythonContextMixin,
datetimesupport.DateTimeSupportMixin):
"""
Translate a Python AST to LLVM. Each visit_* method should directly
return an LLVM value.
"""
multiarray_api = MultiarrayAPI()
# Values for True/False
bool_ltype = llvm.core.Type.int(1)
_bool_constants = {
False: llvm.core.Constant.int(bool_ltype, 0),
True: llvm.core.Constant.int(bool_ltype, 1),
}
def __init__(self, context, func, ast, func_signature, symtab,
optimize=True, nopython=False,
llvm_module=None, **kwds):
super(LLVMCodeGenerator, self).__init__(
context, func, ast, func_signature=func_signature,
nopython=nopython, symtab=symtab,
llvm_module=llvm_module,
**kwds)
# FIXME: Change mangled_name to some other attribute,
# optionally read in the environment. What we really want to
        # distinguish between is the name of the LLVM function being
# generated and the name of the Python function being
# translated.
self.mangled_name = self.env.translation.crnt.mangled_name
self.func_signature = func_signature
self.blocks = {} # stores id => basic-block
self.refcount_args = self.env.crnt.refcount_args
# self.ma_obj = None # What is this?
self.optimize = optimize
self.flags = kwds
# internal states
self._nodes = [] # for tracking parent nodes
if self.env.crnt.annotate:
import inspect
source = inspect.getsource(func)
decorators = 0
while not source.lstrip().startswith('def'):
decorator, sep, source = source.partition('\n')
decorators += 1
for argname, argtype in zip(self.argnames, self.func_signature.args):
self.annotations[func.__code__.co_firstlineno + decorators] = \
[Annotation(A_type, (argname, str(argtype)))]
# ________________________ visitors __________________________
@property
def annotations(self):
return self.env.crnt.annotations
@property
def current_node(self):
return self._nodes[-1]
def update_pos(self, node):
"Update position for annotation"
if self.env.crnt.annotate and hasattr(node, 'lineno'):
self.builder.update_pos(node.lineno)
return self.builder.get_pos()
def reset_pos(self, pos):
"Reset position for annotation"
if self.env.crnt.annotate:
self.builder.update_pos(pos)
def visit(self, node):
# logger.debug('visiting %s', ast.dump(node))
pos = self.update_pos(node)
fn = getattr(self, 'visit_%s' % type(node).__name__)
try:
self._nodes.append(node) # push current node
result = fn(node)
self.reset_pos(pos)
return result
except Exception as e:
# logger.exception(e)
raise
finally:
self._nodes.pop() # pop current node
# _________________________________________________________________________
def _load_arg_by_ref(self, argtype, larg):
if (minitypes.pass_by_ref(argtype) and
self.func_signature.struct_by_reference):
larg = self.builder.load(larg)
return larg
def _allocate_arg_local(self, name, argtype, larg):
"""
Allocate a local variable on the stack.
"""
stackspace = self.alloca(argtype)
stackspace.name = name
self.builder.store(larg, stackspace)
return stackspace
def renameable(self, variable):
renameable = self.have_cfg and (not variable or variable.renameable)
return renameable
def incref_arg(self, argname, argtype, larg, variable):
# TODO: incref objects in structs
if not (self.nopython or argtype.is_closure_scope):
if self.is_obj(variable.type) and self.refcount_args:
if self.renameable(variable):
lvalue = self._allocate_arg_local(argname, argtype,
variable.lvalue)
else:
lvalue = variable.lvalue
self.object_local_temps[argname] = lvalue
self.incref(larg)
def _init_constants(self):
pass
# self.symtab["None"]
def _init_args(self):
"""
Unpack arguments:
            1) Initialize SSA variables
2) Handle variables declared in the 'locals' dict
"""
for larg, argname, argtype in zip(self.lfunc.args, self.argnames,
self.func_signature.args):
larg.name = argname
variable = self.symtab.get(argname, None)
if self.renameable(variable):
if argtype.is_struct or argtype.is_reference:
larg = self._allocate_arg_local(argname, argtype, larg)
# Set value on first definition of the variable
if argtype.is_closure_scope:
variable = self.symtab[argname]
else:
variable = self.symtab.lookup_renamed(argname, 0)
variable.lvalue = self._load_arg_by_ref(argtype, larg)
elif argname in self.locals or variable.is_cellvar:
# Allocate on stack
variable = self.symtab[argname]
variable.lvalue = self._allocate_arg_local(argname, argtype,
larg)
#else:
# raise error.InternalError(argname, argtype)
self.incref_arg(argname, argtype, larg, variable)
if variable.type.is_array:
self.preload_attributes(variable, variable.lvalue)
def c_array_to_pointer(self, name, stackspace, var):
"Decay a C array to a pointer to allow pointer access"
ltype = var.type.base_type.pointer().to_llvm(self.context)
pointer = self.builder.alloca(ltype, name=name + "_p")
p = self.builder.gep(stackspace, [llvm_types.constant_int(0),
llvm_types.constant_int(0)])
self.builder.store(p, pointer)
stackspace = pointer
return stackspace
def _allocate_locals(self):
"""
Allocate local variables:
            1) Initialize SSA variables
2) Handle variables declared in the 'locals' dict
"""
for name, var in self.symtab.items():
# FIXME: 'None' should be handled as a special case (probably).
if var.type.is_uninitialized and var.cf_references:
assert var.uninitialized_value, var
var.lvalue = self.visit(var.uninitialized_value)
elif name in self.locals and not name in self.argnames:
# var = self.symtab.lookup_renamed(name, 0)
name = 'var_%s' % var.name
if self.is_obj(var.type):
lvalue = self._null_obj_temp(name, type=var.ltype)
else:
lvalue = self.builder.alloca(var.ltype, name=name)
if var.type.is_struct:
# TODO: memset struct to 0
pass
elif var.type.is_carray:
lvalue = self.c_array_to_pointer(name, lvalue, var)
var.lvalue = lvalue
def setup_func(self):
have_return = getattr(self.ast, 'have_return', None)
if have_return is not None:
if not have_return and not self.func_signature.return_type.is_void:
self.error(self.ast, "Function with non-void return does "
"not return a value")
self.lfunc = self.env.translation.crnt.lfunc
assert self.lfunc
if not isinstance(self.ast, nodes.FunctionWrapperNode):
assert self.mangled_name == self.lfunc.name, \
"Redefinition of function %s (%s, %s)" % (self.func_name,
self.mangled_name,
self.lfunc.name)
entry = self.append_basic_block('entry')
self.builder = lc.Builder.new(entry)
if self.env.crnt.annotate:
self.builder = IRBuilder("llvm", self.builder)
self.caster = _LLVMCaster(self.builder)
self.object_coercer = coerce.ObjectCoercer(self)
self.multiarray_api.set_PyArray_API(self.llvm_module)
self.tbaa = metadata.TBAAMetadata(self.llvm_module)
self.object_local_temps = {}
self._init_constants()
self._init_args()
self._allocate_locals()
# TODO: Put current function into symbol table for recursive call
self.setup_return()
if self.have_cfg:
block0 = self.ast.flow.blocks[0]
block0.entry_block = entry
self.visitlist(block0.body)
block0.exit_block = self.builder.basic_block
self.flow_block = None
# self.visitlist(block0.body) # uninitialize constants for variables
self.flow_block = self.ast.flow.blocks[1]
else:
self.flow_block = None
self.in_loop = 0
self.loop_beginnings = []
self.loop_exits = []
def to_llvm(self, type):
return type.to_llvm(self.context)
def translate(self):
self.lfunc = None
try:
self.setup_func()
if isinstance(self.ast, ast.FunctionDef):
# Handle the doc string for the function
# FIXME: Ignoring it for now
if (isinstance(self.ast.body[0], ast.Expr) and
isinstance(self.ast.body[0].value, ast.Str)):
# Python doc string
logger.info('Ignoring python doc string.')
statements = self.ast.body[1:]
else:
statements = self.ast.body
for node in statements: # do codegen for each statement
self.visit(node)
else:
self.visit(self.ast)
if not self.is_block_terminated():
# self.builder.ret_void()
self.builder.branch(self.cleanup_label)
self.handle_phis()
self.terminate_cleanup_blocks()
if self.env.crnt.annotate:
self.env.crnt.intermediates.append(get_intermediate(self.builder))
# Done code generation
del self.builder # release the builder to make GC happy
            if logger.getEffectiveLevel() <= logging.DEBUG:
# logger.debug("ast translated function: %s" % self.lfunc)
logger.debug(self.llvm_module)
# Verify code generation
self.llvm_module.verify() # only Module level verification checks everything.
except:
# Delete the function to prevent an invalid function from living in the module
post_mortem = self.env.crnt.error_env.enable_post_mortem
if self.lfunc is not None and not post_mortem:
self.lfunc.delete()
raise
def handle_phis(self):
"""
Update all our phi nodes after translation is done and all Variables
have their llvm values set.
"""
if not self.have_cfg:
return
# Initialize uninitialized incoming values to bad values
for phi in ssa.iter_phi_vars(self.ast.flow):
if phi.type.is_uninitialized:
#print incoming_var.cf_references
#print phi_node.variable.cf_references
#print "incoming", phi_node.incoming, block
assert phi.uninitialized_value, phi
assert phi.lvalue is None
phi.lvalue = self.visit(phi.uninitialized_value)
# Add all incoming values to all our phi values
ssa.handle_phis(self.ast.flow)
def visit_FunctionWrapperNode(self, node):
# Disable debug coercion
was_debug_conversion = debug.debug_conversion
debug.debug_conversion = False
# Unpack tuple into arguments
arg_types = [object_] * node.wrapped_nargs
types, lstr = self.object_coercer.lstr(arg_types)
args_tuple = self.lfunc.args[1]
largs = self.object_coercer.parse_tuple(lstr, args_tuple, arg_types)
# Patch argument values in LLVMValueRefNode nodes
assert len(largs) == node.wrapped_nargs
for larg, arg_node in zip(largs, node.wrapped_args):
arg_node.llvm_value = larg
# Generate call to wrapped function
self.generic_visit(node)
debug.debug_conversion = was_debug_conversion
@property
def lfunc_pointer(self):
return LLVMContextManager().get_pointer_to_function(self.lfunc)
def _null_obj_temp(self, name, type=None, change_bb=False):
if change_bb:
bb = self.builder.basic_block
lhs = self.llvm_alloca(type or llvm_types._pyobject_head_struct_p,
name=name, change_bb=False)
self.generate_assign_stack(self.visit(nodes.NULL_obj), lhs,
tbaa_type=object_)
if change_bb:
self.builder.position_at_end(bb)
return lhs
def load_tbaa(self, ptr, tbaa_type, name=''):
"""
Load a pointer and annotate with Type Based Alias Analysis
metadata.
"""
instr = self.builder.load(ptr, name='')
self.tbaa.set_tbaa(instr, tbaa_type)
return instr
def store_tbaa(self, value, ptr, tbaa_type):
"""
Load a pointer and annotate with Type Based Alias Analysis
metadata.
"""
instr = self.builder.store(value, ptr)
if metadata.is_tbaa_type(tbaa_type):
self.tbaa.set_tbaa(instr, tbaa_type)
def puts(self, msg):
const = nodes.ConstNode(msg, c_string_type)
self.visit(function_util.external_call(self.context,
self.llvm_module,
'puts',
args=[const]))
def puts_llvm(self, llvm_string):
const = nodes.LLVMValueRefNode(c_string_type, llvm_string)
self.visit(function_util.external_call(self.context,
self.llvm_module,
'puts',
args=[const]))
def setup_return(self):
# Assign to this value which will be returned
self.is_void_return = \
self.func_signature.actual_signature.return_type.is_void
ret_by_ref = minitypes.pass_by_ref(self.func_signature.return_type)
if self.func_signature.struct_by_reference and ret_by_ref:
self.return_value = self.lfunc.args[-1]
assert self.return_value.type.kind == llvm.core.TYPE_POINTER
elif not self.is_void_return:
llvm_ret_type = self.func_signature.return_type.to_llvm(self.context)
self.return_value = self.builder.alloca(llvm_ret_type,
name="return_value")
        # All non-NULL object temporaries are DECREFed here
self.cleanup_label = self.append_basic_block('cleanup_label')
self.current_cleanup_bb = self.cleanup_label
bb = self.builder.basic_block
# Jump here in case of an error
self.error_label = self.append_basic_block("error_label")
self.builder.position_at_end(self.error_label)
# Set error return value and jump to cleanup
self.visit(self.ast.error_return)
self.builder.position_at_end(bb)
def terminate_cleanup_blocks(self):
self.builder.position_at_end(self.current_cleanup_bb)
# Decref local variables
        for name, stackspace in self.object_local_temps.items():
self.xdecref_temp(stackspace)
if self.is_void_return:
self.builder.ret_void()
else:
ret_type = self.func_signature.return_type
self.builder.ret(self.builder.load(self.return_value))
def alloca(self, type, name='', change_bb=True):
return self.llvm_alloca(self.to_llvm(type), name, change_bb)
def llvm_alloca(self, ltype, name='', change_bb=True):
return llvm_alloca(self.lfunc, self.builder, ltype, name, change_bb)
def _handle_ctx(self, node, lptr, tbaa_type, name=''):
if isinstance(node.ctx, ast.Load):
return self.load_tbaa(lptr, tbaa_type,
name=name and 'load_' + name)
else:
return lptr
def generate_constant_int(self, val, ty=typesystem.int_):
lconstant = lc.Constant.int(ty.to_llvm(self.context), val)
return lconstant
# __________________________________________________________________________
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Pass(self, node):
pass
def visit_Attribute(self, node):
raise error.NumbaError("This node should have been replaced")
#------------------------------------------------------------------------
# Assignment
#------------------------------------------------------------------------
@property
def using_numpy_array(self):
return issubclass(self.env.crnt.array, ndarray_helpers.NumpyArray)
def is_obj(self, type):
if type.is_array:
return self.using_numpy_array
return type.is_object
def visit_Assign(self, node):
target_node = node.targets[0]
# print target_node
is_object = self.is_obj(target_node.type)
value = self.visit(node.value)
incref = is_object
decref = is_object
if (isinstance(target_node, ast.Name) and
self.renameable(target_node.variable)):
# phi nodes are in place for the variable
target_node.variable.lvalue = value
if not is_object:
# No worries about refcounting, we are done
return
# Refcount SSA variables
# TODO: use ObjectTempNode?
if target_node.id not in self.object_local_temps:
target = self._null_obj_temp(target_node.id, change_bb=True)
self.object_local_temps[target_node.id] = target
decref = bool(self.loop_beginnings)
else:
target = self.object_local_temps[target_node.id]
tbaa_node = self.tbaa.get_metadata(target_node.type)
else:
target = self.visit(target_node)
if isinstance(target_node, nodes.TempStoreNode):
# Use TBAA specific to only this temporary
tbaa_node = target_node.temp.get_tbaa_node(self.tbaa)
else:
# ast.Attribute | ast.Subscript store.
# These types don't have pointer types but rather
# scalar values
if self.is_obj(target_node.type):
target_type = object_
else:
target_type = target_node.type
tbaa_type = target_type.pointer()
tbaa_node = self.tbaa.get_metadata(tbaa_type)
# INCREF RHS. Note that new references are always in temporaries, and
# hence we can only borrow references, and have to make it an owned
# reference
self.generate_assign_stack(value, target, tbaa_node,
decref=decref, incref=incref)
if target_node.type.is_array:
self.preload_attributes(target_node.variable, value)
def generate_assign_stack(self, lvalue, ltarget,
tbaa_node=None, tbaa_type=None,
decref=False, incref=False):
"""
Generate assignment operation and automatically cast value to
match the target type.
"""
if lvalue.type != ltarget.type.pointee:
lvalue = self.caster.cast(lvalue, ltarget.type.pointee)
if incref:
self.incref(lvalue)
if decref:
# Py_XDECREF any previous object
self.xdecref_temp(ltarget)
instr = self.builder.store(lvalue, ltarget)
# Set TBAA on store instruction
if tbaa_node is None:
assert tbaa_type is not None
tbaa_node = self.tbaa.get_metadata(tbaa_type)
assert tbaa_node
self.tbaa.set_metadata(instr, tbaa_node)
def preload_attributes(self, var, value):
"""
Pre-load ndarray attributes data/shape/strides.
"""
if not var.renameable:
# Stack allocated variable
value = self.builder.load(value)
var.ndarray = self.ndarray(value, var.type)
# Trigger preload
var.ndarray.data
var.ndarray.shape
var.ndarray.strides
#------------------------------------------------------------------------
# Variables
#------------------------------------------------------------------------
def check_unbound_local(self, node, var):
if getattr(node, 'check_unbound', None):
            # Patch the LLVMValueRefNode, we don't want a Name since it would
# check for unbound variables recursively
int_type = Py_uintptr_t.to_llvm(self.context)
value_p = self.builder.ptrtoint(var.lvalue, int_type)
node.loaded_name.llvm_value = value_p
self.visit(node.check_unbound)
def visit_Name(self, node):
var = node.variable
if (var.lvalue is None and not var.renameable and
self.symtab[node.id].is_cellvar):
var = self.symtab[node.id]
assert var.lvalue, var
self.check_unbound_local(node, var)
should_load = (not var.renameable or
var.type.is_struct) and not var.is_constant
if should_load and isinstance(node.ctx, ast.Load):
# Not a renamed but an alloca'd variable
return self.load_tbaa(var.lvalue, var.type)
else:
if self.env.crnt.annotate and hasattr(node, 'lineno'):
if not node.lineno in self.annotations:
self.annotations[node.lineno] = []
annotation = Annotation(A_type, (node.name, str(node.type)))
self.annotations[node.lineno].append(annotation)
return var.lvalue
#------------------------------------------------------------------------
# Control Flow
#------------------------------------------------------------------------
def _init_phis(self, node):
"Set basic block and initialize LLVM phis"
for phi_node in node.phi_nodes:
ltype = phi_node.variable.type.to_llvm(self.context)
phi = self.builder.phi(ltype, phi_node.variable.unmangled_name)
phi_node.variable.lvalue = phi
def setblock(self, cfg_basic_block):
if cfg_basic_block.is_fabricated:
return
old = self.flow_block
if old and not old.exit_block:
if old.id == 1:
# Handle promotions from the entry block. This is somewhat
# of a hack, and needed since the CFG isn't properly merged
# in the AST
self.visitlist(old.body)
old.exit_block = self.builder.basic_block
self.flow_block = cfg_basic_block
    def append_basic_block(self, name='unnamed'):
idx = len(self.blocks)
#bb = self.lfunc.append_basic_block('%s_%d'%(name, idx))
bb = self.lfunc.append_basic_block(name)
self.blocks[idx] = bb
return bb
def visit_PromotionNode(self, node):
lvalue = self.visit(node.node)
node.variable.lvalue = lvalue
# Update basic block in case the promotion created a new block
self.flow_block.exit_block = self.builder.basic_block
_pending_block = None # Nested hack
def visit_ControlBlock(self, node, visit_body=True):
"""
Return a new basic block and handle phis and promotions. Promotions
are needed at merge (phi) points to have a consistent type.
"""
#
### Create entry basic block
#
if node is None:
# Fabricated If statement
label = 'fabricated_basic_block'
else:
label = node.label
self.setblock(node)
node.prev_block = self.builder.basic_block
node.entry_block = node.create_block(self, label)
if node.branch_here and not self.is_block_terminated():
self.builder.branch(node.entry_block)
self.builder.position_at_end(node.entry_block)
self._init_phis(node)
if self._pending_block:
self.visitlist(self._pending_block.body)
self._pending_block = None
if visit_body:
lbody = self.visitlist(node.body)
lbody = lbody[0] if len(lbody) == 1 else None
else:
lbody = None
if not node.exit_block:
node.exit_block = self.builder.basic_block
return lbody
def visit_LowLevelBasicBlockNode(self, node):
llvm_block = node.create_block(self)
if not self.is_block_terminated():
self.builder.branch(llvm_block)
self.builder.position_at_end(llvm_block)
return self.visit(node.body)
#------------------------------------------------------------------------
# Control Flow: If, For, While
#------------------------------------------------------------------------
def visit_If(self, node, is_while=False):
if not hasattr(node, 'cond_block'):
# We have a synthetic 'if' without a cfg, fabricate fake blocks
node = nodes.build_if(**vars(node))
# Visit condition
test = self.visit(node.test)
bb_cond = node.cond_block.entry_block
# test = self.visit(node.test)
if test.type != _int1:
test = self._generate_test(test)
# Create exit block
self.visit_ControlBlock(node.exit_block, visit_body=False)
bb_endif = node.exit_block.entry_block
if is_while:
self.setup_loop(node.continue_block, bb_cond, bb_endif)
# Visit if clauses
self.visitlist(node.body)
#if self.have_cfg:
# self.flow_block.exit_block = self.builder.basic_block
bb_true = node.if_block.entry_block
if is_while:
if not self.is_block_terminated():
self.builder.branch(bb_cond)
self.teardown_loop()
else:
self.term_block(bb_endif)
if node.orelse:
self.visitlist(node.orelse)
bb_false = node.else_block.entry_block
self.term_block(bb_endif)
else:
bb_false = bb_endif
# Mark current basic block and the exit block of the body
self.setblock(node.exit_block)
# Branch to block from condition
self.builder.position_at_end(node.cond_block.prev_block)
self.builder.branch(bb_cond)
self.builder.position_at_end(node.cond_block.exit_block)
# assert not self.is_block_terminated()
self.builder.cbranch(test, bb_true, bb_false)
### Gross hack, remove unparented basic blocks for which we track
### no incoming phi
if (not node.exit_block.parents and node.exit_block.id >= 0 and
node.exit_block.exit_block):
node.exit_block.exit_block.delete()
else:
self.builder.position_at_end(node.exit_block.exit_block)
# Swallow statements following the branch
node.exit_block.exit_block = None
self._pending_block = node.exit_block
def visit_IfExp(self, node):
test = self.visit(node.test)
if test.type != _int1:
test = self._generate_test(test)
then_block = self.append_basic_block('ifexp.then')
else_block = self.append_basic_block('ifexp.else')
merge_block = self.append_basic_block('ifexp.merge')
self.builder.cbranch(test, then_block, else_block)
self.builder.position_at_end(then_block)
then_value = self.visit(node.body)
then_block = self.builder.basic_block
self.builder.branch(merge_block)
self.builder.position_at_end(else_block)
else_value = self.visit(node.orelse)
else_block = self.builder.basic_block
self.builder.branch(merge_block)
self.builder.position_at_end(merge_block)
phi = self.builder.phi(then_value.type)
phi.add_incoming(then_value, then_block)
phi.add_incoming(else_value, else_block)
return phi
def visit_While(self, node):
self.visit_If(node, is_while=True)
def term_block(self, end_block):
if not self.is_block_terminated():
self.terminate_block(self.builder.basic_block, end_block)
@property
def cur_bb(self):
return self.builder.basic_block
def is_block_terminated(self, basic_block=None):
'''
        Check if the current basic block is properly terminated,
        i.e. it ends with a branch or a return instruction.
'''
basic_block = basic_block or self.cur_bb
instructions = basic_block.instructions
return instructions and instructions[-1].is_terminator
def terminate_block(self, block, end_block):
if not self.is_block_terminated(block):
bb = self.cur_bb
self.builder.position_at_end(block)
self.builder.branch(end_block)
self.builder.position_at_end(bb)
def setup_loop(self, continue_block, bb_cond, bb_exit):
if continue_block:
# Jump to target index increment block instead of while condition
# block for 'for i in range(...):' loops
bb_cond = continue_block.create_block(self)
self.loop_beginnings.append(bb_cond)
self.loop_exits.append(bb_exit)
self.in_loop += 1
def teardown_loop(self):
self.loop_beginnings.pop()
self.loop_exits.pop()
self.in_loop -= 1
def visit_For(self, node):
raise error.NumbaError(node, "This node should have been replaced")
#------------------------------------------------------------------------
# Control Flow: Break, Continue
#------------------------------------------------------------------------
def visit_Continue(self, node):
assert self.loop_beginnings # Python syntax should ensure this
self.builder.branch(self.loop_beginnings[-1])
def visit_Break(self, node):
assert self.loop_exits # Python syntax should ensure this
self.builder.branch(self.loop_exits[-1])
#------------------------------------------------------------------------
# Control Flow: Return
#------------------------------------------------------------------------
def visit_Return(self, node):
if node.value is not None:
rettype = self.func_signature.return_type
retval = self.visit(node.value)
if self.is_obj(rettype) or rettype.is_pointer:
retval = self.builder.bitcast(retval,
self.return_value.type.pointee)
if not retval.type == self.return_value.type.pointee:
dump(node)
logger.debug('%s != %s (in node %s)' % (
self.return_value.type.pointee, retval.type,
utils.pformat_ast(node)))
raise error.NumbaError(
node, 'Expected %s type in return, got %s!' %
(self.return_value.type.pointee, retval.type))
self.builder.store(retval, self.return_value)
ret_type = self.func_signature.return_type
if self.is_obj(rettype):
self.xincref_temp(self.return_value)
        # Visitor that records the last valid line number found in the node's subtree
class LineNumVisitor(ast.NodeVisitor):
lineno = -1
def generic_visit(self, node):
if hasattr(node, 'lineno'):
if node.lineno > -1:
self.lineno = node.lineno
v = LineNumVisitor()
v.visit(node)
if self.env.crnt.annotate and hasattr(node, 'lineno') and v.lineno > -1:
lineno = v.lineno
if not lineno in self.annotations:
self.annotations[lineno] = []
annotation = Annotation(A_type, ('return', str(node.value.type)))
self.annotations[lineno].append(annotation)
if not self.is_block_terminated():
self.builder.branch(self.cleanup_label)
# if node.value is not None:
# self.builder.ret(self.visit(node.value))
# else:
# self.builder.ret_void()
def visit_Suite(self, node):
self.visitlist(node.body)
return None
#------------------------------------------------------------------------
# Indexing
#------------------------------------------------------------------------
def visit_Subscript(self, node):
value_type = node.value.type
if not (value_type.is_carray or value_type.is_string or
value_type.is_pointer):
raise error.InternalError(node, "Unsupported type:", node.value.type)
value = self.visit(node.value)
index = self.visit(node.slice)
indices = [index]
if value.type.kind == llvm.core.TYPE_ARRAY:
lptr = self.builder.extract_value(value, index)
else:
lptr = self.builder.gep(value, indices)
if node.slice.type.is_int:
lptr = self._handle_ctx(node, lptr, node.value.type)
return lptr
#------------------------------------------------------------------------
# Binary Operations
#------------------------------------------------------------------------
# ____________________________________________________________
# BoolOp
def _generate_test(self, llval):
return self.builder.icmp(lc.ICMP_NE, llval,
lc.Constant.null(llval.type))
def visit_BoolOp(self, node):
# NOTE: Can have >2 values
assert len(node.values) >= 2
assert isinstance(node.op, ast.And) or isinstance(node.op, ast.Or)
count = len(node.values)
if isinstance(node.op, ast.And):
bb_true = self.append_basic_block('and.true')
bb_false = self.append_basic_block('and.false')
bb_next = [self.append_basic_block('and.rhs')
for i in range(count - 1)] + [bb_true]
bb_done = self.append_basic_block('and.done')
for i in range(count):
value = self.visit(node.values[i])
if value.type != _int1:
value = self._generate_test(value)
self.builder.cbranch(value, bb_next[i], bb_false)
self.builder.position_at_end(bb_next[i])
assert self.builder.basic_block is bb_true
self.builder.branch(bb_done)
self.builder.position_at_end(bb_false)
self.builder.branch(bb_done)
self.builder.position_at_end(bb_done)
elif isinstance(node.op, ast.Or):
bb_true = self.append_basic_block('or.true')
bb_false = self.append_basic_block('or.false')
bb_next = [self.append_basic_block('or.rhs')
for i in range(count - 1)] + [bb_false]
bb_done = self.append_basic_block('or.done')
for i in range(count):
value = self.visit(node.values[i])
if value.type != _int1:
value = self._generate_test(value)
self.builder.cbranch(value, bb_true, bb_next[i])
self.builder.position_at_end(bb_next[i])
assert self.builder.basic_block is bb_false
self.builder.branch(bb_done)
self.builder.position_at_end(bb_true)
self.builder.branch(bb_done)
self.builder.position_at_end(bb_done)
else:
raise Exception("internal erorr")
booltype = _int1
phi = self.builder.phi(booltype)
phi.add_incoming(lc.Constant.int(booltype, 1), bb_true)
phi.add_incoming(lc.Constant.int(booltype, 0), bb_false)
return phi
# ____________________________________________________________
# UnaryOp
def visit_UnaryOp(self, node):
operand_type = node.operand.type
operand = self.visit(node.operand)
operand_ltype = operand.type
op = node.op
if isinstance(op, ast.Not) and (operand_type.is_bool or
operand_type.is_int):
bb_false = self.builder.basic_block
bb_true = self.append_basic_block('not.true')
bb_done = self.append_basic_block('not.done')
self.builder.cbranch(
self.builder.icmp(lc.ICMP_NE, operand,
lc.Constant.null(operand_ltype)),
bb_true, bb_done)
self.builder.position_at_end(bb_true)
self.builder.branch(bb_done)
self.builder.position_at_end(bb_done)
phi = self.builder.phi(operand_ltype)
phi.add_incoming(lc.Constant.int(operand_ltype, 1), bb_false)
phi.add_incoming(lc.Constant.int(operand_ltype, 0), bb_true)
return phi
elif isinstance(op, ast.USub) and operand_type.is_numeric:
if operand_type.is_float:
return self.builder.fsub(lc.Constant.null(operand_ltype),
operand)
elif operand_type.is_int and operand_type.signed:
return self.builder.sub(lc.Constant.null(operand_ltype),
operand)
elif isinstance(op, ast.UAdd) and operand_type.is_numeric:
return operand
elif isinstance(op, ast.Invert) and operand_type.is_int:
return self.builder.xor(lc.Constant.int(operand_ltype, -1), operand)
raise error.NumbaError(node, "Unary operator %s" % node.op)
# ____________________________________________________________
# Compare
_cmp_op_map = {
ast.Gt : '>',
ast.Lt : '<',
ast.GtE : '>=',
ast.LtE : '<=',
ast.Eq : '==',
ast.NotEq : '!=',
}
def visit_Compare(self, node):
op = node.ops[0]
lhs = node.left
rhs = node.comparators[0]
lhs_lvalue = self.visit(lhs)
rhs_lvalue = self.visit(rhs)
op = self._cmp_op_map[type(op)]
if lhs.type.is_float and rhs.type.is_float:
lfunc = self.builder.fcmp
lop = _compare_mapping_float[op]
elif lhs.type.is_int and rhs.type.is_int:
lfunc = self.builder.icmp
if lhs.type.signed:
mapping = _compare_mapping_sint
else:
mapping = _compare_mapping_uint
lop = mapping[op]
else:
# These errors should be issued by the type inferencer or a
# separate error checking pass
raise error.NumbaError(node, "Comparisons of types %s and %s are not yet "
"supported" % (lhs.type, rhs.type))
return lfunc(lop, lhs_lvalue, rhs_lvalue)
# ____________________________________________________________
# BinOp
_binops = {
ast.Add: ('fadd', ('add', 'add')),
ast.Sub: ('fsub', ('sub', 'sub')),
ast.Mult: ('fmul', ('mul', 'mul')),
ast.Div: ('fdiv', ('udiv', 'sdiv')),
ast.BitAnd: ('and_', ('and_', 'and_')),
ast.BitOr: ('or_', ('or_', 'or_')),
ast.BitXor: ('xor', ('xor', 'xor')),
ast.LShift: ('shl', ('shl', 'shl')), # shift left
ast.RShift: ('ashr', ('lshr', 'ashr')), # arithmetic shift right
}
_opnames = {
ast.Mult: 'mul',
}
def opname(self, op):
if op in self._opnames:
return self._opnames[op]
else:
return op.__name__.lower()
def _handle_mod(self, node, lhs, rhs):
from numba.utility import math_utilities
py_modulo = math_utilities.py_modulo(node.type, (node.left.type,
node.right.type))
lfunc = self.env.crnt.llvm_module.get_or_insert_function(
py_modulo.lfunc.type.pointee, py_modulo.lfunc.name)
return self.builder.call(lfunc, (lhs, rhs))
def _handle_complex_binop(self, lhs, op, rhs):
opname = self.opname(op)
if opname in ('add', 'sub', 'mul', 'div', 'floordiv'):
m = getattr(self, '_complex_' + opname)
result = self._generate_complex_op(m, lhs, rhs)
else:
raise error.NumbaError("Unsupported binary operation "
"for complex numbers: %s" % opname)
return result
def _handle_numeric_binop(self, lhs, node, op, rhs):
llvm_method_name = self._binops[op][node.type.is_int]
if node.type.is_int:
llvm_method_name = llvm_method_name[node.type.signed]
meth = getattr(self.builder, llvm_method_name)
if not lhs.type == rhs.type:
print((lhs.type, rhs.type))
assert False, ast.dump(node)
result = meth(lhs, rhs)
return result
def visit_BinOp(self, node):
lhs = self.visit(node.left)
rhs = self.visit(node.right)
op = type(node.op)
pointer_type = self.have(node.left.type, node.right.type,
"is_pointer", "is_int")
if (node.type.is_int or node.type.is_float) and op in self._binops:
result = self._handle_numeric_binop(lhs, node, op, rhs)
elif (node.type.is_int or node.type.is_float) and op == ast.Mod:
return self._handle_mod(node, lhs, rhs)
elif node.type.is_complex:
result = self._handle_complex_binop(lhs, op, rhs)
elif pointer_type:
if not node.left.type.is_pointer:
lhs, rhs = rhs, lhs
result = self.builder.gep(lhs, [rhs])
else:
logger.debug('Unrecognized node type "%s"' % node.type)
logger.debug(ast.dump(node))
raise error.NumbaError(
node, "Binary operations %s on values typed %s and %s "
"not (yet) supported" % (self.opname(op),
node.left.type,
node.right.type))
return result
#------------------------------------------------------------------------
# Coercions
#------------------------------------------------------------------------
def visit_CoercionNode(self, node, val=None):
if val is None:
val = self.visit(node.node)
if node.type == node.node.type:
return val
# logger.debug('Coerce %s --> %s', node.node.type, node.dst_type)
node_type = node.node.type
dst_type = node.dst_type
ldst_type = dst_type.to_llvm(self.context)
if node_type.is_pointer and dst_type.is_int:
val = self.builder.ptrtoint(val, ldst_type)
elif node_type.is_int and dst_type.is_pointer:
val = self.builder.inttoptr(val, ldst_type)
elif (dst_type.is_pointer or
dst_type.is_reference) and node_type.is_pointer:
val = self.builder.bitcast(val, ldst_type)
elif dst_type.is_complex and node_type.is_complex:
val = self._promote_complex(node_type, dst_type, val)
elif dst_type.is_complex and node_type.is_numeric:
ldst_base_type = dst_type.base_type.to_llvm(self.context)
real = val
if node_type != dst_type.base_type:
flags = {}
add_cast_flag_unsigned(flags, node_type, dst_type.base_type)
real = self.caster.cast(real, ldst_base_type, **flags)
imag = llvm.core.Constant.real(ldst_base_type, 0.0)
val = self._create_complex(real, imag)
elif dst_type.is_int and node_type.is_numpy_datetime and \
not isinstance(node.node, nodes.DateTimeAttributeNode):
return self.builder.extract_value(val, 0)
else:
flags = {}
add_cast_flag_unsigned(flags, node_type, dst_type)
val = self.caster.cast(val, node.dst_type.to_llvm(self.context),
**flags)
if debug.debug_conversion:
self.puts("Coercing %s to %s" % (node_type, dst_type))
return val
def visit_CoerceToObject(self, node):
from_type = node.node.type
result = self.visit(node.node)
if not self.is_obj(from_type):
result = self.object_coercer.convert_single(from_type, result,
name=node.name)
return result
def visit_CoerceToNative(self, node):
assert node.node.type.is_tuple
val = self.visit(node.node)
return self.object_coercer.to_native(node.dst_type, val,
name=node.name)
#------------------------------------------------------------------------
# Call Nodes
#------------------------------------------------------------------------
def visit_Call(self, node):
raise error.InternalError(node, "This node should have been replaced")
def visit_ObjectCallNode(self, node):
args_tuple = self.visit(node.args_tuple)
kwargs_dict = self.visit(node.kwargs_dict)
if node.function is None:
node.function = nodes.ObjectInjectNode(node.py_func)
lfunc_addr = self.visit(node.function)
# call PyObject_Call
largs = [lfunc_addr, args_tuple, kwargs_dict]
_, pyobject_call = self.context.external_library.declare(
self.llvm_module, 'PyObject_Call')
res = self.builder.call(pyobject_call, largs)
return self.caster.cast(res, node.variable.type.to_llvm(self.context))
def visit_NativeCallNode(self, node, largs=None):
if largs is None:
largs = self.visitlist(node.args)
return_value = llvm_codegen.handle_struct_passing(
self.builder, self.alloca, largs, node.signature)
if hasattr(node.llvm_func, 'module') and node.llvm_func.module != self.llvm_module:
lfunc = self.llvm_module.get_or_insert_function(node.llvm_func.type.pointee,
node.llvm_func.name)
else:
lfunc = node.llvm_func
result = self.builder.call(lfunc, largs)
if node.signature.struct_by_reference:
if minitypes.pass_by_ref(node.signature.return_type):
# TODO: TBAA
result = self.builder.load(return_value)
return result
def visit_NativeFunctionCallNode(self, node):
lfunc = self.visit(node.function)
node.llvm_func = lfunc
return self.visit_NativeCallNode(node)
    def visit_LLMacroNode(self, node):
return node.macro(self.context, self.builder,
*self.visitlist(node.args))
def visit_LLVMExternalFunctionNode(self, node):
lfunc_type = node.signature.to_llvm(self.context)
return self.llvm_module.get_or_insert_function(lfunc_type, node.fname)
def visit_LLVMIntrinsicNode(self, node):
intr = getattr(llvm.core, 'INTR_' + node.func_name)
largs = self.visitlist(node.args)
if largs:
ltypes = [largs[0].type]
else:
ltypes = []
node.llvm_func = llvm.core.Function.intrinsic(self.llvm_module,
intr,
ltypes)
return self.visit_NativeCallNode(node, largs=largs)
def visit_MathCallNode(self, node):
# Make sure we don't pass anything by reference
resty = node.signature.return_type.to_llvm()
argtys = [a.to_llvm() for a in node.signature.args]
lfunc_type = llvmtypes.function(resty, argtys)
        type_namespace = list(map(str, argtys))  # list() so the generated name is stable on Python 3
lfunc = self.llvm_module.get_or_insert_function(
lfunc_type, 'numba.math.%s.%s' % (type_namespace, node.name))
node.llvm_func = lfunc
largs = self.visitlist(node.args)
return self.builder.call(lfunc, largs)
def visit_IntrinsicNode(self, node):
args = self.visitlist(node.args)
return node.intrinsic.emit_code(self.lfunc, self.builder, args)
def visit_PointerCallNode(self, node):
node.llvm_func = self.visit(node.function)
return self.visit_NativeCallNode(node)
def visit_ClosureCallNode(self, node):
lfunc = node.closure_type.closure.lfunc
assert lfunc is not None
assert len(node.args) == node.expected_nargs + node.need_closure_scope
self.visit(node.func)
node.llvm_func = lfunc
return self.visit_NativeCallNode(node)
#------------------------------------------------------------------------
# Objects
#------------------------------------------------------------------------
def visit_List(self, node):
types = [n.type for n in node.elts]
largs = self.visitlist(node.elts)
return self.object_coercer.build_list(types, largs)
def visit_Tuple(self, node):
raise error.InternalError(node, "This node should have been replaced")
def visit_Dict(self, node):
key_types = [k.type for k in node.keys]
value_types = [v.type for v in node.values]
llvm_keys = self.visitlist(node.keys)
llvm_values = self.visitlist(node.values)
result = self.object_coercer.build_dict(key_types, value_types,
llvm_keys, llvm_values)
return result
def visit_ObjectInjectNode(self, node):
# FIXME: Currently uses the runtime address of the python function.
# Sounds like a hack.
self.keep_alive(node.object)
addr = id(node.object)
obj_addr_int = self.generate_constant_int(addr, typesystem.Py_ssize_t)
obj = self.builder.inttoptr(obj_addr_int,
node.type.to_llvm(self.context))
return obj
def visit_NoneNode(self, node):
try:
self.llvm_module.add_global_variable(object_.to_llvm(self.context),
"Py_None")
except llvm.LLVMException:
pass
return self.llvm_module.get_global_variable_named("Py_None")
#------------------------------------------------------------------------
# Complex Numbers
#------------------------------------------------------------------------
def visit_ComplexNode(self, node):
real = self.visit(node.real)
imag = self.visit(node.imag)
return self._create_complex(real, imag)
def visit_ComplexConjugateNode(self, node):
lcomplex = self.visit(node.complex_node)
elem_ltyp = node.type.base_type.to_llvm(self.context)
zero = llvm.core.Constant.real(elem_ltyp, 0)
imag = self.builder.extract_value(lcomplex, 1)
new_imag_lval = self.builder.fsub(zero, imag)
assert hasattr(self.builder, 'insert_value'), (
"llvm-py support for LLVMBuildInsertValue() required to build "
"code for complex conjugates.")
return self.builder.insert_value(lcomplex, new_imag_lval, 1)
def visit_ComplexAttributeNode(self, node):
result = self.visit(node.value)
if node.value.type.is_complex:
assert result.type.kind == llvm.core.TYPE_STRUCT, result.type
if node.attr == 'real':
return self.builder.extract_value(result, 0)
elif node.attr == 'imag':
return self.builder.extract_value(result, 1)
#------------------------------------------------------------------------
# DateTime
#------------------------------------------------------------------------
def visit_DateTimeNode(self, node):
timestamp = self.visit(node.timestamp)
units = self.visit(node.units)
return self._create_datetime(timestamp, units)
def visit_DateTimeAttributeNode(self, node):
result = self.visit(node.value)
if node.value.type.is_datetime:
assert result.type.kind == llvm.core.TYPE_STRUCT, result.type
if node.attr == 'timestamp':
return self.builder.extract_value(result, 0)
elif node.attr == 'units':
return self.builder.extract_value(result, 1)
def visit_NumpyDateTimeNode(self, node):
timestamp_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_datetime_str_to_timestamp", args=[node.datetime_string])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_datetime_str_to_units", args=[node.datetime_string])
newnode = nodes.DateTimeNode(timestamp_func, units_func)
return self.visit(newnode)
def visit_TimeDeltaNode(self, node):
diff = self.visit(node.diff)
units = self.visit(node.units)
return self._create_timedelta(diff, units)
def visit_NumpyTimeDeltaNode(self, node):
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_timedelta_units_str", args=[node.units_str])
newnode = nodes.TimeDeltaNode(nodes.CoercionNode(node.diff, int64),
units_func)
return self.visit(newnode)
def visit_TimeDeltaAttributeNode(self, node):
result = self.visit(node.value)
if node.value.type.is_timedelta:
assert result.type.kind == llvm.core.TYPE_STRUCT, result.type
if node.attr == 'diff':
return self.builder.extract_value(result, 0)
elif node.attr == 'units':
return self.builder.extract_value(result, 1)
#------------------------------------------------------------------------
# Structs
#------------------------------------------------------------------------
def struct_field(self, node, value):
value = self.builder.gep(
value, [llvm_types.constant_int(0),
llvm_types.constant_int(node.field_idx)])
return value
def visit_StructAttribute(self, node):
result = self.visit(node.value)
value_is_reference = node.value.type.is_reference
# print "referencing", node.struct_type, node.field_idx, node.attr
# TODO: TBAA for loads
if isinstance(node.ctx, ast.Load):
if value_is_reference:
# Struct reference, load result
result = self.struct_field(node, result)
result = self.builder.load(result)
else:
result = self.builder.extract_value(result, node.field_idx)
else:
if value_is_reference:
# Load alloca-ed struct pointer
result = self.builder.load(result)
result = self.struct_field(node, result)
#result = self.builder.insert_value(result, self.rhs_lvalue,
# node.field_idx)
return result
def visit_StructVariable(self, node):
return self.visit(node.node)
#------------------------------------------------------------------------
# Reference Counting
#------------------------------------------------------------------------
def visit_IncrefNode(self, node):
obj = self.visit(node.value)
self.incref(obj)
return obj
def visit_DecrefNode(self, node):
obj = self.visit(node.value)
self.decref(obj)
return obj
#------------------------------------------------------------------------
# Temporaries
#------------------------------------------------------------------------
def visit_TempNode(self, node):
if node.llvm_temp is None:
kwds = {}
if node.name:
kwds['name'] = node.name
value = self.alloca(node.type, **kwds)
node.llvm_temp = value
return node.llvm_temp
def visit_TempLoadNode(self, node):
# TODO: use unique type for each temporary load and store pair
temp = self.visit(node.temp)
instr = self.builder.load(temp, invariant=node.invariant)
self.tbaa.set_metadata(instr, node.temp.get_tbaa_node(self.tbaa))
return instr
def visit_TempStoreNode(self, node):
return self.visit(node.temp)
def visit_ObjectTempNode(self, node):
if isinstance(node.node, nodes.ObjectTempNode):
return self.visit(node.node)
bb = self.builder.basic_block
# Initialize temp to NULL at beginning of function
self.builder.position_at_beginning(self.lfunc.get_entry_basic_block())
lhs = self._null_obj_temp('objtemp')
node.llvm_temp = lhs
# Assign value
self.builder.position_at_end(bb)
rhs = self.visit(node.node)
self.generate_assign_stack(rhs, lhs, tbaa_type=object_,
decref=self.in_loop)
# goto error if NULL
# self.puts("checking error... %s" % error.format_pos(node))
self.object_coercer.check_err(rhs, pos_node=node.node)
# self.puts("all good at %s" % error.format_pos(node))
if node.incref:
self.incref(self.load_tbaa(lhs, object_))
# Generate Py_XDECREF(temp) at end-of-function cleanup path
self.xdecref_temp_cleanup(lhs)
result = self.load_tbaa(lhs, object_, name=lhs.name + '_load')
if not node.type == object_:
dst_type = node.type.to_llvm(self.context)
result = self.builder.bitcast(result, dst_type)
return result
def visit_PropagateNode(self, node):
# self.puts("ERROR! %s" % error.format_pos(node))
self.builder.branch(self.error_label)
def visit_ObjectTempRefNode(self, node):
return node.obj_temp_node.llvm_temp
#------------------------------------------------------------------------
# Arrays
#------------------------------------------------------------------------
def visit_DataPointerNode(self, node):
assert node.node.type.is_array
lvalue = self.visit(node.node)
lindices = self.visit(node.slice)
array_var = node.node.variable
ndarray = array_var.ndarray or self.ndarray(lvalue, node.node.type)
if not isinstance(lindices, collections.Iterable):
lindices = (lindices,)
lptr = ndarray.getptr(*lindices)
return self._handle_ctx(node, lptr, node.type.pointer())
#def visit_Index(self, node):
# return self.visit(node.value)
def visit_ExtSlice(self, node):
return self.visitlist(node.dims)
def visit_MultiArrayAPINode(self, node):
meth = getattr(self.multiarray_api, 'load_' + node.func_name)
lfunc = meth(self.llvm_module, self.builder)
lsignature = node.signature.pointer().to_llvm(self.context)
node.llvm_func = self.builder.bitcast(lfunc, lsignature)
result = self.visit_NativeCallNode(node)
return result
def pyarray_accessor(self, llvm_array_ptr, dtype):
return ndarray_helpers.PyArrayAccessor(self.builder, llvm_array_ptr,
self.tbaa, dtype)
def ndarray(self, llvm_array_ptr, type):
if issubclass(self.env.crnt.array, ndarray_helpers.NumpyArray):
return ndarray_helpers.NumpyArray(llvm_array_ptr, self.builder,
self.tbaa, type)
else:
return self.env.crnt.array(llvm_array_ptr, self.builder)
def visit_ArrayAttributeNode(self, node):
l_array = self.visit(node.array)
ndarray = self.ndarray(l_array, node.array.type)
if node.attr_name in ('shape', 'strides'):
attr_name = node.attr_name + '_ptr'
else:
attr_name = node.attr_name
result = getattr(ndarray, attr_name)
ltype = node.type.to_llvm(self.context)
if node.attr_name == 'data':
result = self.builder.bitcast(result, ltype)
return result
visit_ShapeAttributeNode = visit_ArrayAttributeNode
#------------------------------------------------------------------------
# Array Slicing
#------------------------------------------------------------------------
def declare(self, cbuilder_func):
func_def = self.context.cbuilder_library.declare(
cbuilder_func,
self.env,
self.llvm_module)
return func_def
def visit_NativeSliceNode(self, node):
"""
Slice an array. Allocate fake PyArray and allocate shape/strides
"""
llvmtype = lambda t: t.to_llvm()
shape_ltype = llvmtype(npy_intp.pointer())
# Create PyArrayObject accessors
view = self.visit(node.value)
view_accessor = ndarray_helpers.PyArrayAccessor(self.builder, view)
# TODO: change this attribute name to stack_allocate or something
if node.nopython:
# Stack-allocate array object
array_struct_ltype = llvmtype(float_[:]).pointee
view_copy = self.llvm_alloca(array_struct_ltype)
array_struct = self.builder.load(view)
self.builder.store(array_struct, view_copy)
view_copy_accessor = ndarray_helpers.PyArrayAccessor(self.builder,
view_copy)
else:
class NonMutatingPyArrayAccessor(object):
pass
view_copy_accessor = NonMutatingPyArrayAccessor()
# Stack-allocate shape/strides and update accessors
shape = self.alloca(node.shape_type)
strides = self.alloca(node.shape_type)
view_copy_accessor.data = view_accessor.data
view_copy_accessor.shape = self.builder.bitcast(shape, shape_ltype)
view_copy_accessor.strides = self.builder.bitcast(strides, shape_ltype)
# Patch and visit all children
for subslice in node.subslices:
subslice.view_accessor = view_accessor
subslice.view_copy_accessor = view_copy_accessor
# print ast.dump(node)
self.visitlist(node.subslices)
# Return fake or actual array
if node.nopython:
return view_copy
else:
# Update LLVMValueRefNode fields, build actual numpy array
void_p = void.pointer().to_llvm(self.context)
node.dst_data.llvm_value = self.builder.bitcast(
view_copy_accessor.data, void_p)
node.dst_shape.llvm_value = view_copy_accessor.shape
node.dst_strides.llvm_value = view_copy_accessor.strides
return self.visit(node.build_array_node)
def visit_SliceSliceNode(self, node):
"Handle slicing"
start, stop, step = node.start, node.stop, node.step
if start is not None:
start = self.visit(node.start)
if stop is not None:
stop = self.visit(node.stop)
if step is not None:
step = self.visit(node.step)
slice_func_def = sliceutils.SliceArray(self.context,
start is not None,
stop is not None,
step is not None)
slice_func = slice_func_def(self.llvm_module)
slice_func.linkage = llvm.core.LINKAGE_LINKONCE_ODR
data = node.view_copy_accessor.data
in_shape = node.view_accessor.shape
in_strides = node.view_accessor.strides
out_shape = node.view_copy_accessor.shape
out_strides = node.view_copy_accessor.strides
src_dim = llvm_types.constant_int(node.src_dim)
dst_dim = llvm_types.constant_int(node.dst_dim)
default = llvm_types.constant_int(0, C.npy_intp)
args = [data, in_shape, in_strides, out_shape, out_strides,
start or default, stop or default, step or default,
src_dim, dst_dim]
data_p = self.builder.call(slice_func, args)
node.view_copy_accessor.data = data_p
return None
def visit_SliceDimNode(self, node):
"Handle indexing and newaxes in a slice operation"
acc_copy = node.view_copy_accessor
acc = node.view_accessor
index_func = self.declare(sliceutils.IndexAxis)
newaxis_func = self.declare(sliceutils.NewAxis)
if node.type.is_int:
value = self.visit(nodes.CoercionNode(node.subslice, npy_intp))
args = [acc_copy.data, acc.shape, acc.strides,
llvm_types.constant_int(node.src_dim, C.npy_intp), value]
result = self.builder.call(index_func, args)
acc_copy.data = result
else:
args = [acc_copy.shape, acc_copy.strides,
llvm_types.constant_int(node.dst_dim)]
self.builder.call(newaxis_func, args)
return None
def visit_BroadcastNode(self, node):
shape = self.alloca(node.shape_type)
shape = self.builder.bitcast(shape, node.type.to_llvm(self.context))
# Initialize shape to ones
default_extent = llvm.core.Constant.int(C.npy_intp, 1)
for i in range(node.array_type.ndim):
dst = self.builder.gep(shape, [llvm.core.Constant.int(C.int, i)])
self.builder.store(default_extent, dst)
# Obtain broadcast function
func_def = self.declare(sliceutils.Broadcast)
# Broadcast all operands
for op in node.operands:
op_result = self.visit(op)
acc = ndarray_helpers.PyArrayAccessor(self.builder, op_result)
if op.type.is_array:
args = [shape, acc.shape, acc.strides,
llvm_types.constant_int(node.array_type.ndim),
llvm_types.constant_int(op.type.ndim)]
lresult = self.builder.call(func_def, args)
node.broadcast_retvals[op].llvm_value = lresult
# See if we had any errors broadcasting
self.visitlist(node.check_errors)
return shape
#------------------------------------------------------------------------
# Pointer Nodes
#------------------------------------------------------------------------
def visit_DereferenceNode(self, node):
result = self.visit(node.pointer)
return self.load_tbaa(result, node.type.pointer())
def visit_PointerFromObject(self, node):
result = self.visit(node.node)
return self.builder.bitcast(result, node.type.to_llvm())
#------------------------------------------------------------------------
# Constant Nodes
#------------------------------------------------------------------------
def visit_ConstNode(self, node):
type = node.type
ltype = type.to_llvm(self.context)
constant = node.pyval
if constnodes.is_null_constant(constant):
lvalue = llvm.core.Constant.null(ltype)
elif type.is_float:
lvalue = llvm.core.Constant.real(ltype, constant)
elif type.is_int:
if type.signed:
lvalue = llvm.core.Constant.int_signextend(ltype, constant)
else:
lvalue = llvm.core.Constant.int(ltype, constant)
elif type.is_string:
lvalue = self.env.constants_manager.get_string_constant(constant)
type_char_p = lts.pointer(lts.char)
lvalue = self.builder.bitcast(lvalue, type_char_p)
elif type.is_bool:
return self._bool_constants[constant]
elif type.is_function:
# lvalue = map_to_function(constant, type, self.mod)
raise NotImplementedError
elif type.is_object and not constnodes.is_null_constant(constant):
raise NotImplementedError("Use ObjectInjectNode")
else:
raise NotImplementedError("Constant %s of type %s" %
(constant, type))
return lvalue
#------------------------------------------------------------------------
# General Purpose Nodes
#------------------------------------------------------------------------
def visit_ExpressionNode(self, node):
self.visitlist(node.stmts)
return self.visit(node.expr)
def visit_LLVMValueRefNode(self, node):
assert node.llvm_value
return node.llvm_value
def visit_BadValue(self, node):
ltype = node.type.to_llvm(self.context)
node.llvm_value = llvm.core.Constant.undef(ltype)
return node.llvm_value
def visit_CloneNode(self, node):
return node.llvm_value
def visit_CloneableNode(self, node):
llvm_value = self.visit(node.node)
for clone_node in node.clone_nodes:
clone_node.llvm_value = llvm_value
return llvm_value
#------------------------------------------------------------------------
# User nodes
#------------------------------------------------------------------------
def visit_UserNode(self, node):
return node.codegen(self)
#
# Util
#
def add_cast_flag_unsigned(flags, lty, rty):
if lty.is_int:
flags['unsigned'] = not lty.signed
elif rty.is_int:
flags['unsigned'] = not rty.signed
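#------------------------------------------------------------------------
# Example sketch (not part of numba): shape broadcasting
#------------------------------------------------------------------------
# visit_BroadcastNode above initializes the output shape to ones and then
# folds every operand's shape into it. The helper below is a minimal
# pure-Python sketch of that broadcasting rule (dimensions are aligned on
# the right, an extent of 1 stretches, anything else must agree); the name
# _example_broadcast_shapes is illustrative only.
def _example_broadcast_shapes(*shapes):
    ndim = max(len(s) for s in shapes)
    result = [1] * ndim
    for shape in shapes:
        offset = ndim - len(shape)
        for i, extent in enumerate(shape):
            j = offset + i
            if result[j] == 1:
                result[j] = extent
            elif extent not in (1, result[j]):
                raise ValueError("shapes %r do not broadcast" % (shapes,))
    return tuple(result)
# e.g. _example_broadcast_shapes((3, 1), (4,)) == (3, 4)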
########NEW FILE########
__FILENAME__ = config
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
class Config(object):
# Add color to printed source code
colour = True
# Terminal background colour ("light" or "dark")
terminal_background = "dark"
config = Config()
########NEW FILE########
__FILENAME__ = orderedcontainer
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba as nb
from numba import *
from numba import nodes
from numba import typesystem
from numba.typesystem import get_type
import numpy as np
GROW = 2
def notimplemented(msg):
raise NotImplementedError("'%s' method" % msg)
def container_methods(item_type, notimplemented):
# NOTE: numba will use the global 'notimplemented' function, not the
# one passed in :(
@item_type(Py_ssize_t) # TODO: slicing!
def getitem(self, key):
if not (0 <= key < self.size):
# TODO: Implement raise !
# raise IndexError(key)
[][key] # tee hee
return self.buf[key]
@void(Py_ssize_t, item_type) # TODO: slice assignment!
def setitem(self, key, value):
if not (0 <= key < self.size):
# TODO: Implement raise !
# raise IndexError(key)
[][key]
self.buf[key] = value
@void(item_type)
def append(self, value):
size = self.size
if size >= self.buf.shape[0]:
# NOTE: initial bufsize must be greater than zero
self.buf.resize(int(size * GROW), refcheck=False)
self.buf[size] = value
self.size = size + 1
@void(object_)
def extend(self, iterable):
# TODO: something fast for common cases (e.g. typedlist,
# np.ndarray, etc)
for obj in iterable:
self.append(obj)
@Py_ssize_t(item_type)
def index(self, value):
# TODO: comparison of complex numbers (#121)
buf = self.buf
for i in range(self.size):
if buf[i] == value:
return i
[].index(value) # raise ValueError
@Py_ssize_t(item_type)
def count(self, value):
# TODO: comparison of complex numbers (#121)
count = 0
buf = self.buf
for i in range(self.size):
# TODO: promotion of (bool_, int_)
if buf[i] == value:
count += 1
return count
return locals()
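# Example sketch (not part of numba): append() above grows the backing numpy
# buffer geometrically (by the module-level GROW factor) once size reaches
# capacity, which keeps appends amortized O(1). Below is a minimal
# standalone illustration of the same policy; the class name and the
# initial_capacity argument are made up for the example.
class _ExampleGrowableBuffer(object):
    def __init__(self, dtype, initial_capacity=10):
        # initial capacity must be greater than zero, as noted above
        self.size = 0
        self.buf = np.empty(initial_capacity, dtype=dtype)
    def append(self, value):
        if self.size >= self.buf.shape[0]:
            # geometric growth: resize in place to GROW * current size
            self.buf.resize(int(self.size * GROW), refcheck=False)
        self.buf[self.size] = value
        self.size += 1
# e.g. buf = _ExampleGrowableBuffer(np.int64)
#      for i in range(1000): buf.append(i)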
#-----------------------------------------------------------------------
# Infer types for typed containers (typedlist, typedtuple)
#-----------------------------------------------------------------------
def typedcontainer_infer(compile_typedcontainer, type_node, iterable_node):
"""
Type inferer for typed containers, register with numba.register_inferer().
:param compile_typedcontainer: item_type -> typed container extension class
:param type_node: type parameter to typed container constructor
:param iterable_node: value parameter to typed container constructor (optional)
"""
assert type_node is not None
type = get_type(type_node)
if type.is_cast:
elem_type = type.dst_type
# Pre-compile typed list implementation
typedcontainer_ctor = compile_typedcontainer(elem_type)
# Inject the typedlist directly to avoid runtime implementation lookup
iterable_node = iterable_node or nodes.const(None, object_)
result = nodes.call_pyfunc(typedcontainer_ctor, (iterable_node,))
return nodes.CoercionNode(result, typedcontainer_ctor.exttype)
return object_
########NEW FILE########
__FILENAME__ = register
from functools import partial
from numba.containers import typedlist
from numba.containers import typedtuple
from numba.containers import orderedcontainer
from numba.type_inference.module_type_inference import register_inferer
#-----------------------------------------------------------------------
# Register type function for typedlist construction
#-----------------------------------------------------------------------
def infer_tlist(type_node, iterable_node):
return orderedcontainer.typedcontainer_infer(
typedlist.compile_typedlist, type_node, iterable_node)
register_inferer(typedlist, 'typedlist', infer_tlist, pass_in_types=False)
#-----------------------------------------------------------------------
# Register type function for typedtuple construction
#-----------------------------------------------------------------------
def infer_ttuple(type_node, iterable_node):
return orderedcontainer.typedcontainer_infer(
typedtuple.compile_typedtuple, type_node, iterable_node)
register_inferer(typedtuple, 'typedtuple', infer_ttuple, pass_in_types=False)
########NEW FILE########
__FILENAME__ = test_typed_list
from numba import *
import numba as nb
from numba.testing.test_support import autojit_py3doc
@autojit
def index(type):
"""
>>> index(int_)
['[0, 1, 2]', '0', '1', '2']
>>> assert index(int_) == index.py_func(int_)
>>> index(float_)
['[0.0, 1.0, 2.0]', '0.0', '1.0', '2.0']
>>> assert index(float_) == index.py_func(float_)
>>> index(complex128)
['[0j, (1+0j), (2+0j)]', '0j', '(1+0j)', '(2+0j)']
>>> assert index(complex128) == index.py_func(complex128)
"""
tlist = nb.typedlist(type)
tlist.append(0)
tlist.append(1)
tlist.append(2)
return [str(tlist), str(tlist[0]), str(tlist[1]), str(tlist[2])]
@autojit
def index_error(type):
"""
>>> index_error(int_)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> index_error(float_)
Traceback (most recent call last):
...
IndexError: list index out of range
"""
tlist = nb.typedlist(type)
tlist.append(0)
tlist.append(1)
tlist.append(2)
return tlist[4]
@autojit_py3doc
def append(type):
"""
>>> append(int_)
(0, 1, 2, 3)
"""
tlist = nb.typedlist(type)
l1 = len(tlist)
tlist.append(0)
l2 = len(tlist)
tlist.append(1)
l3 = len(tlist)
tlist.append(2)
l4 = len(tlist)
return l1, l2, l3, l4
@autojit_py3doc
def append_many(type):
"""
>>> append_many(int_)
1000
"""
tlist = nb.typedlist(type)
for i in range(1000):
tlist.append(i)
return len(tlist)
@autojit_py3doc
def pop(type):
"""
>>> pop(int_)
2
1
0
(3, 2, 1, 0)
"""
tlist = nb.typedlist(type)
for i in range(3):
tlist.append(i)
l1 = len(tlist)
print((tlist.pop()))
l2 = len(tlist)
print((tlist.pop()))
l3 = len(tlist)
print((tlist.pop()))
l4 = len(tlist)
return l1, l2, l3, l4
@autojit_py3doc
def pop_many(type):
"""
>>> pop_many(int_)
(1000, 0)
"""
tlist = nb.typedlist(type)
for i in range(1000):
tlist.append(i)
initial_length = len(tlist)
for i in range(1000):
tlist.pop()
return initial_length, len(tlist)
@autojit_py3doc
def from_iterable(type, iterable):
"""
>>> from_iterable(int_, [1, 2, 3])
[1, 2, 3]
>>> from_iterable(int_, (1, 2, 3))
[1, 2, 3]
>>> from_iterable(int_, (x for x in [1, 2, 3]))
[1, 2, 3]
>>> from_iterable(float_, [1, 2, 3])
[1.0, 2.0, 3.0]
>>> from_iterable(float_, (1, 2, 3))
[1.0, 2.0, 3.0]
>>> from_iterable(float_, (x for x in [1, 2, 3]))
[1.0, 2.0, 3.0]
>>> from_iterable(int_, [1, object(), 3])
Traceback (most recent call last):
...
TypeError: an integer is required
>>> from_iterable(int_, object())
Traceback (most recent call last):
...
TypeError: 'object' object is not iterable
"""
return nb.typedlist(type, iterable)
@autojit_py3doc
def test_insert(type):
"""
>>> test_insert(int_)
[0, 1, 2, 3, 4, 5]
"""
tlist = nb.typedlist(type, [1,3])
tlist.insert(0,0)
tlist.insert(2,2)
tlist.insert(4,4)
tlist.insert(8,5)
return tlist
@autojit_py3doc
def test_remove(type):
"""
>>> test_remove(int_)
4
3
2
[1, 3]
"""
tlist = nb.typedlist(type, range(5))
tlist.remove(0)
print (len(tlist))
tlist.remove(2)
print (len(tlist))
tlist.remove(4)
print (len(tlist))
return tlist
@autojit_py3doc
def test_count(type, L):
"""
>>> test_count(int_, [1, 2, 3, 4, 5, 1, 2])
(0, 1, 2)
"""
tlist = nb.typedlist(type, L)
return tlist.count(0), tlist.count(3), tlist.count(1)
@autojit_py3doc
def test_count_complex(type, L):
"""
>>> test_count_complex(complex128, [1+1j, 1+2j, 2+1j, 2+2j, 1+1j, 2+2j, 2+2j])
(1, 2, 3)
"""
tlist = nb.typedlist(type, L)
return tlist.count(1+2j), tlist.count(1+1j), tlist.count(2+2j)
@autojit_py3doc
def test_index(type):
"""
>>> test_index(int_)
(0, 2, 4)
"""
tlist = nb.typedlist(type, [5, 4, 3, 2, 1])
return tlist.index(5), tlist.index(3), tlist.index(1)
@autojit
def test_reverse(type, value):
"""
>>> test_reverse(int_, range(10))
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> test_reverse(int_, range(11))
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> test_reverse(float_, range(10))
[9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0]
>>> test_reverse(float_, range(11))
[10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0]
"""
tlist = nb.typedlist(type, value)
tlist.reverse()
return tlist
#@autojit
#def test_sort(type, value):
# """
# >>> test_sort(int_, range(5, 10) + range(5) + range(10, 15))
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
# """
# tlist = nb.typedlist(type, value)
# tlist.sort()
# return tlist
def test(module):
nb.testing.testmod(module)
if __name__ == "__main__":
import __main__ as module
else:
import test_typed_list as module
test(module)
__test__ = {}
########NEW FILE########
__FILENAME__ = test_typed_tuple
from numba import *
import numba as nb
@autojit
def test_count(type):
ttuple = nb.typedtuple(type, [1, 2, 3, 4, 5, 1, 2])
return ttuple.count(0), ttuple.count(3), ttuple.count(1)
def test(module):
assert test_count(int_) == (0, 1, 2)
if __name__ == "__main__":
import __main__ as module
else:
import test_typed_tuple as module
test(module)
__test__ = {}
########NEW FILE########
__FILENAME__ = typedlist
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba as nb
from numba import *
from numba.containers import orderedcontainer
import numpy as np
INITIAL_BUFSIZE = 10
SHRINK = 1.5
GROW = 2
def notimplemented(msg):
raise NotImplementedError("'%s' method of type 'typedlist'" % msg)
_list_cache = {}
#-----------------------------------------------------------------------
# Runtime Constructor
#-----------------------------------------------------------------------
def typedlist(item_type, iterable=None):
"""
>>> typedlist(int_)
[]
>>> tlist = typedlist(int_, range(10))
>>> tlist
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> tlist[5]
5L
>>> typedlist(float_, range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
"""
typedlist_ctor = compile_typedlist(item_type)
return typedlist_ctor(iterable)
#-----------------------------------------------------------------------
# Typedlist implementation
#-----------------------------------------------------------------------
def compile_typedlist(item_type, _list_cache=_list_cache):
# item_type_t = typesystem.CastType(item_type)
# dtype_t = typesystem.numpy_dtype(item_type)
if item_type in _list_cache:
return _list_cache[item_type]
dtype = item_type.get_dtype()
methods = orderedcontainer.container_methods(item_type, notimplemented)
@nb.jit(warn=False)
class typedlist(object):
@void(object_)
def __init__(self, iterable):
self.size = 0
# TODO: Use length hint of iterable for initial buffer size
self.buf = np.empty(INITIAL_BUFSIZE, dtype=dtype)
# TODO: implement 'is'/'is not'
if iterable != None:
self.extend(iterable)
# TODO: Jit __getitem__/__setitem__ of numba extension types
__getitem__ = methods['getitem']
__setitem__ = methods['setitem']
append = methods['append']
extend = methods['extend']
index = methods['index']
count = methods['count']
@item_type()
def pop(self):
# TODO: Optional argument 'index'
size = self.size - 1
if size<0:
[].pop()
item = self.buf[size]
self.size = size
if INITIAL_BUFSIZE < size < self.buf.shape[0] / 2:
self.buf.resize(int(SHRINK * size), refcheck=False)
return item
@void(Py_ssize_t, item_type)
def insert(self, index, value):
size = self.size
if size >= self.buf.shape[0]:
self.buf.resize(int(size * GROW), refcheck=False)
if index > size:
self.append(value)
else:
current = self.buf[index]
self.buf[index] = value
for i in range(index+1, size+1):
next = self.buf[i]
self.buf[i] = current
current = next
self.size = size + 1
@void(item_type)
def remove(self, value):
size = self.size
position = 0
found = False
if INITIAL_BUFSIZE < size < self.buf.shape[0]/2:
self.buf.resize(int(SHRINK * size), refcheck=False)
while position < size and not found:
if self.buf[position] == value:
found = True
else:
position += 1
if found:
for i in range(position, size):
self.buf[i] = self.buf[i+1]
self.size = size -1
# raise ValueError 'not in list'
@void()
def reverse(self):
buf = self.buf
size = self.size - 1
for i in range(self.size / 2):
tmp = buf[i]
buf[i] = buf[size - i]
buf[size - i] = tmp
@void()
def sort(self):
# TODO: optional arguments cmp, key, reverse
self.buf[:self.size].sort()
@Py_ssize_t()
def __len__(self):
return self.size
@nb.c_string_type()
def __repr__(self):
buf = ", ".join([str(self.buf[i]) for i in range(self.size)])
return "[" + buf + "]"
_list_cache[item_type] = typedlist
return typedlist
if __name__ == "__main__":
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = typedtuple
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from functools import partial
import numba as nb
from numba.containers import orderedcontainer
import numpy as np
INITIAL_BUFSIZE = 5
def notimplemented(msg):
raise NotImplementedError("'%s' method of type 'typedtuple'" % msg)
_tuple_cache = {}
#-----------------------------------------------------------------------
# Runtime Constructor
#-----------------------------------------------------------------------
def typedtuple(item_type, iterable=None, _tuple_cache=_tuple_cache):
"""
>>> typedtuple(nb.int_)
()
>>> ttuple = typedtuple(nb.int_, range(10))
>>> ttuple
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
>>> ttuple[5]
5L
>>> typedtuple(nb.float_, range(10))
(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
"""
typedtuple_ctor = compile_typedtuple(item_type)
return typedtuple_ctor(iterable)
#-----------------------------------------------------------------------
# Typedlist implementation
#-----------------------------------------------------------------------
def compile_typedtuple(item_type, _tuple_cache=_tuple_cache):
if item_type in _tuple_cache:
return _tuple_cache[item_type]
dtype = item_type.get_dtype()
methods = orderedcontainer.container_methods(item_type, notimplemented)
@nb.jit(warn=False)
class typedtuple(object):
@nb.void(nb.object_)
def __init__(self, iterable):
self.size = 0
# TODO: Use length hint of iterable for initial buffer size
self.buf = np.empty(INITIAL_BUFSIZE, dtype=dtype)
if iterable != None:
self.__extend(iterable)
__getitem__ = methods['getitem']
__append = methods['append']
index = methods['index']
count = methods['count']
@nb.void(nb.object_)
def __extend(self, iterable):
for obj in iterable:
self.__append(obj)
@nb.Py_ssize_t()
def __len__(self):
return self.size
@nb.c_string_type()
def __repr__(self):
buf = ", ".join([str(self.buf[i]) for i in range(self.size)])
return "(" + buf + ")"
_tuple_cache[item_type] = typedtuple
return typedtuple
if __name__ == "__main__":
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = block
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
########NEW FILE########
__FILENAME__ = cfstats
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import nodes
from numba.reporting import getpos
class StatementDescr(object):
is_assignment = False
class LoopDescr(object):
def __init__(self, next_block, loop_block):
self.next_block = next_block
self.loop_block = loop_block
self.exceptions = []
class ExceptionDescr(object):
"""Exception handling helper.
entry_point ControlBlock Exception handling entry point
finally_enter ControlBlock Normal finally clause entry point
finally_exit ControlBlock Normal finally clause exit point
"""
def __init__(self, entry_point, finally_enter=None, finally_exit=None):
self.entry_point = entry_point
self.finally_enter = finally_enter
self.finally_exit = finally_exit
class NameAssignment(object):
is_assignment = True
def __init__(self, lhs, rhs, entry, assignment_node, warn_unused=True):
if not hasattr(lhs, 'cf_state'):
lhs.cf_state = set()
if not hasattr(lhs, 'cf_is_null'):
lhs.cf_is_null = False
self.lhs = lhs
self.rhs = rhs
self.assignment_node = assignment_node
self.entry = entry
self.pos = getpos(lhs)
self.refs = set()
self.is_arg = False
self.is_deletion = False
# NOTE: this is imperfect, since it means warnings are disabled for
# *all* definitions in the function...
self.entry.warn_unused = warn_unused
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
def infer_type(self, scope):
return self.rhs.infer_type(scope)
def type_dependencies(self, scope):
return self.rhs.type_dependencies(scope)
class AttributeAssignment(object):
"""
Assignment to some attribute. We need to detect assignments in the
constructor of extension types.
"""
def __init__(self, assmnt):
self.assignment_node = assmnt
self.lhs = assmnt.targets[0]
self.rhs = assmnt.value
class Argument(NameAssignment):
def __init__(self, lhs, rhs, entry):
NameAssignment.__init__(self, lhs, rhs, entry)
self.is_arg = True
class PhiNode(nodes.Node):
def __init__(self, block, variable):
self.block = block
# Unrenamed variable. This will be replaced by the renamed version
self.variable = variable
self.type = None
# self.incoming_blocks = []
# Set of incoming variables
self.incoming = set()
self.phis = set()
self.assignment_node = self
@property
def entry(self):
return self.variable
def add_incoming_block(self, block):
self.incoming_blocks.append(block)
def add(self, block, assmnt):
if assmnt is not self:
self.phis.add((block, assmnt))
def __repr__(self):
lhs = self.variable.name
if self.variable.renamed_name:
lhs = self.variable.unmangled_name
incoming = ", ".join("var(%s, %s)" % (var_in.unmangled_name, var_in.type)
for var_in in self.incoming)
if self.variable.type:
type = str(self.variable.type)
else:
type = ""
return "%s %s = phi(%s)" % (type, lhs, incoming)
def find_incoming(self):
for parent_block in self.block.parents:
name = self.variable.name
incoming_var = parent_block.symtab.lookup_most_recent(name)
yield parent_block, incoming_var
class NameDeletion(NameAssignment):
def __init__(self, lhs, entry):
NameAssignment.__init__(self, lhs, lhs, entry)
self.is_deletion = True
class Uninitialized(object):
pass
class NameReference(object):
def __init__(self, node, entry):
if not hasattr(node, 'cf_state'):
node.cf_state = set()
self.node = node
self.entry = entry
self.pos = getpos(node)
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
########NEW FILE########
__FILENAME__ = control_flow
# -*- coding: utf-8 -*-
"""
Control flow for the AST backend.
Adapted from Cython/Compiler/FlowControl.py
"""
from __future__ import print_function, division, absolute_import
import re
import ast
import copy
from functools import reduce
from numba import error, visitors, symtab, nodes, reporting
from numba import *
from numba.control_flow import graphviz, reaching
from numba.control_flow.cfstats import *
from numba.control_flow.debug import *
class ControlBlock(nodes.LowLevelBasicBlockNode):
"""
Control flow graph node. Sequence of assignments and name references.
This is simultaneously an AST node.
children set of children nodes
parents set of parent nodes
positions set of position markers
stats list of block statements
gen dict of assignments generated by this block
bound set of entries that are definitely bounded in this block
Example:
a = 1
b = a + c # 'c' is already bounded or exception here
stats = [Assignment(a), NameReference(a), NameReference(c),
Assignment(b)]
gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
bound = set([Entry(a), Entry(c)])
"""
_fields = ['phi_nodes', 'body']
def __init__(self, id, label='empty', have_code=True,
is_expr=False, is_exit=False, pos=None,
is_fabricated=False):
if pos:
label = "%s_%s" % (label, error.format_pos(pos).rstrip(": "))
super(ControlBlock, self).__init__(body=[], label=label)
self.id = id
self.children = set()
self.parents = set()
self.positions = set()
self.stats = []
self.gen = {}
self.bound = set()
# Same as i_input/i_output but for reaching defs with sets
self.input = set()
self.output = set()
self.i_input = 0
self.i_output = 0
self.i_gen = 0
self.i_kill = 0
self.i_state = 0
self.is_expr = is_expr
self.is_exit = is_exit
self.have_code = have_code
# TODO: Make these bits
# Set of blocks that dominate this block
self.dominators = set()
# Set of blocks where our dominance stops
self.dominance_frontier = set()
        # SSA Φ locations. Maps Variables to a list of (basic_block, definition)
# There can be only one reaching definition, since each variable is
# assigned only once
self.phis = {}
self.phi_nodes = []
# Promotions at the end of the block to have a consistent promoted
        # Φ type at one of our children.
self.promotions = {} # (renamed_var_name, dst_type) -> promotion_node
# LLVM entry and exit blocks. The entry block is the block before the
# body is evaluated, the exit block the block after the body is
# evaluated.
self.exit_block = None
self.phi_block = None
self.exit_block = None
self.promotions = set()
self.symtab = None
self.is_fabricated = is_fabricated
# If set to True, branch from the previous basic block to this basic
# block
self.branch_here = False
def empty(self):
return (not self.stats and not self.positions and not self.phis)
def detach(self):
"""Detach block from parents and children."""
for child in self.children:
child.parents.remove(self)
for parent in self.parents:
parent.children.remove(self)
self.parents.clear()
self.children.clear()
def add_child(self, block):
self.children.add(block)
block.parents.add(self)
def reparent(self, new_block):
"""
Re-parent all children to the new block
"""
for child in self.children:
child.parents.remove(self)
new_block.add_child(child)
def delete(self, flow):
"""
Delete a block from the cfg.
"""
for parent in self.parents:
parent.children.remove(self)
for child in self.children:
child.parents.remove(self)
flow.blocks.remove(self)
def __repr__(self):
return 'Block(%d)' % self.id
def __getattr__(self, attr):
if attr in ('variable', 'type', 'ctx'):
return getattr(self.body[0], attr)
raise AttributeError
def __setattr__(self, attr, value):
if attr in ('variable', 'type'):
setattr(self.body[0], attr, value)
else:
super(ControlBlock, self).__setattr__(attr, value)
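# Example sketch (not part of numba): ControlBlock above carries per-block
# bit masks (i_gen, i_kill, i_input, i_output) that
# ControlFlow.reaching_definitions (further down) iterates to a fixed point
# with i_output = (i_input & ~i_kill) | i_gen. The standalone helper below
# runs that iteration on plain dicts; all names and the sample masks are
# illustrative only.
def _example_reaching_definitions(blocks, preds, gen, kill):
    inp = dict.fromkeys(blocks, 0)
    out = dict((b, gen[b]) for b in blocks)
    dirty = True
    while dirty:
        dirty = False
        for b in blocks:
            i = 0
            for p in preds[b]:
                i |= out[p]
            o = (i & ~kill[b]) | gen[b]
            if o != out[b] or i != inp[b]:
                inp[b], out[b] = i, o
                dirty = True
    return dict((b, (inp[b], out[b])) for b in blocks)
# e.g. two definitions of one variable (bits 0b01 and 0b10), block 1 killing both:
# _example_reaching_definitions([0, 1], {0: [], 1: [0]},
#                               {0: 0b01, 1: 0b10}, {0: 0b11, 1: 0b11})
# == {0: (0, 1), 1: (1, 2)}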
class ExitBlock(ControlBlock):
"""Non-empty exit point block."""
def empty(self):
return False
class AssignmentList:
def __init__(self):
self.stats = []
class ControlFlow(object):
"""
Control-flow graph.
entry_point ControlBlock entry point for this graph
exit_point ControlBlock normal exit point
block ControlBlock current block
blocks set children nodes
entries set tracked entries
loops list stack for loop descriptors
exceptions list stack for exception descriptors
"""
def __init__(self, env, source_descr):
self.env = env
self.source_descr = source_descr
self.blocks = []
self.entries = set()
self.loops = []
self.exceptions = []
self.entry_point = ControlBlock(-1, label='entry')
self.exit_point = ExitBlock(0, label='exit')
self.block = self.entry_point
def newblock(self, parent=None, **kwargs):
"""
Create floating block linked to `parent` if given.
Does NOT set the current block to the new block.
"""
block = ControlBlock(len(self.blocks), **kwargs)
self.blocks.append(block)
if parent:
parent.add_child(block)
return block
def nextblock(self, parent=None, **kwargs):
"""
Create child block linked to current or `parent` if given.
Sets the current block to the new block.
"""
block = self.newblock(parent, **kwargs)
if not parent and self.block:
self.block.add_child(block)
self.block = block
return block
def exit_block(self, parent=None, **kwargs):
"""
Create a floating exit block. This can later be added to self.blocks.
This is useful to ensure topological order.
"""
block = self.newblock(parent, have_code=False, is_exit=True, **kwargs)
self.blocks.pop()
return block
def add_exit(self, exit_block):
"Add an exit block after visiting the body"
exit_block.id = len(self.blocks)
self.blocks.append(exit_block)
def is_listcomp_var(self, name):
return re.match(r"_\[\d+\]", name)
def is_tracked(self, entry):
return (# entry.renameable and not
entry.name not in self.env.translation.crnt.locals and not
self.is_listcomp_var(entry.name))
def mark_position(self, node):
"""Mark position, will be used to draw graph nodes."""
if self.block:
src_descr = self.source_descr
pos = (src_descr,) + getpos(node)
self.block.positions.add(pos)
def mark_assignment(self, lhs, rhs, entry, assignment, warn_unused=True):
if self.block:
if not self.is_tracked(entry):
return
assignment = NameAssignment(lhs, rhs, entry, assignment,
warn_unused=warn_unused)
self.block.stats.append(assignment)
self.block.gen[entry] = assignment
self.entries.add(entry)
return assignment
def mark_argument(self, lhs, rhs, entry):
if self.block and self.is_tracked(entry):
assignment = Argument(lhs, rhs, entry)
self.block.stats.append(assignment)
self.block.gen[entry] = assignment
self.entries.add(entry)
def mark_deletion(self, node, entry):
if self.block and self.is_tracked(entry):
assignment = NameDeletion(node, entry)
self.block.stats.append(assignment)
self.block.gen[entry] = Uninitialized
self.entries.add(entry)
def mark_reference(self, node, entry):
if self.block and self.is_tracked(entry):
self.block.stats.append(NameReference(node, entry))
# Local variable is definitely bound after this reference
if not reaching.allow_null(node):
self.block.bound.add(entry)
self.entries.add(entry)
def normalize(self):
"""Delete unreachable and orphan blocks."""
blocks = set(self.blocks)
queue = set([self.entry_point])
visited = set()
while queue:
root = queue.pop()
visited.add(root)
for child in root.children:
if child not in visited:
queue.add(child)
unreachable = blocks - visited
for block in unreachable:
block.detach()
visited.remove(self.entry_point)
for block in visited:
if block.empty():
for parent in block.parents: # Re-parent
for child in block.children:
parent.add_child(child)
block.detach()
unreachable.add(block)
blocks -= unreachable
self.blocks = [block for block in self.blocks if block in blocks]
def initialize(self):
"""Set initial state, map assignments to bits."""
self.assmts = {}
offset = 0
for entry in self.entries:
assmts = AssignmentList()
assmts.bit = 1 << offset
assmts.mask = assmts.bit
self.assmts[entry] = assmts
offset += 1
for block in self.blocks:
block.stats = block.phis.values() + block.stats
for stat in block.stats:
if isinstance(stat, (PhiNode, NameAssignment)):
stat.bit = 1 << offset
assmts = self.assmts[stat.entry]
assmts.stats.append(stat)
assmts.mask |= stat.bit
offset += 1
for block in self.blocks:
for entry, stat in block.gen.items():
assmts = self.assmts[entry]
if stat is Uninitialized:
block.i_gen |= assmts.bit
else:
block.i_gen |= stat.bit
block.i_kill |= assmts.mask
block.i_output = block.i_gen
for entry in block.bound:
block.i_kill |= self.assmts[entry].bit
for assmts in self.assmts.itervalues():
self.entry_point.i_gen |= assmts.bit
self.entry_point.i_output = self.entry_point.i_gen
def map_one(self, istate, entry):
"Map the bitstate of a variable to the definitions it represents"
ret = set()
assmts = self.assmts[entry]
if istate & assmts.bit:
ret.add(Uninitialized)
for assmt in assmts.stats:
if istate & assmt.bit:
ret.add(assmt)
return ret
def reaching_definitions(self):
"""Per-block reaching definitions analysis."""
dirty = True
while dirty:
dirty = False
for block in self.blocks:
i_input = 0
for parent in block.parents:
i_input |= parent.i_output
i_output = (i_input & ~block.i_kill) | block.i_gen
if i_output != block.i_output:
dirty = True
block.i_input = i_input
block.i_output = i_output
def initialize_sets(self):
"""
Set initial state, run after SSA. There is only ever one live
definition of a variable in a block, so we can simply track input
and output definitions as the Variable/Entry they came as.
"""
for block in self.blocks:
# Insert phi nodes from SSA stage into the assignments of the block
for phi in block.phis:
block.gen.setdefault(phi, []).insert(0, phi)
# Update the kill set with the variables that are assigned to in
# the block
block.kill = set(block.gen)
block.output = set(block.gen)
#for entry in block.bound:
# block.i_kill |= self.assmts[entry].bit
for assmts in self.assmts.itervalues():
self.entry_point.i_gen |= assmts.bit
self.entry_point.i_output = self.entry_point.i_gen
def compute_dominators(self):
"""
Compute the dominators for the CFG, i.e. for each basic block the
        set of basic blocks that dominate that block. This means that every
        path from the entry block to that block must go through the blocks
        in the dominator set.
        dominators(x) = {x} ∪ (∩ dominators(y) for y ∈ preds(x))
"""
blocks = set(self.blocks)
for block in self.blocks:
block.dominators = blocks
changed = True
while changed:
changed = False
for block in self.blocks:
parent_dominators = [parent.dominators for parent in block.parents]
new_doms = set.intersection(block.dominators, *parent_dominators)
new_doms.add(block)
if new_doms != block.dominators:
block.dominators = new_doms
changed = True
def immediate_dominator(self, x):
"""
The dominator of x that is dominated by all other dominators of x.
This is the block that has the largest dominator set.
"""
candidates = x.dominators - set([x])
if not candidates:
return None
result = max(candidates, key=lambda b: len(b.dominators))
ndoms = len(result.dominators)
assert len([b for b in candidates if len(b.dominators) == ndoms]) == 1
return result
def compute_dominance_frontier(self):
"""
Compute the dominance frontier for all blocks. This indicates for
each block where dominance stops in the CFG. We use this as the place
        to insert Φ functions, since at the dominance frontier there are
multiple control flow paths to the block, which means multiple
variable definitions can reach there.
"""
if debug:
print("Dominator sets:")
for block in self.blocks:
print((block.id, sorted(block.dominators, key=lambda b: b.id)))
blocks = []
for block in self.blocks:
if block.parents:
block.idom = self.immediate_dominator(block)
block.visited = False
blocks.append(block)
self.blocks = blocks
def visit(block, result):
block.visited = True
for child in block.children:
if not child.visited:
visit(child, result)
result.append(block)
#postorder = []
#visit(self.blocks[0], postorder)
postorder = self.blocks[::-1]
# Compute dominance frontier
for x in postorder:
for y in x.children:
if y.idom is not x:
# We are not an immediate dominator of our successor, add
# to frontier
x.dominance_frontier.add(y)
for z in self.blocks:
if z.idom is x:
for y in z.dominance_frontier:
if y.idom is not x:
x.dominance_frontier.add(y)
def update_for_ssa(self, ast, symbol_table):
"""
1) Compute phi nodes
for each variable v
1) insert empty phi nodes in dominance frontier of each block
that defines v
2) this phi defines a new assignment in each block in which
it is inserted, so propagate (recursively)
2) Reaching definitions
Set block-local symbol table for each block.
This is a rudimentary form of reaching definitions, but we can
do it in a single pass because all assignments are known (since
we inserted the phi functions, which also count as assignments).
This means the output set is known up front for each block
and never changes. After setting all output sets, we can compute
the input sets in a single pass:
1) compute output sets for each block
2) compute input sets for each block
3) Update phis with incoming variables. The incoming variables are
last assignments of the predecessor blocks in the CFG.
"""
# Print dominance frontier
if debug:
print("Dominance frontier:")
for block in self.blocks:
print(('DF(%d) = %s' % (block.id, block.dominance_frontier)))
argnames = [name.id for name in ast.args.args]
#
### 1) Insert phi nodes in the right places
#
for name, variable in symbol_table.iteritems():
if not variable.renameable:
continue
defining = []
for b in self.blocks:
if variable in b.gen:
defining.append(b)
for defining_block in defining:
for f in defining_block.dominance_frontier:
phi = f.phis.get(variable, None)
if phi is None:
phi = PhiNode(f, variable)
f.phis[variable] = phi
defining.append(f)
#
### 2) Reaching definitions and variable renaming
#
# Set originating block for each variable (as if each variable were
# initialized at the start of the function) and start renaming of
# variables
symbol_table.counters = dict.fromkeys(symbol_table, -1) # var_name -> counter
self.blocks[0].symtab = symbol_table
for var_name, var in symbol_table.items():
if var.renameable:
new_var = symbol_table.rename(var, self.blocks[0])
new_var.uninitialized = var.name not in argnames
self.rename_assignments(self.blocks[0])
for block in self.blocks[1:]:
block.symtab = symtab.Symtab(parent=block.idom.symtab)
for var, phi_node in block.phis.iteritems():
phi_node.variable = block.symtab.rename(var, block)
phi_node.variable.name_assignment = phi_node
phi_node.variable.is_phi = True
self.rename_assignments(block)
#
### 3) Update the phis with all incoming entries
#
for block in self.blocks:
# Insert phis in AST
block.phi_nodes = block.phis.values()
for variable, phi in block.phis.iteritems():
for parent in block.parents:
incoming_var = parent.symtab.lookup_most_recent(variable.name)
phi.incoming.add(incoming_var)
phi.variable.uninitialized |= incoming_var.uninitialized
# Update def-use chain
incoming_var.cf_references.append(phi)
def rename_assignments(self, block):
lastvars = dict(block.symtab)
for stat in block.stats:
if (isinstance(stat, NameAssignment) and
stat.assignment_node and
stat.entry.renameable):
# print "setting", stat.lhs, hex(id(stat.lhs))
stat.lhs.variable = block.symtab.rename(stat.entry, block)
stat.lhs.variable.name_assignment = stat
elif isinstance(stat, NameReference) and stat.entry.renameable:
current_var = block.symtab.lookup_most_recent(stat.entry.name)
stat.node.variable = current_var
current_var.cf_references.append(stat.node)
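# Example sketch (not part of numba): compute_dominators and
# compute_dominance_frontier above describe the textbook fixpoint
# dominators(x) = {x} ∪ (∩ dominators(p) for p ∈ preds(x)) and the
# dominance-frontier rule used to place Φ nodes in update_for_ssa. The
# standalone helper below runs that fixpoint on a toy diamond CFG; the
# block names and dict shapes are illustrative only.
def _example_dominators(blocks, preds, entry):
    doms = dict((b, set(blocks)) for b in blocks)
    doms[entry] = set([entry])
    changed = True
    while changed:
        changed = False
        for b in blocks:
            if b == entry:
                continue
            parent_doms = [doms[p] for p in preds[b]]
            new = set.intersection(*parent_doms) if parent_doms else set()
            new.add(b)
            if new != doms[b]:
                doms[b] = new
                changed = True
    return doms
# Diamond CFG: entry -> a, entry -> b, a -> exit, b -> exit
# _example_dominators(["entry", "a", "b", "exit"],
#                     {"entry": [], "a": ["entry"], "b": ["entry"],
#                      "exit": ["a", "b"]}, "entry")
# gives {"entry", "exit"} as the dominators of "exit": neither a nor b
# dominates exit, so exit lies in the dominance frontier of both, which is
# where a Φ for a variable assigned in a and in b must be inserted.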
class FuncDefExprNode(nodes.Node):
"""
Wraps an inner function node until the closure code kicks in.
"""
_fields = ['func_def']
class ControlFlowAnalysis(visitors.NumbaTransformer):
"""
Control flow analysis pass that builds the CFG and injects the blocks
into the AST (but not all blocks are injected).
    The CFG must be built in topological DFS order, e.g. the 'if' condition
block must precede the clauses and the clauses must precede the exit.
"""
graphviz = False
gv_ctx = None
source_descr = None
function_level = 0
def __init__(self, context, func, ast, allow_rebind_args, env, **kwargs):
super(ControlFlowAnalysis, self).__init__(context, func, ast, env=env,
**kwargs)
self.visitchildren = self.generic_visit
self.current_directives = kwargs.get('directives', None) or {}
self.current_directives['warn'] = kwargs.get('warn', True)
self.set_default_directives()
self.symtab = self.initialize_symtab(allow_rebind_args)
self.graphviz = self.current_directives['control_flow.dot_output']
if self.graphviz:
self.gv_ctx = graphviz.GVContext()
self.source_descr = reporting.SourceDescr(func, ast)
# Stack of control flow blocks
self.stack = []
flow = ControlFlow(self.env, self.source_descr)
self.env.translation.crnt.flow = flow
self.flow = flow
# TODO: Use the message collection from the environment
# messages = reporting.MessageCollection()
messages = env.crnt.error_env.collection
self.warner = reaching.CFWarner(messages, self.current_directives)
if env:
if hasattr(env, 'translation'):
env.translation.crnt.cfg_transform = self
def set_default_directives(self):
"Set some defaults for warnings"
warn = self.current_directives['warn']
self.current_directives.setdefault('warn.maybe_uninitialized', warn)
self.current_directives.setdefault('warn.unused_result', False)
self.current_directives.setdefault('warn.unused', warn)
self.current_directives.setdefault('warn.unused_arg', warn)
self.current_directives.setdefault('control_flow.dot_output', dot_output_graph)
self.current_directives.setdefault('control_flow.dot_annotate_defs', False)
def initialize_symtab(self, allow_rebind_args):
"""
Populate the symbol table with variables and set their renaming status.
Variables appearing in locals, or arguments typed through the 'jit'
decorator are not renameable.
"""
symbols = symtab.Symtab(self.symtab)
for var_name in self.local_names:
variable = symtab.Variable(None, name=var_name, is_local=True)
# Set cellvar status. Free variables are not assignments, and
# are caught in the type inferencer
variable.is_cellvar = var_name in self.cellvars
# variable.is_freevar = var_name in self.freevars
variable.renameable = (
var_name not in self.locals and not
(variable.is_cellvar or variable.is_freevar) and
(var_name not in self.argnames or allow_rebind_args))
symbols[var_name] = variable
return symbols
def visit(self, node):
if hasattr(node, 'lineno'):
self.mark_position(node)
if not self.flow.block:
# Unreachable code
# NOTE: removing this here means there is no validation of the
# unreachable code!
self.warner.warn_unreachable(node)
return None
return super(ControlFlowAnalysis, self).visit(node)
def handle_inner_function(self, node):
"Create assignment code for inner functions and mark the assignment"
lhs = ast.Name(node.name, ast.Store())
ast.copy_location(lhs, node)
rhs = FuncDefExprNode(func_def=node)
ast.copy_location(rhs, node)
fields = rhs._fields
rhs._fields = []
assmnt = ast.Assign(targets=[lhs], value=rhs)
result = self.visit(assmnt)
rhs._fields = fields
return result
def visit_FunctionDef(self, node):
#for arg in node.args:
# if arg.default:
# self.visitchildren(arg)
if self.function_level:
return self.handle_inner_function(node)
self.function_level += 1
self.visitlist(node.decorator_list)
self.stack.append(self.flow)
# Collect all entries
for var_name, var in self.symtab.iteritems():
if var_name not in self.locals:
self.flow.entries.add(var)
self.flow.nextblock(label='entry')
self.mark_position(node)
# Function body block
node.body_block = self.flow.nextblock()
for arg in node.args.args:
if hasattr(arg, 'id') and hasattr(arg, 'ctx'):
self.visit_Name(arg)
else:
self.visit_arg(arg, node.lineno, 0)
self.visitlist(node.body)
self.function_level -= 1
# Exit point
self.flow.add_exit(self.flow.exit_point)
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
# Cleanup graph
# self.flow.normalize()
reaching.check_definitions(self.env, self.flow, self.warner)
# self.render_gv(node)
self.flow.compute_dominators()
self.flow.compute_dominance_frontier()
self.flow.update_for_ssa(self.ast, self.symtab)
return node
def render_gv(self, node):
graphviz.render_gv(node, self.gv_ctx, self.flow, self.current_directives)
def mark_assignment(self, lhs, rhs=None, assignment=None, warn_unused=True):
assert self.flow.block
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
if not rhs:
rhs = None
lhs = self.visit(lhs)
name_assignment = None
if isinstance(lhs, ast.Name):
name_assignment = self.flow.mark_assignment(
lhs, rhs, self.symtab[lhs.name], assignment,
warn_unused=warn_unused)
        # TODO: Generate a fake RHS for the for-loop iteration target variable
elif (isinstance(lhs, (ast.Attribute, nodes.TempStoreNode)) and
self.flow.block and assignment is not None):
self.flow.block.stats.append(AttributeAssignment(assignment))
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
return lhs, name_assignment
def mark_position(self, node):
"""Mark position if DOT output is enabled."""
if self.current_directives['control_flow.dot_output']:
self.flow.mark_position(node)
def visit_Assign(self, node):
node.value = self.visit(node.value)
if len(node.targets) == 1 and isinstance(node.targets[0],
(ast.Tuple, ast.List)):
node.targets = node.targets[0].elts
for i, target in enumerate(node.targets):
# target = self.visit(target)
maybe_unused_node = isinstance(target, nodes.MaybeUnusedNode)
if maybe_unused_node:
target = target.name_node
lhs, name_assignment = self.mark_assignment(target, node.value,
assignment=node,
warn_unused=not maybe_unused_node)
node.targets[i] = lhs
# print "mark assignment", self.flow.block, lhs
return node
def visit_AugAssign(self, node):
"""
Inplace assignment.
Resolve a += b to a = a + b. Set 'inplace_op' attribute of the
Assign node so later stages may recognize inplace assignment.
Do this now, so that we can correctly mark the RHS reference.
"""
target = node.target
rhs_target = copy.deepcopy(target)
rhs_target.ctx = ast.Load()
ast.fix_missing_locations(rhs_target)
bin_op = ast.BinOp(rhs_target, node.op, node.value)
assignment = ast.Assign([target], bin_op)
assignment.inplace_op = node.op
return self.visit(assignment)
def visit_arg(self, old_node, lineno, col_offset):
node = nodes.Name(old_node.arg, ast.Param())
node.lineno = lineno
node.col_offset = col_offset
return self._visit_Name(node)
def visit_Name(self, old_node):
node = nodes.Name(old_node.id, old_node.ctx)
ast.copy_location(node, old_node)
return self._visit_Name(node)
def _visit_Name(self, node):
# Set some defaults
node.cf_maybe_null = True
node.cf_is_null = False
node.allow_null = False
node.name = node.id
if isinstance(node.ctx, ast.Param):
var = self.symtab[node.name]
var.is_arg = True
self.flow.mark_assignment(node, None, var, assignment=None)
elif isinstance(node.ctx, ast.Load):
var = self.symtab.lookup(node.name)
if var:
# Local variable
self.flow.mark_reference(node, var)
# Set position of assignment of this definition
if isinstance(node.ctx, (ast.Param, ast.Store)):
var = self.symtab[node.name]
if var.lineno == -1:
var.lineno = getattr(node, "lineno", 0)
var.col_offset = getattr(node, "col_offset", 0)
return node
def visit_MaybeUnusedNode(self, node):
self.symtab[node.name_node.id].warn_unused = False
return self.visit(node.name_node)
def visit_Suite(self, node):
if self.flow.block:
for i, stat in enumerate(node.body):
node.body[i] = self.visit(stat)
if not self.flow.block:
stat.is_terminator = True
break
return node
def visit_ImportFrom(self, node):
for name, target in node.names:
if name != "*":
self.mark_assignment(target, assignment=node)
self.visitchildren(node)
return node
def exit_block(self, exit_block, node):
node.exit_block = exit_block
self.flow.add_exit(exit_block)
if exit_block.parents:
self.flow.block = exit_block
else:
self.flow.block = None
return node
def visit_If(self, node):
exit_block = self.flow.exit_block(label='exit_if', pos=node)
# Condition
cond_block = self.flow.nextblock(self.flow.block, label='if_cond',
is_expr=True, pos=node.test)
node.test = self.visit(node.test)
# Body
if_block = self.flow.nextblock(label='if_body', pos=node.body[0])
self.visitlist(node.body)
if self.flow.block:
self.flow.block.add_child(exit_block)
# Else clause
if node.orelse:
else_block = self.flow.nextblock(cond_block,
label='else_body',
pos=node.orelse[0])
self.visitlist(node.orelse)
if self.flow.block:
self.flow.block.add_child(exit_block)
else:
cond_block.add_child(exit_block)
else_block = None
new_node = nodes.build_if(cond_block=cond_block, test=node.test,
if_block=if_block, body=node.body,
else_block=else_block, orelse=node.orelse,
exit_block=exit_block)
ast.copy_location(new_node, node)
return self.exit_block(exit_block, new_node)
def _visit_loop_body(self, node, if_block=None, is_for=None):
"""
Visit body of while and for loops and handle 'else' clause
"""
loop_name = "for" if is_for else "while"
if if_block:
node.if_block = if_block
else:
node.if_block = self.flow.nextblock(label="%s_body" % loop_name,
pos=node.body[0])
self.visitlist(node.body)
self.flow.loops.pop()
if self.flow.block:
# Add back-edge
self.flow.block.add_child(node.cond_block)
# Else clause
if node.orelse:
node.else_block = self.flow.nextblock(
parent=node.cond_block,
label="else_clause_%s" % loop_name,
pos=node.orelse[0])
self.visitlist(node.orelse)
if self.flow.block:
self.flow.block.add_child(node.exit_block)
else:
node.cond_block.add_child(node.exit_block)
self.exit_block(node.exit_block, node)
def visit_While(self, node):
node.cond_block = self.flow.nextblock(label='while_condition',
pos=node.test)
node.exit_block = self.flow.exit_block(label='exit_while', pos=node)
# Condition block
self.flow.loops.append(LoopDescr(node.exit_block, node.cond_block))
node.test = self.visit(node.test)
self._visit_loop_body(node)
return ast.copy_location(nodes.build_while(**vars(node)), node)
def visit_For(self, node):
# Evaluate iterator in previous block
node.iter = self.visit(node.iter)
# Start condition block
node.cond_block = self.flow.nextblock(label='for_condition',
pos=node.iter)
node.exit_block = self.flow.exit_block(label='exit_for', pos=node)
self.flow.loops.append(LoopDescr(node.exit_block, node.cond_block))
# Target assignment
if_block = self.flow.nextblock(label='loop_body', pos=node.body[0])
#node.target_block = self.flow.nextblock(label='for_target',
# pos=node.target)
node.target, name_assignment = self.mark_assignment(
node.target, assignment=None, warn_unused=False)
self._visit_loop_body(node, if_block=if_block, is_for=True)
node = ast.copy_location(nodes.For(**vars(node)), node)
if name_assignment:
name_assignment.assignment_node = node
return node
def visit_ListComp(self, node):
"""
Rewrite list comprehensions to the equivalent for loops.
AST syntax:
ListComp(expr elt, comprehension* generators)
comprehension = (expr target, expr iter, expr* ifs)
'ifs' represent a chain of ANDs
"""
assert len(node.generators) > 0
# Create innermost body, i.e. list.append(expr)
# TODO: size hint for PyList_New
list_create = ast.List(elts=[], ctx=ast.Load())
list_create.type = object_ # typesystem.list_()
list_create = nodes.CloneableNode(list_create)
list_value = nodes.CloneNode(list_create)
list_append = ast.Attribute(list_value, "append", ast.Load())
append_call = ast.Call(func=list_append, args=[node.elt],
keywords=[], starargs=None, kwargs=None)
# Build up the loops from inwards to outwards
body = append_call
for comprehension in reversed(node.generators):
            # Handle the 'if' clause
ifs = comprehension.ifs
if len(ifs) > 1:
make_boolop = lambda op1_op2: ast.BoolOp(op=ast.And(),
values=op1_op2)
if_test = reduce(make_boolop, ifs)
elif len(ifs) == 1:
if_test, = ifs
else:
if_test = None
if if_test is not None:
body = ast.If(test=if_test, body=[body], orelse=[])
# Wrap list.append() call or inner loops
body = ast.For(target=comprehension.target,
iter=comprehension.iter, body=[body], orelse=[])
expr = nodes.ExpressionNode(stmts=[list_create, body], expr=list_value)
return self.visit(expr)
def visit_GeneratorExp(self, node):
raise error.NumbaError(
node, "Generator comprehensions are not yet supported")
def visit_SetComp(self, node):
raise error.NumbaError(
node, "Set comprehensions are not yet supported")
def visit_DictComp(self, node):
raise error.NumbaError(
node, "Dict comprehensions are not yet supported")
def visit_With(self, node):
node.context_expr = self.visit(node.context_expr)
if node.optional_vars:
# TODO: Mark these as assignments!
            # Note: This is currently caught in validators.py!
node.optional_vars = self.visit(node.optional_vars)
self.visitlist(node.body)
return node
def visit_Raise(self, node):
self.visitchildren(node)
if self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
# self.flow.block = None
return node
def visit_Return(self, node):
self.visitchildren(node)
for exception in self.flow.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(self.flow.exit_point)
break
else:
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
self.flow.block = None
return node
def visit_Break(self, node):
if not self.flow.loops:
#error(node.pos, "break statement not inside loop")
return node
loop = self.flow.loops[-1]
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.next_block)
break
else:
self.flow.block.add_child(loop.next_block)
#self.flow.nextblock(parent=loop.next_block)
self.flow.block = None
return node
def visit_Continue(self, node):
if not self.flow.loops:
#error(node.pos, "continue statement not inside loop")
return node
loop = self.flow.loops[-1]
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.loop_block)
break
else:
self.flow.block.add_child(loop.loop_block)
self.flow.block = None
return node
def visit_Print(self, node):
self.generic_visit(node)
return node
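# Example sketch (not part of numba): visit_AugAssign above rewrites
# `a += b` into `a = a + b` and tags the new Assign with `inplace_op` so
# later stages can still detect the in-place form. The helper below shows
# the same rewrite for a simple Name target using only the stdlib ast
# module already imported here; attribute/subscript targets would need the
# deepcopy + ctx swap used in visit_AugAssign itself.
def _example_desugar_augassign(node):
    rhs_target = ast.Name(id=node.target.id, ctx=ast.Load())
    bin_op = ast.BinOp(left=rhs_target, op=node.op, right=node.value)
    assign = ast.Assign(targets=[node.target], value=bin_op)
    assign.inplace_op = node.op
    return ast.copy_location(assign, node)
# e.g. _example_desugar_augassign(ast.parse("a += b").body[0]) yields an
# Assign equivalent to `a = a + b` with inplace_op set to the Add node.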
########NEW FILE########
__FILENAME__ = debug
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import logging
debug = False
#debug = True
logger = logging.getLogger(__name__)
if debug:
logger.setLevel(logging.DEBUG)
debug_cfg = False
#debug_cfg = True
if debug_cfg:
dot_output_graph = os.path.expanduser("~/cfg.dot")
else:
dot_output_graph = False
########NEW FILE########
__FILENAME__ = delete_cfnode
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba import visitors
class DeleteStatement(visitors.NumbaVisitor):
"""
Delete a (compound) statement that contains basic blocks.
The statement must be at the start of the entry block.
idom: the immediate dominator of
"""
def __init__(self, flow):
self.flow = flow
def visit_If(self, node):
self.generic_visit(node)
# Visit ControlBlocks
self.visit(node.cond_block)
self.visit(node.if_block)
if node.orelse:
self.visit(node.else_block)
if node.exit_block:
self.visit(node.exit_block)
visit_While = visit_If
def visit_For(self, node):
self.generic_visit(node)
# Visit ControlBlocks
self.visit(node.cond_block)
self.visit(node.if_block)
if node.orelse:
self.visit(node.else_block)
if node.exit_block:
self.visit(node.exit_block)
def visit_ControlBlock(self, node):
#print "deleting block", node
for phi in node.phi_nodes:
for incoming in phi.incoming:
#print "deleting", incoming, phi
incoming.cf_references.remove(phi)
self.generic_visit(node)
node.delete(self.flow)
def visit_Name(self, node):
references = node.variable.cf_references
if isinstance(node.ctx, ast.Load) and node in references:
references.remove(node)
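# Usage sketch (an assumption about the intended call pattern, inferred from the
# constructor above; 'dead_if_node' is a hypothetical AST node):
#
#     DeleteStatement(flow).visit(dead_if_node)
#
# which unregisters phi references and removes the statement's basic blocks from
# the flow graph.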
########NEW FILE########
__FILENAME__ = graphviz
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import subprocess
from numba.control_flow.debug import *
from numba.control_flow.cfstats import NameReference, NameAssignment
class GVContext(object):
"""Graphviz subgraph object."""
def __init__(self):
self.blockids = {}
self.nextid = 0
self.children = []
self.sources = {}
def add(self, child):
self.children.append(child)
def nodeid(self, block):
if block not in self.blockids:
self.blockids[block] = 'block%d' % self.nextid
self.nextid += 1
return self.blockids[block]
def extract_sources(self, block):
if not block.positions:
return ''
start = min(block.positions)
stop = max(block.positions)
srcdescr = start[0]
if not srcdescr in self.sources:
self.sources[srcdescr] = list(srcdescr.get_lines())
lines = self.sources[srcdescr]
src_descr, begin_line, begin_col = start
src_descr, end_line, end_col = stop
lines = lines[begin_line - 1:end_line]
if not lines:
return ''
#lines[0] = lines[0][begin_col:]
#lines[-1] = lines[-1][:end_col]
return '\\n'.join([line.strip() for line in lines if line.strip()])
def render(self, fp, name, annotate_defs=False):
"""Render graphviz dot graph"""
fp.write('digraph %s {\n' % name)
fp.write(' node [shape=box];\n')
for child in self.children:
child.render(fp, self, annotate_defs)
fp.write('}\n')
def escape(self, text):
return text.replace('"', '\\"').replace('\n', '\\n')
class GV(object):
"""
Graphviz DOT renderer.
"""
def __init__(self, name, flow):
self.name = name
self.flow = flow
def format_phis(self, block):
result = "\\l".join(str(phi) for var, phi in block.phis.iteritems())
return result
def render(self, fp, ctx, annotate_defs=False):
fp.write(' subgraph %s {\n' % self.name)
for block in self.flow.blocks:
if block.have_code:
code = ctx.extract_sources(block)
if annotate_defs:
for stat in block.stats:
if isinstance(stat, NameAssignment):
code += '\n %s [definition]' % stat.entry.name
elif isinstance(stat, NameReference):
if stat.entry:
code += '\n %s [reference]' % stat.entry.name
else:
code = ""
if block.have_code and block.label == 'empty':
label = ''
else:
label = '%s: ' % block.label
phis = self.format_phis(block)
label = '%d\\l%s%s\\n%s' % (block.id, label, phis, code)
pid = ctx.nodeid(block)
fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
for block in self.flow.blocks:
pid = ctx.nodeid(block)
for child in block.children:
fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
fp.write(' }\n')
#----------------------------------------------------------------------------
# Graphviz Rendering
#----------------------------------------------------------------------------
def get_png_output_name(dot_output):
prefix, ext = os.path.splitext(dot_output)
i = 0
while True:
png_output = "%s%d.png" % (prefix, i)
if not os.path.exists(png_output):
break
i += 1
return png_output
def write_dotfile(current_directives, dot_output, gv_ctx):
annotate_defs = current_directives['control_flow.dot_annotate_defs']
fp = open(dot_output, 'wt')
try:
gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
finally:
fp.close()
def write_image(dot_output):
png_output = get_png_output_name(dot_output)
fp = open(png_output, 'wb')
try:
p = subprocess.Popen(['dot', '-Tpng', dot_output],
stdout=fp.fileno(),
stderr=subprocess.PIPE)
p.wait()
except EnvironmentError as e:
logger.warn("Unable to write png: %s (did you install the "
"'dot' program?). Wrote %s" % (e, dot_output))
else:
logger.warn("Wrote %s" % png_output)
finally:
fp.close()
def render_gv(node, gv_ctx, flow, current_directives):
gv_ctx.add(GV(node.name, flow))
dot_output = current_directives['control_flow.dot_output']
if dot_output:
write_dotfile(current_directives, dot_output, gv_ctx)
write_image(dot_output)
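# Usage sketch: the dot file written above can also be rendered by hand with the
# graphviz command line tools, e.g. ``dot -Tpng cfg.dot -o cfg.png``. The output
# path comes from the 'control_flow.dot_output' directive (see the commented-out
# ``directives={'control_flow.dot_output': 'out.dot'}`` hint in the CFG test
# further below); exactly how that directive is passed in is an assumption here.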
########NEW FILE########
__FILENAME__ = reaching
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import traits
from numba.control_flow.cfstats import (
NameReference, NameAssignment, Uninitialized)
def allow_null(node):
return False
def check_definitions(env, flow, warner):
flow.initialize()
flow.reaching_definitions()
# Track down state
assignments = set()
# Node to entry map
references = {}
assmt_nodes = set()
for block in flow.blocks:
i_state = block.i_input
for stat in block.stats:
if not isinstance(stat, (NameAssignment, NameReference)):
continue
i_assmts = flow.assmts[stat.entry]
state = flow.map_one(i_state, stat.entry)
if isinstance(stat, NameAssignment):
stat.lhs.cf_state.update(state)
assmt_nodes.add(stat.lhs)
i_state = i_state & ~i_assmts.mask
if stat.is_deletion:
i_state |= i_assmts.bit
else:
i_state |= stat.bit
assignments.add(stat)
# if stat.rhs is not fake_rhs_expr:
stat.entry.cf_assignments.append(stat)
elif isinstance(stat, NameReference):
references[stat.node] = stat.entry
stat.entry.cf_references.append(stat)
stat.node.cf_state.update(state)
if not allow_null(stat.node):
i_state &= ~i_assmts.bit
state.discard(Uninitialized)
for assmt in state:
assmt.refs.add(stat)
# assignment hints
for node in assmt_nodes:
maybe_null = Uninitialized in node.cf_state
node.cf_maybe_null = maybe_null
node.cf_is_null = maybe_null and len(node.cf_state) == 1
warner.check_uninitialized(references)
warner.warn_unused_result(assignments)
warner.warn_unused_entries(flow)
if warner.have_errors:
warner.messages.report(post_mortem=False)
for node in assmt_nodes:
node.cf_state = None #ControlFlowState(node.cf_state)
for node in references:
node.cf_state = None #ControlFlowState(node.cf_state)
@traits.traits
class CFWarner(object):
"Generate control flow related warnings."
have_errors = traits.Delegate('messages')
def __init__(self, message_collection, directives):
self.messages = message_collection
self.directives = directives
def check_uninitialized(self, references):
"Find uninitialized references and cf-hints"
warn_maybe_uninitialized = self.directives['warn.maybe_uninitialized']
for node, entry in references.iteritems():
if Uninitialized in node.cf_state:
node.cf_maybe_null = True
from_closure = False # entry.from_closure
if not from_closure and len(node.cf_state) == 1:
node.cf_is_null = True
if allow_null(node) or from_closure: # or entry.is_pyclass_attr:
pass # Can be uninitialized here
elif node.cf_is_null:
is_object = True #entry.type.is_pyobject
is_unspecified = False #entry.type.is_unspecified
error_on_uninitialized = False #entry.error_on_uninitialized
if entry.renameable and (is_object or is_unspecified or
error_on_uninitialized):
self.messages.error(
node,
"local variable '%s' referenced before assignment"
% entry.name)
else:
self.messages.warning(
node,
"local variable '%s' referenced before assignment"
% entry.name)
elif warn_maybe_uninitialized:
self.messages.warning(
node,
"local variable '%s' might be referenced before assignment"
% entry.name)
else:
node.cf_is_null = False
node.cf_maybe_null = False
def warn_unused_entries(self, flow):
"""
        Generate warnings for unused variables or arguments. This is issued when
        an argument or variable is entirely unused in the function.
"""
warn_unused = self.directives['warn.unused']
warn_unused_arg = self.directives['warn.unused_arg']
for entry in flow.entries:
if (not entry.cf_references and not entry.is_cellvar and
entry.renameable): # and not entry.is_pyclass_attr
if entry.is_arg:
if warn_unused_arg:
self.messages.warning(
entry, "Unused argument '%s'" % entry.name)
else:
if (warn_unused and entry.warn_unused and
not entry.name.startswith('_') and
flow.is_tracked(entry)):
if getattr(entry, 'lineno', 1) > 0:
self.messages.warning(
entry, "Unused variable '%s'" % entry.name)
entry.cf_used = False
def warn_unused_result(self, assignments):
"""
Warn about unused variable definitions. This is issued for individual
definitions, e.g.
i = 0 # this definition generates a warning
i = 1
print i
"""
warn_unused_result = self.directives['warn.unused_result']
for assmt in assignments:
if not assmt.refs:
if assmt.entry.cf_references and warn_unused_result:
if assmt.is_arg:
self.messages.warning(
assmt, "Unused argument value '%s'" %
assmt.entry.name)
else:
self.messages.warning(
assmt, "Unused result in '%s'" %
assmt.entry.name)
assmt.lhs.cf_used = False
def warn_unreachable(self, node):
"Generate a warning for unreachable code"
if hasattr(node, 'lineno'):
self.messages.warning(node, "Unreachable code")
########NEW FILE########
__FILENAME__ = ssa
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from itertools import chain
from numba import nodes
from .debug import logger
#------------------------------------------------------------------------
# Kill unused Phis
#------------------------------------------------------------------------
def kill_unused_phis(cfg):
changed = True
while changed:
changed = _kill_unused_phis(cfg)
def kill_phi(block, phi):
logger.debug("Killing phi: %s", phi)
block.symtab.pop(phi.variable.renamed_name)
for incoming_var in phi.incoming:
# A single definition can reach a block multiple times,
# remove all references
refs = [ref for ref in incoming_var.cf_references
if ref.variable is not phi.variable]
incoming_var.cf_references = refs
def kill_unused_phis(cfg):
"""
Used before running type inference.
Kill phis which are not referenced. We need to do this bottom-up,
i.e. in reverse topological dominator-tree order, since in SSA
a definition always lexically precedes a reference.
This is important, since it kills any unnecessary promotions (e.g.
ones to object, which LLVM wouldn't be able to optimize out).
TODO: Kill phi cycles, or do reachability analysis before inserting phis.
"""
changed = False
for block in cfg.blocks[::-1]:
phi_nodes = []
for i, phi in enumerate(block.phi_nodes):
if phi.variable.cf_references:
# Used phi
# logger.info("Used phi %s, %s" % (phi, phi.variable.cf_references))
phi_nodes.append(phi)
else:
# Unused phi
changed = True
kill_phi(block, phi)
block.phi_nodes = phi_nodes
return changed
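# Illustrative note: the fixpoint loop in kill_unused_phis is needed because
# removing one phi can make another one dead. Sketch (hypothetical names):
#
#     x_2 = phi(x_0, x_1)   # referenced only by x_3
#     x_3 = phi(x_2, x_1)   # never referenced
#
# The first pass kills x_3; only a subsequent pass can then see that x_2 has
# become unreferenced and kill it as well.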
#------------------------------------------------------------------------
# Iterate over all phi nodes or variables
#------------------------------------------------------------------------
def iter_phis(flow):
"Iterate over all phi nodes"
return chain(*[block.phi_nodes for block in flow.blocks])
def iter_phi_vars(flow):
"Iterate over all phi nodes"
for phi_node in iter_phis(flow):
yield phi_node.variable
#------------------------------------------------------------------------
# Specialization code for SSA
#------------------------------------------------------------------------
def specialize_ssa(funcdef):
"""
Handle phi nodes:
1) Handle incoming variables which are not initialized. Set
incoming_variable.uninitialized_value to a constant 'bad'
value (e.g. 0xbad for integers, NaN for floats, NULL for
objects)
2) Handle incoming variables which need promotions. An incoming
variable needs a promotion if it has a different type than
           the phi. The promotion happens in each ancestor block that
defines the variable which reaches us.
Promotions are set separately in the symbol table, since the
ancestor may not be our immediate parent, we cannot introduce
a rename and look up the latest version since there may be
multiple different promotions. So during codegen, we first
check whether incoming_type == phi_type, and otherwise we
look up the promotion in the parent block or an ancestor.
"""
for phi_node in iter_phis(funcdef.flow):
specialize_phi(phi_node)
def specialize_phi(node):
for parent_block, incoming_var in node.find_incoming():
if incoming_var.type.is_uninitialized:
incoming_type = incoming_var.type.base_type or node.type
bad = nodes.badval(incoming_type)
incoming_var.type.base_type = incoming_type
incoming_var.uninitialized_value = bad
# print incoming_var
elif not incoming_var.type == node.type:
# Create promotions for variables with phi nodes in successor
# blocks.
incoming_symtab = incoming_var.block.symtab
if (incoming_var, node.type) not in node.block.promotions:
# Make sure we only coerce once for each destination type and
# each variable
incoming_var.block.promotions.add((incoming_var, node.type))
# Create promotion node
name_node = nodes.Name(id=incoming_var.renamed_name,
ctx=ast.Load())
name_node.variable = incoming_var
name_node.type = incoming_var.type
coercion = name_node.coerce(node.type)
promotion = nodes.PromotionNode(node=coercion)
# Add promotion node to block body
incoming_var.block.body.append(promotion)
promotion.variable.block = incoming_var.block
# Update symtab
incoming_symtab.promotions[incoming_var.name,
node.type] = promotion
else:
promotion = incoming_symtab.lookup_promotion(
incoming_var.name, node.type)
return node
#------------------------------------------------------------------------
# Handle phis during code generation
#------------------------------------------------------------------------
def process_incoming(phi_node):
"""
Add all incoming phis to the phi instruction.
Handle promotions by using the promoted value from the incoming block.
E.g.
bb0: if C:
bb1: x = 2
else:
bb2: x = 2.0
bb3: x = phi(x_bb1, x_bb2)
has a promotion for 'x' in basic block 1 (from int to float).
"""
var = phi_node.variable
phi = var.lvalue
for parent_block, incoming_var in phi_node.find_incoming():
if incoming_var.type.is_uninitialized:
pass
elif not incoming_var.type == phi_node.type:
promotion = parent_block.symtab.lookup_promotion(var.name,
phi_node.type)
incoming_var = promotion.variable
assert incoming_var.lvalue, incoming_var
assert parent_block.exit_block, parent_block
phi.add_incoming(incoming_var.lvalue,
parent_block.exit_block)
def handle_phis(flow):
"""
Update all our phi nodes after translation is done and all Variables
have their llvm values set.
"""
if flow is None:
return
for phi_node in iter_phis(flow):
process_incoming(phi_node)
########NEW FILE########
__FILENAME__ = test_ast_cfg
import numba
from numba import *
if not numba.PY3:
#@jit(void(int_)) # directives={'control_flow.dot_output': 'out.dot'})
#@jit(void, [int_], backend='bytecode')
@jit(void(int_))
def func(x):
i = 0
#y = 12
h = 30
print(i)
while i < 10:
if x > i:
print(x)
y = 14
else:
print(y)
i = i + 1
print(y)
print(i)
print(y)
#@jit(void())
def _for_loop_fn_0():
acc = 0.
for value in range(10):
acc += value
return acc
#@jit(void(int_, float_))
def func(a, b):
if a:
c = 2
else:
c = double(4)
if a:
c = 4
#while a < 4:
# for i in range(10):
# b = 9
print(b)
if __name__ == '__main__':
pass
########NEW FILE########
__FILENAME__ = test_cfg_type_infer
import numpy as np
from numba.testing.test_support import *
from numba import typesystem
from numba import pipeline, environment, functions, error
def construct_infer_pipeline():
env = environment.NumbaEnvironment.get_environment()
return env.get_pipeline('type_infer')
def functype(restype=None, argtypes=()):
return typesystem.function(return_type=restype, args=list(argtypes))
def lookup(block, var_name):
var = None
try:
var = block.symtab.lookup_most_recent(var_name)
except (AssertionError, KeyError):
if block.idom:
var = lookup(block.idom, var_name)
return var
def types(symtab, *varnames):
return tuple(symtab[varname].type for varname in varnames)
def infer(func, signature=functype(), warn=True, **kwargs):
func_ast = functions._get_ast(func)
env = environment.NumbaEnvironment.get_environment(kwargs.get('env', None))
infer_pipe = env.get_or_add_pipeline('infer', construct_infer_pipeline)
kwargs.update(warn=warn, pipeline_name='infer')
pipe, (signature, symtab, func_ast) = pipeline.run_pipeline2(
env, func, func_ast, signature, **kwargs)
last_block = func_ast.flow.blocks[-2]
symbols = {}
#for block in ast.flow.blocks: print block.symtab
for var_name, var in symtab.iteritems():
if not var.parent_var and not var.is_constant:
var = lookup(last_block, var_name)
if var:
symbols[var_name] = var
return signature, symbols
class Value(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "Value(%s)" % self.value
def __int__(self):
return self.value
values = [Value(i) for i in range(10)]
@autojit
def test_reassign(obj):
"""
>>> test_reassign(object())
'hello'
>>> sig, syms = infer(test_reassign.py_func, functype(None, [object_]))
>>> sig
string (*)(object)
>>> syms['obj'].type
string
"""
obj = 1
obj = 1.0
obj = 1 + 4j
obj = 2
obj = "hello"
return obj
@autojit
def test_if_reassign(obj1, obj2):
"""
>>> test_if_reassign(*values[:2])
(4.0, 5.0)
>>> sig, syms = infer(test_if_reassign.py_func,
... functype(None, [object_] * 2))
>>> types(syms, 'obj1', 'obj2')
(float64, object)
"""
x = 4.0
y = 5.0
z = 6.0
if int(obj1) < int(obj2):
obj1 = x
obj2 = y
else:
obj1 = z
return obj1, obj2
@autojit
def test_if_reassign2(value, obj1, obj2):
"""
>>> test_if_reassign2(0, *values[:2])
(4.0, 5.0, 'egel')
>>> test_if_reassign2(1, *values[:2])
('hello', 'world', 'hedgehog')
>>> test_if_reassign2(2, *values[:2])
([Value(0)], Value(12), 'igel')
>>> sig, syms = infer(test_if_reassign2.py_func,
... functype(None, [int_, object_, object_]))
>>> types(syms, 'obj1', 'obj2', 'obj3')
(object, object, string)
"""
x = 4.0
y = 5.0
z = "hedgehog"
if value < 1:
obj1 = x
obj2 = y
obj3 = "egel"
elif value < 2:
obj1 = "hello"
obj2 = "world"
obj3 = z
else:
obj1 = [obj1]
obj2 = Value(12)
obj3 = "igel"
return obj1, obj2, obj3
@autojit_py3doc
def test_for_reassign(obj1, obj2, obj3, obj4):
"""
>>> test_for_reassign(*values[:4])
(9, Value(1), 2, 5)
>>> sig, syms = infer(test_for_reassign.py_func,
... functype(None, [object_] * 4))
>>> types(syms, 'obj1', 'obj2', 'obj3')
(object, object, int)
"""
for i in range(10):
obj1 = i
for i in range(0):
obj2 = i
for i in range(10):
obj3 = i
else:
obj3 = 2 # This definition kills any previous definition
for i in range(5, 10):
obj4 = i
break
else:
obj4 = 0
return obj1, obj2, obj3, obj4
@autojit_py3doc
def test_while_reassign(obj1, obj2, obj3, obj4):
"""
>>> test_while_reassign(*values[:4])
(9, Value(1), 2, 5)
>>> sig, syms = infer(test_while_reassign.py_func,
... functype(None, [object_] * 4))
>>> types(syms, 'obj1', 'obj2', 'obj3', 'obj4')
(object, object, int, int)
"""
i = 0
while i < 10:
obj1 = i
i += 1
i = 0
while i < 0:
obj2 = i
i += 1
i = 0
while i < 10:
obj3 = i
i += 1
else:
obj3 = 2 # This definition kills any previous definition
i = 5
while i < 10:
obj4 = i
i += 1
break
else:
obj4 = 0
return obj1, obj2, obj3, obj4
@autojit(warn=False)
def test_conditional_assignment(value):
"""
>>> test_conditional_assignment(0)
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], dtype=float32)
>>> test_conditional_assignment(1)
Traceback (most recent call last):
...
UnboundLocalError: 210:11: obj1
"""
if value < 1:
obj1 = np.ones(10, dtype=np.float32)
return obj1
#
### Test for errors
#
# @autojit
# def test_error_array_variable1(value, obj1):
# """
# >>> test_error_array_variable1(0, object())
# Traceback (most recent call last):
# ...
# TypeError: Arrays must have consistent types in assignment for variable 'obj1': 'float32[:]' and 'object_'
# """
# if value < 1:
# obj1 = np.empty(10, dtype=np.float32)
#
# return obj1
def test():
from . import test_cfg_type_infer
testmod(test_cfg_type_infer)
if __name__ == '__main__':
testmod()
#else:
# test()
########NEW FILE########
__FILENAME__ = test_circular_type_inference
from numba.control_flow.tests.test_cfg_type_infer import *
from numba.testing.test_support import autojit_py3doc
@autojit_py3doc(warnstyle='simple', warn=False)
def test_circular_error():
"""
>>> test_circular_error()
Traceback (most recent call last):
...
NumbaError: Unable to infer type for assignment to ..., insert a cast or initialize the variable.
"""
for i in range(10):
if i > 5:
var1 = var2
else:
var2 = var1
@autojit(warnstyle='simple')
def test_simple_circular():
"""
>>> test_simple_circular()
Warning 29:16: local variable 'y' might be referenced before assignment
"""
x = 2.0
for i in range(10):
if i > 5:
x = y
else:
y = x
@autojit(warnstyle='simple')
def test_simple_circular2():
"""
>>> test_simple_circular2()
Warning 44:16: local variable 'x' might be referenced before assignment
"""
y = 2.0
for i in range(10):
if i > 5:
x = y
else:
y = x
@autojit
def test_simple_circular3():
"""
>>> test_simple_circular3()
(Value(5), Value(5))
>>> sig, syms = infer(test_simple_circular3.py_func,
... functype(None, []))
>>> types(syms, 'x', 'y')
(object, object)
"""
x = values[5]
y = 2.0
for i in range(10):
if i > 5:
x = y
else:
y = x
return x, y
@autojit
def test_simple_circular_promotion():
"""
>>> test_simple_circular_promotion()
((3-3j), (1-3j))
>>> sig, syms = infer(test_simple_circular_promotion.py_func,
... functype(None, []))
>>> types(syms, 'x', 'y')
(complex128, complex128)
"""
x = 1
y = 2
for i in range(10):
if i > 5:
x = y + 2.0
else:
y = x - 3.0j
return x, y
@autojit
def test_simple_circular_binop_promotion():
"""
>>> test_simple_circular_binop_promotion()
((3-3j), (3+0j))
>>> sig, syms = infer(test_simple_circular_binop_promotion.py_func,
... functype(None, []))
>>> types(syms, 'x', 'y')
(complex128, complex128)
"""
x = 1
y = 2
for i in range(10):
if i > 5:
x = y - 3.0j
else:
y = x + 2.0 # In pure python, y would always be a float
return x, y
#------------------------------------------------------------------------
# Test Unary/Binary Operations and Comparisons
#------------------------------------------------------------------------
@autojit_py3doc(warn=False)
def test_circular_binop():
"""
>>> test_circular_binop()
(1.0, 2.0, 1.0, -3)
>>> sig, syms = infer(test_circular_binop.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'x', 'y', 'z', 'a')
(float64, float64, float64, int)
"""
x = 1
y = 2
for i in range(10):
if i > 5:
x = y - z
z = 1.0
else:
z = int(x + y)
y = x + z - y
a = -z
return x, y, z, a
@autojit(warn=False)
def test_circular_compare():
"""
>>> test_circular_compare()
(5.0, 1.0)
>>> sig, syms = infer(test_circular_compare.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'x', 'y')
(float64, float64)
"""
x = 1
for i in range(10):
if i == 0:
y = float(x)
if x < 5:
x += y
return x, y
@autojit(warn=False)
def test_circular_compare2():
"""
>>> test_circular_compare2()
(2.0, 1.0)
>>> sig, syms = infer(test_circular_compare.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'x', 'y')
(float64, float64)
"""
x = 1
for i in range(10):
if i == 0:
y = float(x)
if x < 5 and (x > 2 or i == 0):
x += y
return x, y
@autojit_py3doc(warn=False)
def test_circular_compare3():
"""
>>> test_circular_compare3()
1
2
3
4
(False, 10)
>>> sig, syms = infer(test_circular_compare3.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'cond')
(bool,)
>>> t, = types(syms, 'x'); assert t.is_int
>>> assert t.itemsize == Py_ssize_t.itemsize
"""
x = 1
cond = True
for i in range(10):
if cond:
x = i
else:
x = i + 1
cond = x > 1 and x < 5
if cond:
x = cond or x < i
cond = x
x = i
print(i)
return cond, x
#------------------------------------------------------------------------
# Test Indexing
#------------------------------------------------------------------------
@autojit_py3doc(warn=False)
def test_delayed_array_indexing():
"""
>>> test_delayed_array_indexing()
(array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]), 1.0, 10)
>>> sig, syms = infer(test_delayed_array_indexing.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'array', 'var', 'x')
(float64[:], float64, int)
"""
array = np.ones(10, dtype=np.float64)
x = 0
for i in range(11):
var = array[x]
array[x] = var * x
x = int(i * 1.0)
return array, var, x
@autojit(warn=False)
def test_delayed_array_slicing():
"""
>>> array, row = test_delayed_array_slicing()
>>> array2, row2 = test_delayed_array_slicing.py_func()
>>> assert np.all(array == array2)
>>> assert np.all(row == row2)
>>> sig, syms = infer(test_delayed_array_slicing.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'array', 'row')
(float64[:, :], float64[:])
"""
array = np.ones((8, 10), dtype=np.float64)
for i in range(8):
row = array[i, :]
array[i, i] = row[i] * i
array = array[:, :]
return array, row
@autojit(warn=False)
def test_delayed_array_slicing2():
"""
>>> array, row = test_delayed_array_slicing2()
>>> array2, row2 = test_delayed_array_slicing2.py_func()
>>> assert np.all(array == array2)
>>> assert np.all(row == row2)
>>> sig, syms = infer(test_delayed_array_slicing.py_func,
... functype(None, []), warn=False)
>>> types(syms, 'array', 'row')
(float64[:, :], float64[:])
"""
for i in range(8):
if i == 0:
array = np.ones((8, 10), dtype=np.float64)
row = array[i, :]
array[i, i] = row[i] * i
array = array[:, :]
return array, row
@autojit_py3doc(warn=False)
def test_delayed_string_indexing_simple():
"""
>>> test_delayed_string_indexing_simple()
('eggs', 3)
>>> sig, syms = infer(test_delayed_string_indexing_simple.py_func,
... functype(None, []), warn=False)
>>> types(syms, 's', 'x')
(string, Py_ssize_t)
"""
s = "spam ham eggs"
for i in range(4):
if i < 3:
x = i + i
s = s[x:]
x = i
return s[1:], x
@autojit_py3doc(warn=False)
def test_delayed_string_indexing():
"""
>>> test_delayed_string_indexing()
('ham eggs', 3)
>>> sig, syms = infer(test_delayed_string_indexing.py_func,
... functype(None, []), warn=False)
>>> types(syms, 's', 'x')
(string, Py_ssize_t)
"""
s = "spam ham eggs"
for i in range(4):
if i < 3:
x = i
tmp1 = s[x:]
tmp2 = tmp1
s = tmp2
elif i < 5:
s = tmp1[x:]
else:
s = "hello"
x = i
return s, x
@autojit_py3doc(warn=False)
def test_delayed_string_indexing2():
"""
>>> test_delayed_string_indexing2()
('ham eggs', 3)
>>> sig, syms = infer(test_delayed_string_indexing2.py_func,
... functype(None, []), warn=False)
>>> types(syms, 's', 'x')
(string, Py_ssize_t)
"""
for i in range(4):
if i == 0:
s = "spam ham eggs"
if i < 3:
x = i
tmp1 = s[x:]
tmp2 = tmp1
s = tmp2
elif i < 5:
s = tmp1[x:]
else:
s = "hello"
x = i
return s, x
@autojit_py3doc(warn=False, warnstyle='simple')
def test_string_indexing_error():
"""
>>> try: test_string_indexing_error()
... except Exception as e: print(e)
Cannot promote types string and char
"""
for i in range(4):
if i == 0:
s = "spam ham eggs"
if i < 3:
s = s[i]
elif i < 5:
s = s[i:]
@autojit_py3doc(warn=False, warnstyle='simple')
def test_string_indexing_error2():
"""
>>> try: chr(test_string_indexing_error2())
... except Exception as e: print(e)
Cannot promote types string and char
"""
for i in range(4):
if i == 0:
s = "spam ham eggs"
s = s[i]
return s
@autojit(warn=False, warnstyle='simple')
def test_string_indexing_valid():
"""
>>> test_string_indexing_valid() == b'm'
True
"""
for i in range(4):
s = "spam ham eggs"
s = s[i]
return s
#------------------------------------------------------------------------
# Test circular Calling of functions
#------------------------------------------------------------------------
@autojit
def simple_func(x):
y = x * x + 4
return y
@autojit_py3doc(warn=False, warnstyle='simple')
def test_simple_call():
"""
>>> test_simple_call()
1091100052
>>> infer_simple(test_simple_call, 'x')
(int,)
"""
x = 0
for i in range(10):
x = simple_func(x)
return x
@autojit
def func_with_promotion(x):
y = x * x + 4.0
return y
@autojit(warn=False)
def test_simple_call_promotion():
"""
>>> test_simple_call_promotion()
26640768404.0
>>> infer_simple(test_simple_call_promotion, 'x')
(float64,)
"""
x = 0
for i in range(5):
x = func_with_promotion(x)
return x
#print test_simple_call_promotion.py_func()
@autojit
def func_with_promotion2(x):
y = x * x + 4.0
return np.sqrt(y) + 1j
@autojit(warn=False, warnstyle='simple')
def test_simple_call_promotion2():
"""
>>> result =test_simple_call_promotion2()
>>> "%.4f" % round(result.real, 4)
'3.9818'
>>> round(result.imag, 4)
3.9312
>>> infer_simple(test_simple_call_promotion2, 'x')
(complex128,)
"""
x = 0
for i in range(5):
x = func_with_promotion2(x)
return x
#print test_simple_call_promotion2.py_func()
#------------------------------------------------------------------------
# Delayed Attributes
#------------------------------------------------------------------------
@autojit(warn=False)
def test_delayed_attributes1(A):
"""
>>> A = np.empty(2, dtype=[('a', np.int32), ('b', np.float64)])
>>> list(test_delayed_attributes1(A))
[(1, 2.0), (2, 4.0)]
"""
idx = 0
for i in range(A.shape[0]):
A[idx].a = i + 1
A[idx].b = A[idx].a * 2
idx += 1
if idx > 5:
idx = 5
return A
#------------------------------------------------------------------------
# Test Utilities
#------------------------------------------------------------------------
def infer_simple(numba_func, *varnames):
sig, syms = infer(numba_func.py_func, functype(None, []), warn=False)
return types(syms, *varnames)
testmod()
########NEW FILE########
__FILENAME__ = test_w_uninitialized
from numba import *
jitv = jit(void(), warnstyle='simple')
jitvi = jit(void(int_), warnstyle='simple')
jitvii = jit(void(int_, int_), warnstyle='simple')
jitii = jit(int_(int_), warnstyle='simple')
jitiii = jit(int_(int_, int_), warnstyle='simple')
def simple():
"""
>>> jitv(simple)
Traceback (most recent call last):
...
NumbaError: 17:10: local variable 'a' referenced before assignment
"""
print(a)
a = 0
def simple2(arg):
"""
>>> result = jitii(simple2)
Warning 27:11: local variable 'a' might be referenced before assignment
"""
if arg > 0:
a = 1
return a
def simple_pos(arg):
"""
>>> result = jitii(simple_pos)
"""
if arg > 0:
a = 1
else:
a = 0
return a
def ifelif(c1, c2):
"""
>>> result = jitiii(ifelif)
Warning 51:11: local variable 'a' might be referenced before assignment
"""
if c1 == 1:
if c2:
a = 1
else:
a = 2
elif c1 == 2:
a = 3
return a
def nowimpossible(a):
"""
>>> result = jitvi(nowimpossible)
Warning 61:14: local variable 'b' might be referenced before assignment
"""
if a:
b = 1
if a:
print(b)
def fromclosure():
"""
>> result = jitv(fromclosure)
"""
def bar():
print(a)
a = 1
return bar
# Should work ok in both py2 and py3
def list_comp(a):
return [i for i in a]
def set_comp(a):
return set(i for i in a)
#def dict_comp(a):
# return {i: j for i, j in a}
# args and kwargs
def generic_args_call(*args, **kwargs):
return args, kwargs
def cascaded(x):
print((a, b))
a = b = x
def from_import():
print(bar)
from foo import bar
def regular_import():
print(foo)
import foo
def raise_stat():
try:
raise exc(msg)
except:
pass
exc = ValueError
msg = 'dummy'
def defnode_decorator():
@decorator
def foo():
pass
def decorator():
pass
def defnode_default():
def foo(arg=default()):
pass
def default():
pass
def class_bases():
class foo(bar):
pass
class bar(object):
pass
def class_decorators():
@decorator
class foo(object):
pass
def decorator(cls):
return cls
def uninitialized_augmented_assignment():
"""
>>> func = jitv(uninitialized_augmented_assignment)
Traceback (most recent call last):
...
NumbaError: 139:4: local variable 'x' referenced before assignment
"""
x += 1
def uninitialized_augmented_assignment_loop():
"""
>>> func = jitv(uninitialized_augmented_assignment_loop)
Warning 148:8: local variable 'x' might be referenced before assignment
"""
for i in range(10):
x += 1
x = 0
if __name__ == "__main__":
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_w_uninitialized_for
# cython: warn.maybe_uninitialized=True
# mode: error
from numba import *
def simple_for(n):
for i in range(n):
a = 1
return a
def simple_for_break(n):
for i in range(n):
a = 1
break
return a
def simple_for_pos(n):
for i in range(n):
a = 1
else:
a = 0
return a
def simple_target(n):
for i in range(n):
pass
return i
def simple_target_f(n):
for i in range(n):
i *= i
return i
#def simple_for_from(n):
# for i from 0 <= i <= n:
# x = i
# else:
# return x
def for_continue(l):
for i in range(l):
if i > 0:
continue
x = i
print(x)
def for_break(l):
for i in range(l):
if i > 0:
break
x = i
print(x)
#def for_finally_continue(f):
# for i in f:
# try:
# x = i()
# finally:
# print x
# continue
def for_finally_break(f):
for i in f:
try:
x = i()
finally:
print(x)
break
def for_finally_outer(p, f):
x = 1
try:
for i in f:
print(x)
x = i()
if x > 0:
continue
if x < 0:
break
finally:
del x
def jitfunc(func):
jit(int_(int_), warnstyle='simple')(func)
__doc__ = """
>>> jitfunc(simple_for)
Warning 8:11: local variable 'a' might be referenced before assignment
>>> jitfunc(simple_for_break)
Warning 14:11: local variable 'a' might be referenced before assignment
>>> jitfunc(simple_for_pos)
>>> jitfunc(simple_target)
Warning 26:11: local variable 'i' might be referenced before assignment
>>> jitfunc(simple_target_f)
Warning 31:11: local variable 'i' might be referenced before assignment
>>> jitfunc(for_continue)
Warning 44:10: local variable 'x' might be referenced before assignment
>>> jitfunc(for_break)
Warning 51:10: local variable 'x' might be referenced before assignment
Finally tests
>> jitfunc(for_finally_break)
Warning 58:19: local variable 'x' might be referenced before assignment
>> jitfunc(for_finally_outer)
Warning 66:19: local variable 'x' might be referenced before assignment
"""
if __name__ == "__main__":
# jitfunc(simple_for_break)
# jitfunc(simple_for_pos)
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_w_uninitialized_while
from numba import *
def simple_while(n):
while n > 0:
n -= 1
a = 0
return a
def simple_while_break(n):
while n > 0:
n -= 1
break
else:
a = 1
return a
def simple_while_pos(n):
while n > 0:
n -= 1
a = 0
else:
a = 1
return a
#def while_finally_continue(p, f):
# while p():
# try:
# x = f()
# finally:
# print x
# continue
#
#def while_finally_break(p, f):
# while p():
# try:
# x = f()
# finally:
# print x
# break
#
#def while_finally_outer(p, f):
# x = 1
# try:
# while p():
# print x
# x = f()
# if x > 0:
# continue
# if x < 0:
# break
# finally:
# del x
def jitfunc(func):
jit(int_(int_), warnstyle='simple')(func)
__doc__ = """
>>> jitfunc(simple_while)
Warning 9:11: local variable 'a' might be referenced before assignment
>>> jitfunc(simple_while_break)
Warning 17:11: local variable 'a' might be referenced before assignment
>>> jitfunc(simple_while_pos)
"""
if __name__ == "__main__":
# jitfunc(simple_while_break)
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_w_unreachable
from numba import *
jitv = jit(void(), warnstyle='simple') #, nopython=True)
def simple_return():
"""
>>> result = jitv(simple_return)
Warning ...: Unreachable code
"""
return
print('Where am I?')
def simple_loops():
"""
>>> result = jitv(simple_loops)
Warning ...: Unreachable code
Warning ...: Unreachable code
Warning ...: Unreachable code
Warning ...: Unreachable code
Warning ...: Unreachable code
Warning ...: Unreachable code
"""
for i in range(10):
continue
print('Never be here')
while True:
break
print('Never be here')
while True:
break
print('Never be here')
for i in range(10):
for j in range(10):
return
print("unreachable")
else:
print("unreachable")
print("unreachable")
return
print("unreachable")
print("unreachable")
return
print("unreachable")
def conditional(a, b):
if a:
return 1
elif b:
return 2
else:
return 37
print('oops')
if __name__ == "__main__":
# jitv(simple_loops)
# jitv(simple_return)
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = decorators
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.exttypes.entrypoints import (jit_extension_class,
autojit_extension_class,
autojit_class_wrapper)
__all__ = ['autojit', 'jit', 'export', 'exportmany']
import types
import logging
import inspect
from numba import *
from numba import typesystem, numbawrapper
from numba import functions
from numba.utils import process_signature
from numba.codegen import llvmwrapper
from numba import environment
import llvm.core as _lc
from numba.wrapping import compiler
logger = logging.getLogger(__name__)
environment.NumbaEnvironment.get_environment().link_cbuilder_utilities()
if PY3:
CLASS_TYPES = type
else:
CLASS_TYPES = (type, types.ClassType)
#------------------------------------------------------------------------
# PyCC decorators
#------------------------------------------------------------------------
def _internal_export(env, function_signature, backend='ast', **kws):
def _iexport(func):
if backend == 'bytecode':
raise NotImplementedError(
'Bytecode translation has been removed for exported functions.')
else:
name = function_signature.name
llvm_module = _lc.Module.new('export_%s' % name)
if not hasattr(func, 'live_objects'):
func.live_objects = []
func._is_numba_func = True
func_ast = functions._get_ast(func)
# FIXME: Hacked "mangled_name" into the translation
# environment. Should do something else. See comment in
# codegen.translate.LLVMCodeGenerator.__init__().
with environment.TranslationContext(
env, func, func_ast, function_signature,
name=name, llvm_module=llvm_module,
mangled_name=name,
link=False, wrap=False,
is_pycc=True) as func_env:
pipeline = env.get_pipeline()
func_ast.pipeline = pipeline
pipeline(func_ast, env)
exports_env = env.exports
exports_env.function_signature_map[name] = function_signature
exports_env.function_module_map[name] = llvm_module
if not exports_env.wrap_exports:
exports_env.function_wrapper_map[name] = None
else:
wrapper_tup = llvmwrapper.build_wrapper_module(env)
exports_env.function_wrapper_map[name] = wrapper_tup
return func
return _iexport
def export(signature, env_name=None, env=None, **kws):
"""
    Construct a decorator that takes a function and exports it under the given
    signature. A signature is a string of the form:
        name ret_type(arg_type, arg_type, ...)
"""
if env is None:
env = environment.NumbaEnvironment.get_environment(env_name)
return _internal_export(env, process_signature(signature), **kws)
def exportmany(signatures, env_name=None, env=None, **kws):
"""
A Decorator that exports many signatures for a single function
"""
if env is None:
env = environment.NumbaEnvironment.get_environment(env_name)
def _export(func):
for signature in signatures:
tocall = _internal_export(env, process_signature(signature), **kws)
tocall(func)
return func
return _export
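# Usage sketch (illustrative; follows the "name ret_type(arg_type, ...)" signature
# format documented above, the function names are hypothetical):
#
#     @export("mult f8(f8, f8)")
#     def mult(a, b):
#         return a * b
#
#     @exportmany(["mult_i i4(i4, i4)", "mult_f f4(f4, f4)"])
#     def mult_any(a, b):
#         return a * b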
#------------------------------------------------------------------------
# Compilation Entry Points
#------------------------------------------------------------------------
# TODO: Redo this entire module
def compile_function(env, func, argtypes, restype=None, func_ast=None, **kwds):
"""
Compile a python function given the argument types. Compile only
if not compiled already, and only if it is registered to the function
cache.
Returns a triplet of (signature, llvm_func, python_callable)
`python_callable` is the wrapper function (NumbaFunction).
"""
function_cache = env.specializations
# For NumbaFunction, we get the original python function.
func = getattr(func, 'py_func', func)
# get the compile flags
flags = None # stub
# Search in cache
result = function_cache.get_function(func, argtypes, flags)
if result is not None:
sig, lfunc, pycall = result
return sig, lfunc, pycall
# Compile the function
from numba import pipeline
compile_only = getattr(func, '_numba_compile_only', False)
kwds['compile_only'] = kwds.get('compile_only', compile_only)
assert kwds.get('llvm_module') is None, kwds.get('llvm_module')
func_env = pipeline.compile2(env, func, restype, argtypes, func_ast=func_ast, **kwds)
function_cache.register_specialization(func_env)
return (func_env.func_signature,
func_env.lfunc,
func_env.numba_wrapper_func)
def _autojit(template_signature, target, nopython, env_name=None, env=None,
**flags):
if env is None:
env = environment.NumbaEnvironment.get_environment(env_name)
def _autojit_decorator(f):
"""
        Defines a numba function that, when called, specializes on the input
        types. Uses the AST translator backend (the bytecode backend is no
        longer supported).
"""
if isinstance(f, CLASS_TYPES):
compiler_cls = compiler.ClassCompiler
wrapper = autojit_class_wrapper
else:
compiler_cls = compiler.FunctionCompiler
wrapper = autojit_wrappers[(target, 'ast')]
env.specializations.register(f)
cache = env.specializations.get_autojit_cache(f)
flags['target'] = target
compilerimpl = compiler_cls(env, f, nopython, flags, template_signature)
numba_func = wrapper(f, compilerimpl, cache)
return numba_func
return _autojit_decorator
def autojit(template_signature=None, backend='ast', target='cpu',
nopython=False, locals=None, **kwargs):
"""
Creates a function that dispatches to type-specialized LLVM
functions based on the input argument types. If no specialized
function exists for a set of input argument types, the dispatcher
creates and caches a new specialized function at call time.
"""
if template_signature and not isinstance(template_signature, typesystem.Type):
if callable(template_signature):
func = template_signature
return autojit(backend='ast', target=target,
nopython=nopython, locals=locals, **kwargs)(func)
else:
raise Exception("The autojit decorator should be called: "
"@autojit()")
if backend == 'bytecode':
return _not_implemented
else:
return _autojit(template_signature, target, nopython,
locals=locals, **kwargs)
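# Usage sketch (illustrative): without a template signature, @autojit specializes
# lazily on the argument types seen at call time, e.g.
#
#     @autojit
#     def add(a, b):
#         return a + b
#
#     add(1, 2)      # compiles and caches an integer specialization
#     add(1.0, 2.0)  # compiles and caches a float specialization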
def _jit(restype=None, argtypes=None, nopython=False,
_llvm_module=None, env_name=None, env=None, func_ast=None, **kwargs):
#print(ast.dump(func_ast))
if env is None:
env = environment.NumbaEnvironment.get_environment(env_name)
def _jit_decorator(func):
if isinstance(func, CLASS_TYPES):
cls = func
kwargs.update(env_name=env_name)
return jit_extension_class(cls, kwargs, env)
argtys = argtypes
if argtys is None and restype:
assert restype.is_function
return_type = restype.return_type
argtys = restype.args
elif argtys is None:
assert func.__code__.co_argcount == 0, func
return_type = None
argtys = []
else:
return_type = restype
assert argtys is not None
env.specializations.register(func)
assert kwargs.get('llvm_module') is None # TODO link to user module
assert kwargs.get('llvm_ee') is None, "Engine should never be provided"
sig, lfunc, wrapper = compile_function(env, func, argtys,
restype=return_type,
nopython=nopython, func_ast=func_ast, **kwargs)
return numbawrapper.create_numba_wrapper(func, wrapper, sig, lfunc)
return _jit_decorator
def _not_implemented(*args, **kws):
raise NotImplementedError('Bytecode backend is no longer supported.')
jit_targets = {
('cpu', 'bytecode') : _not_implemented,
('cpu', 'ast') : _jit,
}
autojit_wrappers = {
('cpu', 'bytecode') : _not_implemented,
('cpu', 'ast') : numbawrapper.NumbaSpecializingWrapper,
}
def jit(restype=None, argtypes=None, backend='ast', target='cpu', nopython=False,
**kws):
"""
Compile a function given the input and return types.
There are multiple ways to specify the type signature:
* Using the restype and argtypes arguments, passing Numba types.
* By constructing a Numba function type and passing that as the
first argument to the decorator. You can create a function type
      by calling an existing Numba type, which is the return type,
and the arguments to that call define the argument types. For
example, ``f8(f8)`` would create a Numba function type that
takes a single double-precision floating point value argument,
and returns a double-precision floating point value.
* As above, but using a string instead of a constructed function
type. Example: ``jit("f8(f8)")``.
If backend='bytecode' the bytecode translator is used, if
backend='ast' the AST translator is used. By default, the AST
translator is used. *Note that the bytecode translator is
deprecated as of the 0.3 release.*
"""
kws.update(nopython=nopython, backend=backend)
if isinstance(restype, CLASS_TYPES):
cls = restype
env = kws.pop('env', None) or environment.NumbaEnvironment.get_environment(
kws.get('env_name', None))
return jit_extension_class(cls, kws, env)
# Called with f8(f8) syntax which returns a dictionary of argtypes and restype
if isinstance(restype, typesystem.function):
if argtypes is not None:
raise TypeError("Cannot use both calling syntax and argtypes keyword")
argtypes = restype.args
restype = restype.return_type
# Called with a string like 'f8(f8)'
elif isinstance(restype, str) and argtypes is None:
signature = process_signature(restype, kws.get('name', None))
name, restype, argtypes = (signature.name, signature.return_type,
signature.args)
if name is not None:
kws['func_name'] = name
if restype is not None:
kws['restype'] = restype
if argtypes is not None:
kws['argtypes'] = argtypes
return jit_targets[target, backend](**kws)
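# Usage sketch (illustrative, restating the three spellings described in the
# docstring above):
#
#     @jit(restype=f8, argtypes=[f8])   # explicit keyword arguments
#     def square1(x): return x * x
#
#     @jit(f8(f8))                      # a constructed Numba function type
#     def square2(x): return x * x
#
#     @jit("f8(f8)")                    # the same signature as a string
#     def square3(x): return x * x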
########NEW FILE########
__FILENAME__ = environment
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import weakref
import ast as ast_module
import types
import logging
import pprint
import collections
import llvm.core
from numba import pipeline, naming, error, reporting, PY3
from numba.utils import TypedProperty, WriteOnceTypedProperty, NumbaContext
from numba import functions, symtab
from numba.typesystem import TypeSystem, numba_typesystem, function
from numba.utility.cbuilder import library
from numba.nodes import metadata
from numba.control_flow import ControlFlow
from numba.codegen import translate
from numba.codegen import globalconstants
from numba.ndarray_helpers import NumpyArray
from numba.intrinsic import default_intrinsic_library
from numba.external import default_external_library
from numba.external.utility import default_utility_library
# ______________________________________________________________________
# Module data
logger = logging.getLogger(__name__)
if PY3:
NoneType = type(None)
name_types = str
else:
NoneType = types.NoneType
name_types = (str, unicode)
default_normalize_order = [
'ast3to2',
'resolve_templates',
'validate_signature',
'update_signature',
'create_lfunc1',
'NormalizeASTStage',
'TransformBuiltinLoops',
'ValidateASTStage',
]
default_cf_pipeline_order = ['ast3to2', 'ControlFlowAnalysis']
default_pipeline_order = default_normalize_order + [
'ExpandPrange',
'RewritePrangePrivates',
'FixASTLocations',
'ControlFlowAnalysis',
'dump_cfg',
#'ConstFolding',
# 'dump_ast',
'UpdateAttributeStatements',
'TypeInfer',
'CleanupPrange',
'update_signature',
'create_lfunc2',
'TypeSet',
'ClosureTypeInference',
'create_lfunc3',
'TransformFor',
'Specialize',
'RewriteArrayExpressions',
'SpecializeComparisons',
'SpecializeSSA',
'SpecializeClosures',
'Optimize',
'SpecializeLoops',
'LowerRaise',
'FixASTLocations',
'LateSpecializer',
'ExtensionTypeLowerer',
'SpecializeFunccalls',
'SpecializeExceptions',
'cleanup_symtab',
'validate_arrays',
'dump_ast',
'FixASTLocations',
'CodeGen',
'dump_annotations',
'dump_llvm',
'PostPass',
'LinkingStage',
'dump_optimized',
'WrapperStage',
'ErrorReporting',
]
default_dummy_type_infer_pipeline_order = [
'ast3to2',
'TypeInfer',
'TypeSet',
]
default_numba_lower_pipeline_order = [
'ast3to2',
'LateSpecializer',
'SpecializeFunccalls',
'SpecializeExceptions',
]
default_numba_wrapper_pipeline_order = default_numba_lower_pipeline_order
default_numba_late_translate_pipeline_order = \
default_numba_lower_pipeline_order + [
'CodeGen',
]
upto = lambda order, x: order[:order.index(x)+1]
upfr = lambda order, x: order[order.index(x)+1:]
default_type_infer_pipeline_order = upto(default_pipeline_order, 'TypeInfer')
default_compile_pipeline_order = upfr(default_pipeline_order, 'TypeInfer')
default_codegen_pipeline = upto(default_pipeline_order, 'CodeGen')
default_post_codegen_pipeline = upfr(default_pipeline_order, 'CodeGen')
# ______________________________________________________________________
# Convenience functions
def insert_stage(pipeline_order, stage, after=None, before=None):
if after is not None:
idx = pipeline_order.index(after) + 1
else:
idx = pipeline_order.index(before)
pipeline_order.insert(idx, stage)
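# Usage sketch (illustrative; 'MyCustomStage' is a hypothetical stage name):
#
#     order = list(default_pipeline_order)
#     insert_stage(order, 'MyCustomStage', after='TypeInfer')
#     # or, equivalently, anchor it relative to a later stage:
#     insert_stage(order, 'MyCustomStage', before='ClosureTypeInference')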
# ______________________________________________________________________
# Class definitions
class _AbstractNumbaEnvironment(object):
'''Used to break circular type dependency between the translation
and function environments and the top-level NumbaEnvironment.'''
# ______________________________________________________________________
class FunctionErrorEnvironment(object):
"""
    Environment for errors or warnings that occur during translation of
a function.
"""
func = WriteOnceTypedProperty(
(NoneType, types.FunctionType),
'Function (or similar) being translated.')
ast = TypedProperty(
ast_module.AST,
'Original Abstract Syntax Tree for the function being translated.')
source = TypedProperty(
list, #(str, unicode),
"Function source code")
enable_post_mortem = TypedProperty(
bool,
"Enable post-mortem debugging for the Numba compiler",
False,
)
collection = TypedProperty(
reporting.MessageCollection,
"Collection of error and warning messages")
warning_styles = {
'simple' : reporting.MessageCollection,
'fancy': reporting.FancyMessageCollection,
}
def __init__(self, func, ast, warnstyle):
self.func = func
self.ast = ast # copy.deepcopy(ast)
# Retrieve the source code now
source_descr = reporting.SourceDescr(func, ast)
self.source = source_descr.get_lines()
collection_cls = self.warning_styles[warnstyle]
self.collection = collection_cls(self.ast, self.source)
def merge_in(self, parent_error_env):
"""
Merge error messages into another error environment.
Useful to propagate error messages for inner functions outwards.
"""
parent_error_env.collection.messages.extend(self.collection.messages)
del self.collection.messages[:]
# ______________________________________________________________________
class FunctionEnvironment(object):
'''State for a function under translation.'''
# ____________________________________________________________
# Properties
numba = WriteOnceTypedProperty(
_AbstractNumbaEnvironment,
'Grandparent environment (top-level Numba environment).')
func = WriteOnceTypedProperty(
object, 'Function (or similar) being translated.')
ast = TypedProperty(
ast_module.AST,
'Abstract syntax tree for the function being translated.')
func_signature = TypedProperty(
function,
'Type signature for the function being translated.')
is_partial = TypedProperty(
bool,
"Whether this environment is a partially constructed environment",
False)
func_name = TypedProperty(str, 'Target function name.')
module_name = TypedProperty(str, 'Name of the function module.')
mangled_name = TypedProperty(str, 'Mangled name of compiled function.')
qualified_name = TypedProperty(str, "Target qualified function name "
"('mymodule.myfunc')")
llvm_module = TypedProperty(
llvm.core.Module,
'LLVM module for this function. This module is first optimized and '
'then linked into a global module. The Python wrapper function goes '
'directly into the main fat module.')
error_env = TypedProperty(
FunctionErrorEnvironment,
"Error environment for this function.")
lfunc = TypedProperty(
(NoneType, llvm.core.Function),
"Compiled, native, Numba function",
None)
lfunc_pointer = TypedProperty(
(int, long) if not PY3 else int,
"Pointer to underlying compiled function. Can be used as a callback.",
)
link = TypedProperty(
bool,
'Flag indicating whether the LLVM function needs to be linked into '
'the global fast module from LLVMContextManager',
True)
wrap = TypedProperty(
bool,
'Flag indicating whether the function needs a wrapper function to be '
'callable from Python.',
True)
llvm_wrapper_func = TypedProperty(
(llvm.core.Function, NoneType),
'The LLVM wrapper function for the target function. This is a '
'wrapper function that accept python object arguments and returns an '
'object.')
numba_wrapper_func = TypedProperty(
object,
'The Numba wrapper function (see numbafunction.c) for the target '
'function. This is a wrapper function that accept python object '
'arguments and returns an object.')
symtab = TypedProperty(
(symtab.Symtab, dict),
'A map from local variable names to symbol table variables for all '
'local variables. '
'({ "local_var_name" : numba.symtab.Variable(local_var_type) })')
function_globals = TypedProperty(
(dict, NoneType),
"Globals dict of the function",)
locals = TypedProperty(
dict,
'A map from local variable names to types. Used to handle the locals '
'keyword argument to the autojit decorator. '
'({ "local_var_name" : local_var_type } for @autojit(locals=...))')
template_signature = TypedProperty(
(function, NoneType),
'Template signature for @autojit. E.g. T(T[:, :]). See '
'numba.typesystem.templatetypes.')
typesystem = TypedProperty(TypeSystem, "Typesystem for this compilation")
array = TypedProperty(object, "Array abstraction", NumpyArray)
ast_metadata = TypedProperty(
object,
'Metadata for AST nodes of the function being compiled.')
warn = True
flow = TypedProperty(
(NoneType, ControlFlow),
"Control flow graph. See numba.control_flow.",
default=None)
# FIXME: Get rid of this. See comment for translator property,
# below.
cfg_transform = TypedProperty(
object, # Should be ControlFlowAnalysis.
'The Control Flow Analysis transform object '
'(control_flow.ControlFlowAnalysis). Set during the cfg pass.')
cfdirectives = TypedProperty(
dict, "Directives for control flow.",
default={
'warn.maybe_uninitialized': warn,
'warn.unused_result': False,
'warn.unused': warn,
'warn.unused_arg': warn,
# Set the below flag to a path to generate CFG dot files
'control_flow.dot_output': os.path.expanduser("~/cfg.dot"),
'control_flow.dot_annotate_defs': False,
},
)
kill_attribute_assignments = TypedProperty( # Prange
(set, frozenset),
"Assignments to attributes that need to be removed from type "
"inference pre-analysis. We need to do this for prange since we "
"need to infer the types of variables to build a struct type for "
"those variables. So we need type inference to be properly ordered, "
"and not look at the attributes first.")
# FIXME: Get rid of this property; pipeline stages are users and
# transformers of the environment. Any state needed beyond a
# given stage should be put in the environment instead of keeping
# around the whole transformer.
# TODO: Create linking stage
translator = TypedProperty(
object, # FIXME: Should be LLVMCodeGenerator, but that causes
# module coupling.
'The code generator instance used to generate the target LLVM '
'function. Set during the code generation pass, and used for '
'after-the-fact wrapper generation.')
is_closure = TypedProperty(
bool,
'Flag indicating if the current function under translation is a '
'closure or not.',
False)
closures = TypedProperty(
dict, 'Map from ast nodes to closures.')
closure_scope = TypedProperty(
(dict, NoneType),
        'Collective symbol table containing all entries from outer '
'functions.')
need_closure_wrapper = TypedProperty(
bool, "Whether this closure needs a Python wrapper function",
default=True,
)
refcount_args = TypedProperty(
bool, "Whether to use refcounting for the function arguments", True)
warn = TypedProperty(
bool,
'Flag that enables control flow warnings on a per-function level.',
True)
annotations = TypedProperty(
dict, "Annotation dict { lineno : Annotation }"
)
intermediates = TypedProperty(
list, "list of Intermediate objects for annotation",
)
warnstyle = TypedProperty(
str if PY3 else basestring,
'Warning style, currently available: simple, fancy',
default='fancy'
)
postpasses = TypedProperty(
dict,
"List of passes that should run on the final llvm ir before linking",
)
kwargs = TypedProperty(
dict,
'Additional keyword arguments. Deprecated, but kept for backward '
'compatibility.')
# ____________________________________________________________
# Methods
def __init__(self, *args, **kws):
self.init(*args, **kws)
def init(self, parent, func, ast, func_signature,
name=None, qualified_name=None,
mangled_name=None,
llvm_module=None, wrap=True, link=True,
symtab=None,
error_env=None, function_globals=None, locals=None,
template_signature=None, is_closure=False,
closures=None, closure_scope=None,
refcount_args=True,
ast_metadata=None, warn=True, warnstyle='fancy',
typesystem=None, array=None, postpasses=None, annotate=False,
**kws):
self.parent = parent
self.numba = parent.numba
self.func = func
self.ast = ast
self.func_signature = func_signature
if name is None:
if self.func:
name = self.func.__name__
else:
name = self.ast.name
if self.func and self.func.__module__:
qname = '.'.join([self.func.__module__, name])
else:
qname = name
if function_globals is not None:
self.function_globals = function_globals
else:
self.function_globals = self.func.__globals__
if self.func:
self.module_name = self.func.__module__ or '<unnamed.module>'
else:
self.module_name = self.function_globals.get("__name__", "")
if mangled_name is None:
mangled_name = naming.specialized_mangle(qname,
self.func_signature.args)
self.func_name = name
self.mangled_name = mangled_name
self.qualified_name = qualified_name or name
self.llvm_module = (llvm_module if llvm_module
else self.numba.llvm_context.module)
self._annotate = annotate
self.wrap = wrap
self.link = link
self.llvm_wrapper_func = None
self.symtab = symtab if symtab is not None else {}
self.error_env = error_env or FunctionErrorEnvironment(self.func,
self.ast,
warnstyle)
self.locals = locals if locals is not None else {}
self.template_signature = template_signature
self.is_closure = is_closure
self.closures = closures if closures is not None else {}
self.closure_scope = closure_scope
self.kill_attribute_assignments = set()
self.refcount_args = refcount_args
self.typesystem = typesystem or numba_typesystem
if array:
self.array = array
# assert issubclass(array, NumpyArray)
import numba.postpasses
self.postpasses = postpasses or numba.postpasses.default_postpasses
if ast_metadata is not None:
self.ast_metadata = ast_metadata
else:
self.ast_metadata = metadata.create_metadata_env()
self.annotations = collections.defaultdict(list)
self.intermediates = []
self.warn = warn
self.warnstyle = warnstyle
self.kwargs = kws
def getstate(self):
state = dict(
parent=self.parent,
func=self.func,
ast=self.ast,
func_signature=self.func_signature,
name=self.func_name,
mangled_name=self.mangled_name,
qualified_name=self.qualified_name,
llvm_module=self.llvm_module,
wrap=self.wrap,
link=self.link,
symtab=self.symtab,
function_globals=self.function_globals,
locals=self.locals,
template_signature=self.template_signature,
is_closure=self.is_closure,
closures=self.closures,
kill_attribute_assignments=self.kill_attribute_assignments,
closure_scope=self.closure_scope,
warn=self.warn,
warnstyle=self.warnstyle,
postpasses=self.postpasses,
)
return state
def inherit(self, **kwds):
"""
Inherit from a parent FunctionEnvironment (e.g. to run pipeline stages
on a subset of the AST).
"""
# TODO: link these things together
state = self.getstate()
state.update(kwds)
return type(self)(**state)
@property
def annotate(self):
"Whether we need to annotate the source"
return self._annotate or self.numba.cmdopts.get('annotate')
@property
def func_doc(self):
if self.func is not None:
return self.func.__doc__
else:
return ast_module.get_docstring(self.ast)
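# Hedged usage sketch (illustrative only, not part of numba): derive a child
# FunctionEnvironment for a sub-AST via FunctionEnvironment.inherit() above,
# keeping every other setting from the parent environment, e.g. to run a
# pipeline stage on part of a function.
def _example_inherit_for_subtree(func_env, sub_ast):
    # `func_env` is an existing FunctionEnvironment and `sub_ast` a node taken
    # from its AST; both are assumed to exist in the caller.
    return func_env.inherit(ast=sub_ast)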
# ______________________________________________________________________
class TranslationEnvironment(object):
'''State for a given translation.'''
# ____________________________________________________________
# Properties
numba = TypedProperty(_AbstractNumbaEnvironment, 'Parent environment')
crnt = TypedProperty(
(FunctionEnvironment, NoneType),
'The environment corresponding to the current function under '
'translation.')
stack = TypedProperty(
list,
'A stack consisting of FunctionEnvironment instances. Used to '
'manage lexical closures.')
functions = TypedProperty(
dict,
'A map from target function names that are under compilation to their '
'corresponding FunctionEnvironments')
func_envs = TypedProperty(
weakref.WeakKeyDictionary,
"Map from root AST nodes to FunctionEnvironment objects."
"This allows tracking environments of partially processed ASTs.")
nopython = TypedProperty(
bool,
'Flag used to indicate if calls to the Python C API are permitted or '
'not during code generation.',
False)
allow_rebind_args = TypedProperty(
bool,
'Flag indicating whether the type of arguments may be overridden for '
'@jit functions. This is always true (except in tests, perhaps!)',
True)
warn = TypedProperty(
bool,
'Flag that enables control flow warnings. FunctionEnvironment inherits '
'this unless overridden.',
True)
is_pycc = TypedProperty(
bool,
'Flag that tells us whether this function is being exported with pycc.',
False)
# ____________________________________________________________
# Methods
def __init__(self, parent, **kws):
self.numba = parent
self.crnt = None
self.stack = [(kws, None)]
self.functions = {}
self.func_envs = weakref.WeakKeyDictionary()
self.set_flags(**kws)
def set_flags(self, **kws):
self.nopython = kws.get('nopython', False)
self.allow_rebind_args = kws.get('allow_rebind_args', True)
self.warn = kws.get('warn', True)
self.is_pycc = kws.get('is_pycc', False)
def get_or_make_env(self, func, ast, func_signature, **kwds):
if ast not in self.func_envs:
kwds.setdefault('warn', self.warn)
func_env = self.numba.FunctionEnvironment(
self, func, ast, func_signature, **kwds)
self.func_envs[ast] = func_env
else:
func_env = self.func_envs[ast]
if func_env.is_partial:
state = func_env.partial_state
else:
state = func_env.getstate()
state.update(kwds, func=func, ast=ast,
func_signature=func_signature)
func_env.init(self, **state)
return func_env
def get_env(self, ast):
if ast in self.func_envs:
return self.func_envs[ast]
else:
return None
def make_partial_env(self, ast, **kwds):
"""
Create a partial environment for a function that only initializes
the given attributes.
Later attributes will override existing attributes.
"""
if ast in self.func_envs:
func_env = self.func_envs[ast]
else:
func_env = self.numba.FunctionEnvironment.__new__(
self.numba.FunctionEnvironment)
func_env.is_partial = True
func_env.partial_state = kwds
for key, value in kwds.iteritems():
setattr(func_env, key, value)
self.func_envs[ast] = func_env
func_env.ast = ast
return func_env
def push(self, func, ast, func_signature, **kws):
func_env = self.get_or_make_env(func, ast, func_signature, **kws)
return self.push_env(func_env, **kws)
def push_env(self, func_env, **kws):
self.set_flags(**kws)
self.crnt = func_env
self.stack.append((kws, self.crnt))
self.functions[self.crnt.func_name] = self.crnt
self.func_envs[func_env.ast] = func_env
if self.numba.debug:
logger.debug('stack=%s\ncrnt=%r (%r)', pprint.pformat(self.stack),
self.crnt, self.crnt.func if self.crnt else None)
return self.crnt
def pop(self):
ret_val = self.stack.pop()
kws, self.crnt = self.stack[-1]
self.set_flags(**kws)
if self.numba.debug:
logger.debug('stack=%s\ncrnt=%r (%r)', pprint.pformat(self.stack),
self.crnt, self.crnt.func if self.crnt else None)
return ret_val
# ______________________________________________________________________
class TranslationContext(object):
"""Context manager for handling a translation. Pushes a
FunctionEnvironment input onto the given translation environment's
stack, and pops it when leaving the translation context.
"""
def __init__(self, env, *args, **kws):
self.translation_environment = env.translation
self.args = args
self.kws = kws
def __enter__(self):
return self.translation_environment.push(*self.args, **self.kws)
def __exit__(self, exc_type, exc_value, exc_tb):
self.translation_environment.pop()
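# Hedged usage sketch (illustrative only, not part of numba; mirrors the
# self-test in main() at the bottom of this module): push a function onto the
# translation stack for the duration of a default-pipeline run.
def _example_run_pipeline(env, func, func_ast, signature):
    # `env` is a NumbaEnvironment; `func`, `func_ast` and `signature` describe
    # the function being compiled and are assumed to exist in the caller.
    with TranslationContext(env, func, func_ast, signature):
        return env.get_pipeline()(func_ast, env)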
# ______________________________________________________________________
class PyccEnvironment(object):
'''pycc environment
Includes flags, and modules for exported functions.
'''
# ____________________________________________________________
# Properties
wrap_exports = TypedProperty(
bool,
'Boolean flag used to indicate that Python wrappers should be '
'generated for exported functions.')
function_signature_map = TypedProperty(
dict,
'Map from function names to type signatures for the translated '
'function (used for header generation).')
function_module_map = TypedProperty(
dict,
'Map from function names to LLVM modules that define the translated '
'function.')
function_wrapper_map = TypedProperty(
dict,
'Map from function names to tuples containing LLVM wrapper functions '
'and LLVM modules that define the wrapper function.')
# ____________________________________________________________
# Methods
def __init__(self, wrap_exports=False, *args, **kws):
self.reset(wrap_exports, *args, **kws)
def reset(self, wrap_exports=False, *args, **kws):
'''Clear the current set of exported functions.'''
self.wrap_exports = wrap_exports
self.function_signature_map = {}
self.function_module_map = {}
self.function_wrapper_map = {}
# ______________________________________________________________________
class NumbaEnvironment(_AbstractNumbaEnvironment):
'''Defines global state for a Numba translator. '''
# ____________________________________________________________
# Properties
name = TypedProperty(name_types, "Name of the environment.")
pipelines = TypedProperty(
dict, 'Map from entry point names to PipelineStages.')
pipeline_stages = TypedProperty(
types.ModuleType,
'Namespace for pipeline stages. Initially set to the numba.pipeline '
'module.',
pipeline)
default_pipeline = TypedProperty(
str,
'Default entry point name. Used to index into the "pipelines" map.',
default='numba')
context = TypedProperty(
NumbaContext,
'Defines a global typing context for handling promotions and type '
'representations.')
specializations = TypedProperty(
functions.FunctionCache, 'Cache for previously specialized functions.')
exports = TypedProperty(
PyccEnvironment, 'Translation environment for pycc usage')
debug = TypedProperty(
bool,
'Global flag indicating verbose debugging output should be enabled.',
False)
debug_coercions = TypedProperty(
bool,
'Flag for checking type coercions done during late specialization.',
False)
stage_checks = TypedProperty(
bool,
'Global flag for enabling detailed checks in translation pipeline '
'stages.',
False)
translation = TypedProperty(
TranslationEnvironment,
'Current translation environment, specific to the current pipeline '
'being run.')
llvm_context = TypedProperty(
translate.LLVMContextManager,
"Manages the global LLVM module and linkages of new translations."
)
constants_manager = TypedProperty(
globalconstants.LLVMConstantsManager,
"Holds constant values in an LLVM module.",
default=globalconstants.LLVMConstantsManager(),
)
cmdopts = TypedProperty(
dict, "Dict of command line options from bin/numba.py", {},
)
annotation_blocks = TypedProperty(
list, "List of annotation information for different functions."
)
# ____________________________________________________________
# Class members
environment_map = {}
TranslationContext = TranslationContext
TranslationEnvironment = TranslationEnvironment
FunctionEnvironment = FunctionEnvironment
# ____________________________________________________________
# Methods
def __init__(self, name, *args, **kws):
self.name = name
actual_default_pipeline = pipeline.ComposedPipelineStage(
default_pipeline_order)
self.pipelines = {
self.default_pipeline : actual_default_pipeline,
'normalize' : pipeline.ComposedPipelineStage(
default_normalize_order),
'cf' : pipeline.ComposedPipelineStage(
default_cf_pipeline_order),
'type_infer' : pipeline.ComposedPipelineStage(
default_type_infer_pipeline_order),
'dummy_type_infer' : pipeline.ComposedPipelineStage(
default_dummy_type_infer_pipeline_order),
'compile' : pipeline.ComposedPipelineStage(
default_compile_pipeline_order),
'wrap_func' : pipeline.ComposedPipelineStage(
default_numba_wrapper_pipeline_order),
'lower' : pipeline.ComposedPipelineStage(
default_numba_lower_pipeline_order),
'late_translate' : pipeline.ComposedPipelineStage(
default_numba_late_translate_pipeline_order),
'codegen' : pipeline.ComposedPipelineStage(
default_codegen_pipeline),
'post_codegen' : pipeline.ComposedPipelineStage(
default_post_codegen_pipeline),
}
self.context = NumbaContext()
self.specializations = functions.FunctionCache(env=self)
self.exports = PyccEnvironment()
self.translation = self.TranslationEnvironment(self)
self.debug = logger.getEffectiveLevel() < logging.DEBUG
# FIXME: NumbaContext has up to now been used as a stand in
# for NumbaEnvironment, so the following member definitions
# should be moved into the environment, and the code that uses
# them should be updated.
context = self.context
context.env = self
context.numba_pipeline = actual_default_pipeline
context.function_cache = self.specializations
context.intrinsic_library = default_intrinsic_library(context)
context.external_library = default_external_library(context)
context.utility_library = default_utility_library(context)
self.llvm_context = translate.LLVMContextManager()
self.annotation_blocks = []
def link_cbuilder_utilities(self):
self.context.cbuilder_library = library.CBuilderLibrary()
self.context.cbuilder_library.declare_registered(self)
# Link modules
self.context.cbuilder_library.link(self.llvm_context.module)
@classmethod
def get_environment(cls, environment_key = None, *args, **kws):
'''
Given an optional key, return the global Numba environment for
that key. If no key is given, return the default global
environment.
Note that internally, the default environment is mapped to None.
'''
if environment_key in cls.environment_map:
ret_val = cls.environment_map[environment_key]
else:
ret_val = cls(environment_key or 'numba', *args, **kws)
cls.environment_map[environment_key] = ret_val
return ret_val
@property
def crnt(self):
return self.translation.crnt
def get_pipeline(self, pipeline_name=None):
'''Convenience function for getting a pipeline object (which
should be a callable object that accepts (ast, env) arguments,
and returns an ast).'''
if pipeline_name is None:
pipeline_name = self.default_pipeline
return self.pipelines[pipeline_name]
def get_or_add_pipeline(self, pipeline_name=None, pipeline_ctor=None):
if pipeline_name is None:
pipeline_name = self.default_pipeline
if pipeline_name in self.pipelines:
pipeline_obj = self.pipelines[pipeline_name]
else:
pipeline_obj = self.pipelines[pipeline_name] = pipeline_ctor()
return pipeline_obj
def __repr__(self):
return "NumbaEnvironment(%s)" % self.name
# ______________________________________________________________________
# Main (self-test) routine
def main(*args):
import numba as nb
test_ast = ast_module.parse('def test_fn(a, b):\n return a + b\n\n',
'<string>', 'exec')
exec(compile(test_ast, '<string>', 'exec'))
test_fn_ast = test_ast.body[-1]
test_fn_sig = nb.double(nb.double, nb.double)
test_fn_sig.name = test_fn.__name__
env = NumbaEnvironment.get_environment()
with TranslationContext(env, test_fn, test_fn_ast, test_fn_sig):
env.get_pipeline()(test_fn_ast, env)
assert env.pipeline_stages == pipeline
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
########NEW FILE########
__FILENAME__ = error
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import traceback
__all__ = ["NumbaError", "InternalError", "InvalidTemplateError"]
def format_pos(node):
if node is not None and hasattr(node, 'lineno'):
return format_postup((node.lineno, node.col_offset))
else:
return ""
def format_postup(tup):
lineno, col_offset = tup
return "%s:%s: " % (lineno, col_offset)
class NumbaError(Exception):
"Some error happened during compilation"
def __init__(self, node, msg=None, *args, **kwds):
if msg is None:
node, msg = None, node
self.node = node
self.msg = msg
self.args = args
self.has_report = kwds.get("has_report", False)
def __str__(self):
try:
if self.has_report:
return self.msg.strip()
pos = format_pos(self.node)
msg = "%s%s %s" % (pos, self.msg, " ".join(map(str, self.args)))
return msg.rstrip()
except:
traceback.print_exc()
return "<internal error creating numba error message>"
class InternalError(Exception):
"Indicates a compiler bug"
class _UnknownAttribute(Exception):
pass
class InvalidTemplateError(Exception):
"Raised for invalid template type specifications"
class UnpromotableTypeError(TypeError):
"Raised when we can't promote two given types"
def __str__(self):
return "Cannot promote types %s and %s" % self.args[0]
########NEW FILE########
__FILENAME__ = external
# -*- coding: utf-8 -*-
"""
This module adds a way to declare external functions.
See numba.function_util on how to call them.
"""
from __future__ import print_function, division, absolute_import
import numba
class ExternalFunction(object):
_attributes = ('func_name', 'arg_types', 'return_type', 'is_vararg',
'check_pyerr_occurred', 'badval', 'goodval')
func_name = None
arg_types = None
return_type = None
is_vararg = False
badval = None
goodval = None
exc_type = None
exc_msg = None
exc_args = None
check_pyerr_occurred = False
def __init__(self, return_type=None, arg_types=None, **kwargs):
# Add positional arguments to keyword arguments
if return_type is not None:
kwargs['return_type'] = return_type
if arg_types is not None:
kwargs['arg_types'] = arg_types
# Process keyword arguments
if __debug__:
# Only accept keyword arguments defined _attributes
for k, v in kwargs.items():
if k not in self._attributes:
raise TypeError("Invalid keyword arg %s -> %s" % (k, v))
vars(self).update(kwargs)
@property
def signature(self):
return numba.function(return_type=self.return_type,
args=self.arg_types,
is_vararg=self.is_vararg)
@property
def name(self):
if self.func_name is None:
return type(self).__name__
else:
return self.func_name
def declare_lfunc(self, context, llvm_module):
lfunc_type = self.signature.to_llvm(context)
lfunc = llvm_module.get_or_insert_function(lfunc_type, name=self.name)
return lfunc
class ExternalLibrary(object):
def __init__(self, context):
self._context = context
# (name) -> (external function instance)
self._functions = {}
def add(self, extfn):
if __debug__:
# Sentry for duplicated external function name
if extfn.name in self._functions:
raise NameError("Duplicated external function: %s" % extfn.name)
self._functions[extfn.name] = extfn
def get(self, name):
return self._functions[name]
def __contains__(self, name):
return name in self._functions
def declare(self, module, name, arg_types=(), return_type=None):
extfn = self._functions[name] # raises KeyError if name is not found
if arg_types and return_type:
if (extfn.arg_types != arg_types
and extfn.return_type != return_type):
raise TypeError("Signature mismatch")
sig = extfn.signature
lfunc_type = sig.to_llvm(self._context)
return sig, module.get_or_insert_function(lfunc_type, extfn.name)
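# Hedged sketch (illustrative only, not part of numba): register an external
# function in a library and look it up again, mirroring how
# default_external_library() populates libraries from the declarations in
# numba.external.libc / numba.external.pyapi. numba.int_ and numba.double are
# assumed to be the usual numba scalar types used elsewhere in this code base.
def _example_external_library(context):
    extlib = ExternalLibrary(context)
    extlib.add(ExternalFunction(numba.int_, [numba.double],
                                func_name='my_round'))
    assert 'my_round' in extlib
    return extlib.get('my_round')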
########NEW FILE########
__FILENAME__ = libc
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from .external import ExternalFunction
from numba import *
c_string_type = char.pointer()
class printf(ExternalFunction):
arg_types = [void.pointer()]
return_type = int32
is_vararg = True
class puts(ExternalFunction):
arg_types = [c_string_type]
return_type = int32
class labs(ExternalFunction):
arg_types = [long_]
return_type = long_
class llabs(ExternalFunction):
arg_types = [longlong]
return_type = longlong
class atoi(ExternalFunction):
arg_types = [c_string_type]
return_type = int_
class atol(ExternalFunction):
arg_types = [c_string_type]
return_type = long_
class atoll(ExternalFunction):
arg_types = [c_string_type]
return_type = longlong
class atof(ExternalFunction):
arg_types = [c_string_type]
return_type = double
class strlen(ExternalFunction):
arg_types = [c_string_type]
return_type = size_t
__all__ = [k for k, v in globals().items()
if (v != ExternalFunction
and isinstance(v, type)
and issubclass(v, ExternalFunction))]
########NEW FILE########
__FILENAME__ = pyapi
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import PY3
from .external import ExternalFunction
from numba.typesystem import *
c_string_type = char.pointer()
class ofunc(ExternalFunction):
arg_types = [object_]
return_type = object_
class Py_IncRef(ofunc):
# TODO: rewrite calls to Py_IncRef/Py_DecRef to direct integer
# TODO: increments/decrements
return_type = void
class Py_DecRef(Py_IncRef):
pass
class PyObject_Length(ofunc):
return_type = Py_ssize_t
class PyObject_Call(ExternalFunction):
arg_types = [object_, object_, object_]
return_type = object_
class PyObject_CallMethod(ExternalFunction):
arg_types = [object_, c_string_type, c_string_type]
return_type = object_
is_vararg = True
class PyObject_CallMethodObjArgs(ExternalFunction):
arg_types = [object_, c_string_type]
return_type = object_
is_vararg = True
class PyObject_Type(ExternalFunction):
'''
Added to aid debugging
'''
arg_types = [object_]
return_type = object_
class PyTuple_Pack(ExternalFunction):
arg_types = [Py_ssize_t]
return_type = object_
is_vararg = True
class Py_BuildValue(ExternalFunction):
arg_types = [c_string_type]
return_type = object_
is_vararg = True
class PyArg_ParseTuple(ExternalFunction):
arg_types = [object_, c_string_type]
return_type = int_
is_vararg = True
class PyObject_Print(ExternalFunction):
arg_types = [object_, void.pointer(), int_]
return_type = int_
class PyObject_Str(ExternalFunction):
arg_types = [object_]
return_type = object_
class PyObject_GetAttrString(ExternalFunction):
arg_types = [object_, c_string_type]
return_type = object_
class PyObject_GetItem(ExternalFunction):
arg_types = [object_, object_]
return_type = object_
class PyObject_SetItem(ExternalFunction):
arg_types = [object_, object_, object_]
return_type = int_
class PyObject_GetIter(ExternalFunction):
arg_types = [object_]
return_type = object_
class PyIter_Next(ExternalFunction):
arg_types = [object_]
return_type = object_
class PySlice_New(ExternalFunction):
arg_types = [object_, object_, object_]
return_type = object_
class PyErr_SetString(ExternalFunction):
arg_types = [object_, c_string_type]
return_type = void
class PyErr_Format(ExternalFunction):
arg_types = [object_, c_string_type]
return_type = void.pointer() # object_
is_vararg = True
class PyErr_Occurred(ExternalFunction):
arg_types = []
return_type = void.pointer() # object_
class PyErr_Clear(ExternalFunction):
arg_types = []
return_type = void
class PyErr_SetObject(ExternalFunction):
arg_types = [object_, object_]
return_type = void
#
### Object conversions to native types
#
def create_func(name, restype, argtype, d, check_pyerr_occurred=False):
class PyLong_FromLong(ExternalFunction):
arg_types = [argtype]
return_type = restype
PyLong_FromLong.__name__ = name
PyLong_FromLong.check_pyerr_occurred = check_pyerr_occurred
if restype.is_object:
type = argtype
else:
type = restype
d[type] = PyLong_FromLong
globals()[name] = PyLong_FromLong
# The pipeline is using this dictionary to lookup casting func
_as_long = {}
def as_long(name, type):
create_func(name, type, object_, _as_long, check_pyerr_occurred=True)
as_long('PyLong_AsLong', long_)
as_long('PyLong_AsUnsignedLong', ulong)
as_long('PyLong_AsLongLong', longlong)
as_long('PyLong_AsUnsignedLongLong', ulonglong)
#as_long('PyLong_AsSize_t', size_t) # new in py3k
as_long('PyLong_AsSsize_t', Py_ssize_t)
class PyFloat_FromDouble(ExternalFunction):
arg_types = [double]
return_type = object_
class PyComplex_RealAsDouble(ExternalFunction):
arg_types = [object_]
return_type = double
class PyComplex_ImagAsDouble(ExternalFunction):
arg_types = [object_]
return_type = double
class PyComplex_FromDoubles(ExternalFunction):
arg_types = [double, double]
return_type = object_
class PyComplex_FromCComplex(ExternalFunction):
arg_types = [complex128]
return_type = object_
if not PY3:
class PyInt_FromString(ExternalFunction):
arg_types = [c_string_type, c_string_type.pointer(), int_]
return_type = object_
class PyLong_FromString(ExternalFunction):
arg_types = [c_string_type, c_string_type.pointer(), long_]
return_type = object_
class PyFloat_FromString(ExternalFunction):
arg_types = [object_, c_string_type.pointer()]
return_type = object_
class PyBool_FromLong(ExternalFunction):
arg_types = [long_]
return_type = object_
#
### Conversion of native types to object
#
# The pipeline is using this dictionary to lookup casting func
_from_long = {}
def from_long(name, type):
create_func(name, object_, type, _from_long)
if not PY3:
from_long('PyInt_FromLong', long_)
from_long('PyInt_FromSize_t', size_t) # new in 2.6
from_long('PyInt_FromSsize_t', Py_ssize_t)
else:
from_long('PyLong_FromLong', long_)
from_long('PyLong_FromSize_t', size_t)
from_long('PyLong_FromSsize_t', Py_ssize_t)
from_long('PyLong_FromUnsignedLong', ulong)
from_long('PyLong_FromLongLong', longlong)
from_long('PyLong_FromUnsignedLongLong', ulonglong)
class PyFloat_AsDouble(ExternalFunction):
arg_types = [object_]
return_type = double
class PyComplex_AsCComplex(ExternalFunction):
arg_types = [object_]
return_type = complex128
def create_binary_pyfunc(name):
class PyNumber_BinOp(ExternalFunction):
arg_types = [object_, object_]
return_type = object_
PyNumber_BinOp.__name__ = name
globals()[name] = PyNumber_BinOp
create_binary_pyfunc('PyNumber_Add')
create_binary_pyfunc('PyNumber_Subtract')
create_binary_pyfunc('PyNumber_Multiply')
if PY3:
create_binary_pyfunc('PyNumber_TrueDivide')
else:
create_binary_pyfunc('PyNumber_Divide')
create_binary_pyfunc('PyNumber_Remainder')
class PyNumber_Power(ExternalFunction):
arg_types = [object_, object_, object_]
return_type = object_
create_binary_pyfunc('PyNumber_Lshift')
create_binary_pyfunc('PyNumber_Rshift')
create_binary_pyfunc('PyNumber_Or')
create_binary_pyfunc('PyNumber_Xor')
create_binary_pyfunc('PyNumber_And')
create_binary_pyfunc('PyNumber_FloorDivide')
class PyNumber_Positive(ofunc):
pass
class PyNumber_Negative(ofunc):
pass
class PyNumber_Invert(ofunc):
pass
class PyObject_IsTrue(ExternalFunction):
arg_types = [object_]
return_type = int_
class PyObject_RichCompareBool(ExternalFunction):
arg_types = [object_, object_, int_]
return_type = int_
badval = -1
# check_pyerr_occurred = True
class PyObject_RichCompare(ExternalFunction):
arg_types = [object_, object_, int_]
return_type = object_
__all__ = [k for k, v in globals().items()
if (v != ExternalFunction
and isinstance(v, type)
and issubclass(v, ExternalFunction))]
########NEW FILE########
__FILENAME__ = utility
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from numba import *
from numba.external import external
from numba.external.utilities import utilities
from numba.exttypes.virtual import PyCustomSlots_Table
class UtilityFunction(external.ExternalFunction):
"""
A utility function written in a native language.
funcaddr: the integer address of the C utility function
See ExternalFunction for keyword arguments!
"""
def __init__(self, funcaddr, return_type, arg_types, **kwargs):
super(UtilityFunction, self).__init__(return_type, arg_types, **kwargs)
self.funcaddr = funcaddr
def declare_lfunc(self, context, llvm_module):
lsig = self.signature.pointer().to_llvm(context)
inttype = Py_uintptr_t.to_llvm(context)
intval = llvm.core.Constant.int(inttype, self.funcaddr)
lfunc = intval.inttoptr(lsig)
return lfunc
@classmethod
def load(cls, func_name, signature, **kwds):
"""
Load a utility function by name from the
numba.external.utilities.utilities module.
"""
# Get the integer address of C utility function
func_addr = getattr(utilities, func_name)
return cls(func_addr, signature.return_type, signature.args,
func_name=func_name, **kwds)
load = UtilityFunction.load
load2 = lambda name, sig: load(name, sig, check_pyerr_occurred=True)
object_to_numeric = {
char : load2("__Numba_PyInt_AsSignedChar", char(object_)),
uchar : load2("__Numba_PyInt_AsUnsignedChar", uchar(object_)),
short : load2("__Numba_PyInt_AsSignedShort", short(object_)),
ushort : load2("__Numba_PyInt_AsUnsignedShort", ushort(object_)),
int_ : load2("__Numba_PyInt_AsSignedInt", int_(object_)),
uint : load2("__Numba_PyInt_AsUnsignedInt", uint(object_)),
long_ : load2("__Numba_PyInt_AsSignedLong", long_(object_)),
ulong : load2("__Numba_PyInt_AsUnsignedLong", ulong(object_)),
longlong : load2("__Numba_PyInt_AsSignedLongLong", longlong(object_)),
ulonglong : load2("__Numba_PyInt_AsUnsignedLongLong", ulonglong(object_)),
}
void_p = void.pointer()
void_pp = void_p.pointer()
utility_funcs = list(object_to_numeric.itervalues()) + [
UtilityFunction.load(
"lookup_method", void_p(void_pp, uint64, char.pointer())),
UtilityFunction.load(
"Raise", int_(*[void_p] * 4),
badval=-1,
),
UtilityFunction.load("__Numba_PyInt_FromLongLong", object_(longlong)),
UtilityFunction.load("__Numba_PyInt_FromUnsignedLongLong", object_(ulonglong)),
UtilityFunction.load("convert_datetime_str_to_timestamp", int64(string_)),
UtilityFunction.load("convert_datetime_str_to_units", int_(string_)),
UtilityFunction.load("convert_numpy_datetime_to_timestamp", int64(object_)),
UtilityFunction.load("convert_numpy_datetime_to_units", int_(object_)),
UtilityFunction.load("convert_numpy_timedelta_to_diff", int64(object_)),
UtilityFunction.load("convert_numpy_timedelta_to_units", int_(object_)),
UtilityFunction.load("create_numpy_datetime",
object_(int64, int_, object_)),
UtilityFunction.load("create_numpy_timedelta",
object_(int64, int_, object_)),
UtilityFunction.load("get_target_unit_for_datetime_datetime",
int_(int_, int_)),
UtilityFunction.load("get_target_unit_for_timedelta_timedelta",
int_(int_, int_)),
UtilityFunction.load("get_target_unit_for_datetime_timedelta",
int_(int_, int_)),
UtilityFunction.load("extract_datetime_year", int64(int64, int32)),
UtilityFunction.load("extract_datetime_month", int_(int64, int32)),
UtilityFunction.load("extract_datetime_day", int_(int64, int32)),
UtilityFunction.load("extract_datetime_hour", int_(int64, int32)),
UtilityFunction.load("extract_datetime_min", int_(int64, int32)),
UtilityFunction.load("extract_datetime_sec", int_(int64, int32)),
UtilityFunction.load("sub_datetime_datetime",
int64(int64, int_, int64, int_, int_)),
UtilityFunction.load("add_datetime_timedelta",
int64(int64, int_, int64, int_, int_)),
UtilityFunction.load("sub_datetime_timedelta",
int64(int64, int_, int64, int_, int_)),
UtilityFunction.load("extract_timedelta_sec", int_(int64, int32)),
UtilityFunction.load("convert_timedelta_units_str", int_(string_)),
UtilityFunction.load("get_units_num", int32(string_)),
]
def default_utility_library(context):
"""
Create a library of utility functions.
"""
extlib = external.ExternalLibrary(context)
for utility_func in utility_funcs:
extlib.add(utility_func)
return extlib
########NEW FILE########
__FILENAME__ = attributetable
# -*- coding: utf-8 -*-
"""
Extension attribute table type. Supports ordered (struct) fields, or
unordered (hash-based) fields.
"""
from __future__ import print_function, division, absolute_import
import numba
from numba.typesystem import is_obj
from numba.exttypes import ordering
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class AttributeTable(object):
"""
Type for extension type attributes.
"""
def __init__(self, py_class, parents):
self.py_class = py_class
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
# Ordered list of attribute names
self.attributes = None
# Set of inherited attribute names
self.inherited = set()
def to_struct(self):
return numba.struct([(attr, self.attributedict[attr])
for attr in self.attributes])
def create_attribute_ordering(self, orderer=ordering.unordered):
"""
Create a consistent attribute ordering with the base types.
ordering ∈ { unordered, extending, ... }
"""
self.attributes = orderer(ordering.AttributeTable(self))
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
def strtable(self):
if self.attributes is None:
return str(self.attributedict)
return "{%s}" % ", ".join("%r: %r" % (name, self.attributedict[name])
for name in self.attributes)
def __repr__(self):
return "AttributeTable(%s)" % self.strtable()
@classmethod
def empty(cls, py_class):
table = AttributeTable(py_class, [])
table.create_attribute_ordering()
return table
@classmethod
def from_list(cls, py_class, attributes):
"Create a final attribute table from a list of attribute (name, type)."
table = AttributeTable(py_class, [])
table.attributedict.update(attributes)
table.attributes = [name for name, type in attributes]
return table
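# Hedged sketch (illustrative only, not part of numba): build a finalized
# attribute table from explicit (name, type) pairs and lower it to a struct
# type. numba.double / numba.int_ are assumed to be the usual numba scalar
# types used elsewhere in this code base.
def _example_attribute_table(py_class):
    table = AttributeTable.from_list(py_class, [('x', numba.double),
                                                ('n', numba.int_)])
    return table.to_struct()  # roughly: struct { double x; int n; }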
########NEW FILE########
__FILENAME__ = autojitclass
"""
Compiling @autojit extension classes works as follows:
* Create an extension Numba type holding a symtab
* Capture attribute types in the symtab in the same way as @jit
* Build attribute hash-based vtable, hashing on (attr_name, attr_type).
(attr_name, attr_type) is the only allowed key for that attribute
(i.e. this is fixed at compile time (for now). This means consumers
will always know the attribute type (and don't need to specialize
on different attribute types).
However, using a hash-based attribute table allows easy implementation
of multiple inheritance (virtual inheritance), without complicated
C++ dynamic offsets to base objects (see also virtual.py).
For all methods M with static input types:
* Compile M
* Register M in a list of compiled methods
* Build initial hash-based virtual method table from compiled methods
* Create pre-hash values for the signatures
* We use these values to look up methods at runtime
* Parametrize the virtual method table to build a final hash function:
slot_index = (((prehash >> table.r) & self.table.m_f) ^
self.displacements[prehash & self.table.m_g])
See also virtual.py and the following SEPs:
https://github.com/numfocus/sep/blob/master/sep200.rst
https://github.com/numfocus/sep/blob/master/sep201.rst
And the following paper to understand the perfect hashing scheme:
Hash and Displace: Efficient Evaluation of Minimal Perfect
Hash Functions (1999) by Rasmus Pagh:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.32.6530
* Create descriptors that wrap the native attributes
* Create an extension type:
{
hash-based virtual method table (PyCustomSlots_Table **)
PyGC_HEAD
PyObject_HEAD
...
native attributes
}
We precede the object with the table to make this work in a more
generic scheme, e.g. where a caller is dealing with an unknown
object, and we quickly want to see whether it supports such a
perfect-hashing virtual method table:
if (o->ob_type->tp_flags & NATIVELY_CALLABLE_TABLE) {
PyCustomSlots_Table ***slot_p = ((char *) o) - sizeof(PyGC_HEAD)
PyCustomSlots_Table *vtab = **slot_p
look up function
} else {
PyObject_Call(...)
}
We need to store a PyCustomSlots_Table ** in the object to allow
the producer of the table to replace the table with a new table
for all live objects (e.g. by adding a specialization for
an autojit method).
"""
from numba import error
from numba import pipeline
from numba import numbawrapper
from numba import typesystem
from numba.exttypes import types as etypes
from numba.exttypes import utils
from numba.exttypes import virtual
from numba.exttypes import signatures
from numba.exttypes import validators
from numba.exttypes import compileclass
from numba.exttypes import ordering
from numba.exttypes import autojitmeta
#------------------------------------------------------------------------
# Build Attributes Struct
#------------------------------------------------------------------------
class AutojitAttributeBuilder(compileclass.AttributeBuilder):
def finalize(self, ext_type):
# TODO: hash-based attributes
ext_type.attribute_table.create_attribute_ordering(ordering.extending)
def create_descr(self, attr_name):
"""
Create a descriptor that accesses the attribute on the ctypes struct.
TODO: Use a perfect-hashed attribute table.
"""
def _get(self):
return getattr(self._numba_attrs, attr_name)
def _set(self, value):
return setattr(self._numba_attrs, attr_name, value)
return property(_get, _set)
#------------------------------------------------------------------------
# Filter Methods
#------------------------------------------------------------------------
class AutojitMethodFilter(compileclass.Filterer):
def filter(self, methods, ext_type):
typed_methods = []
for method in methods:
if method.signature is None:
# autojit method
ext_type.vtab_type.untyped_methods[method.name] = method
else:
# method with signature
typed_methods.append(method)
return typed_methods
#------------------------------------------------------------------------
# Build Method Wrappers
#------------------------------------------------------------------------
class AutojitMethodWrapperBuilder(compileclass.MethodWrapperBuilder):
def build_method_wrappers(self, env, extclass, ext_type):
"""
Update the extension class with the function wrappers.
"""
self.process_typed_methods(env, extclass, ext_type)
self.process_untyped_methods(env, extclass, ext_type)
def process_untyped_methods(self, env, extclass, ext_type):
"""
Process autojit methods (undecorated methods). Use the fast
NumbaSpecializingWrapper cache when for when we're being called
from python. When we need to add a new specialization,
`autojit_method_compiler` is invoked to compile the method.
extclass: the extension type
ext_type.py_class: the unspecialized class that was decorated
"""
from numba.wrapping import compiler
for method_name, method in ext_type.vtab_type.untyped_methods.iteritems():
env.specializations.register(method.py_func)
cache = env.specializations.get_autojit_cache(method.py_func)
compiler_impl = compiler.MethodCompiler(env, extclass, method)
wrapper = numbawrapper.NumbaSpecializingWrapper(
method.py_func, compiler_impl, cache)
setattr(extclass, method_name, wrapper)
# ______________________________________________________________________
# Compile method when called from Python
def autojit_method_compiler(env, extclass, method, signature):
"""
Called to compile a new specialized method. The result should be
added to the perfect hash-based vtable.
"""
# compiled_method = numba.jit(argtypes=argtypes)(method.py_func)
func_env = pipeline.compile2(env, method.py_func,
restype=signature.return_type,
argtypes=signature.args)
# Create Method for the specialization
new_method = signatures.Method(
method.py_func,
method.name,
func_env.func_signature,
is_class=method.is_class,
is_static=method.is_static)
new_method.update_from_env(func_env)
# Update vtable type
vtable_wrapper = extclass.__numba_vtab
vtable_type = extclass.exttype.vtab_type
vtable_type.specialized_methods[new_method.name,
signature.args] = new_method
# Replace vtable (which will update the vtable all (live) objects use)
new_vtable = virtual.build_hashing_vtab(vtable_type)
vtable_wrapper.replace_vtable(new_vtable)
return func_env.numba_wrapper_func
#------------------------------------------------------------------------
# Autojit Extension Class Compiler
#------------------------------------------------------------------------
class AutojitExtensionCompiler(compileclass.ExtensionCompiler):
"""
Compile @autojit extension classes.
"""
method_validators = validators.autojit_validators
exttype_validators = validators.autojit_type_validators
def get_bases(self):
"""
Get base classes for the resulting extension type.
We can try several inheritance schemes. We can choose between:
1) One unspecialized - general - class
* This must bind specialized methods on each object instance
to allow method calls from Python, making object allocation
more expensive
* We must take the max() of tp_basicsize to allow enough
space for all specializations. However, the specialization
universe is not known up front, so we must allocate
attributes separately on the heap (or perhaps we must
override tp_alloc to use a dynamic size depending on the
specialization - but this may conflict with non-numba base
classes).
2) One specialized class per instance
* This allows us to experiment with static layouts for
attributes or methods more easily
* It seems more intuitive to back specialized objects with
a specialized type
We will go with 2). We could go for a specialization tree as follows:
A
/ | \
A0 | A1
| | |
| B |
| / \ |
B0 B1
Which gets us:
issubclass(A_specialized, A)
isinstance(A_specialized(), A)
as well as
issubclass(B_specialized, A_specialized)
However, to support this scheme, the unspecialized class A must:
1) Be subclassable
2) Return specialized object instances when instantiated
3) Support unbound method calls
1) requires that A be a class, and then 2) implies that A's metaclass
overrides __call__ or that A implements __new__.
However, since A_specialized subclasses A, A_specialized.__new__ would
need to skip A.__new__, which requires numba to insert a __new__
or modify a user's __new__ method in A_specialized.
The metaclass option seems more feasible:
A_meta.__call__ -> specialized object instance
Users can then override a metaclass in a Python (or numba?) subclass
as follows:
class MyMeta(type(MyNumbaClass)):
...
The metaclass can also support indexing:
A_specialized = A[{'attrib_a': double}]
"""
# TODO: subclassing
return (self.py_class,)
def get_metacls(self):
return autojitmeta.create_specialized_metaclass(self.py_class)
#------------------------------------------------------------------------
# Unbound Methods from Python
#------------------------------------------------------------------------
class UnboundDelegatingMethod(object):
"""
Function in the unspecialized class that is used for delegation to
a method in a specialized class, i.e.
A.method(A(10.0)) -> A(10.0).method()
This method can never be bound, since __new__ always returns specialized
instances (so the unspecialized class cannot be instantiated!).
"""
def __init__(self, py_class, name):
self.py_class = py_class
self.name = name
def __call__(self, obj, *args, **kwargs):
numbawrapper.unbound_method_type_check(self.py_class, obj)
return getattr(obj, self.name)(*args, **kwargs)
def make_delegations(py_class):
"""
Make delegation unbound methods that delegate from the unspecialized
class to the specialized class. E.g.
m = A.method
m(A(10.0)) # Delegate to A[double].method
"""
class_dict = vars(py_class)
for name, func in class_dict.iteritems():
if isinstance(func, (typesystem.Function, staticmethod, classmethod)):
method = signatures.process_signature(func, name)
if method.is_class or method.is_static:
# Class or static method: use the pure Python function wrapped
# in classmethod()/staticmethod()
method.wrapper_func = method.py_func
new_func = method.get_wrapper()
else:
# Regular unbound method. Create dispatcher to bound method
# when called
new_func = UnboundDelegatingMethod(py_class, name)
setattr(py_class, name, new_func)
#------------------------------------------------------------------------
# Make Specializing Class -- Entry Point for decorator application
#------------------------------------------------------------------------
def autojit_class_wrapper(py_class, compiler_impl, cache):
"""
Invoked when a class is decorated with @autojit.
:param py_class: decorated python class
:param compiler_impl: compiler.ClassCompiler
:param cache: FunctionCache
:return: py_class that returns specialized object instances
"""
from numba import numbawrapper
if utils.is_numba_class(py_class):
raise error.NumbaError("Subclassing not yet supported "
"for autojit classes")
# runtime_args -> specialized extension type instance
class_specializer = numbawrapper.NumbaSpecializingWrapper(
py_class, compiler_impl, cache)
py_class = autojitmeta.create_unspecialized_cls(py_class, class_specializer)
py_class.__is_numba_autojit = True
# Update the class from which we derive specializations (which will be
# passed to create_extension() below)
compiler_impl.py_func = py_class
# Back up class dict, since we're going to modify it
py_class.__numba_class_dict = dict(vars(py_class))
# Make delegation methods for unbound methods
make_delegations(py_class)
# Set up partial compilation environment
# partial_env = create_partial_extension_environment(
# compiler_impl.env, py_class, compiler_impl.flags)
# compiler_impl.partial_ext_env = partial_env
return py_class
#------------------------------------------------------------------------
# Build Extension Type -- Compiler Entry Point
#------------------------------------------------------------------------
# def create_partial_extension_environment(env, py_class, flags, argtypes):
def create_extension(env, py_class, flags, argtypes):
"""
Create a partial environment to compile specialized versions of the
extension class in.
Invoked when calling the wrapped class, to compile a new specialized
extension type.
"""
# TODO: Remove argtypes! Partial environment!
flags.pop('llvm_module', None)
ext_type = etypes.autojit_exttype(py_class)
class_dict = dict(utils.get_class_dict(py_class))
extension_compiler = AutojitExtensionCompiler(
env, py_class, class_dict, ext_type, flags,
signatures.AutojitMethodMaker(argtypes),
compileclass.AttributesInheriter(),
AutojitMethodFilter(),
AutojitAttributeBuilder(),
virtual.HashBasedVTabBuilder(),
AutojitMethodWrapperBuilder())
extension_compiler.init()
# return extension_compiler
# def compile_class(extension_compiler, argtypes):
extension_compiler.infer()
extension_compiler.finalize_tables()
extension_compiler.validate()
extension_type = extension_compiler.compile()
return extension_type
########NEW FILE########
__FILENAME__ = autojitmeta
# -*- coding: utf-8 -*-
"""
Autojit meta class.
"""
from __future__ import print_function, division, absolute_import
class _AutojitMeta(type):
"""
Metaclass base for autojit classes.
"""
def create_unspecialized_cls(py_class, class_specializer):
"""
Create an unspecialized class.
class_specializer:
NumbaSpecializingWrapper (args -> specialized object instance)
"""
class AutojitMeta(type(py_class)):
"""
Metaclass base for autojit classes.
AutojitMeta -> UnspecializedClass -> SpecializedInstance
"""
def __call__(cls, *args, **kwargs):
return class_specializer(*args, **kwargs)
def __getitem__(cls, key):
assert isinstance(key, dict)
for specialized_cls in cls.specializations:
attrdict = specialized_cls.exttype.attribute_table.attributedict
if attrdict == key:
return specialized_cls
raise KeyError(key)
@property
def specializations(cls):
return class_specializer.funccache.specializations.values()
# def __repr__(cls):
# return "<Unspecialized Class %s at 0x%x>" % (cls.__name__, id(cls))
return AutojitMeta(py_class.__name__,
py_class.__bases__,
dict(vars(py_class)))
def create_specialized_metaclass(py_class):
"""
When the autojit cache compiles a new class specialization, it invokes
it with the constructor arguments. Since A_specialized inherits from A,
AutojitMeta.__call__ again tries to specialize the class. We need to
override this behaviour and instead instantiate A_specialized through
type.__call__ (invoking A_specialized.{__new__,__init__}).
"""
class SpecializedMeta(type(py_class)):
def __call__(cls, *args, **kwargs):
return type.__call__(cls, *args, **kwargs)
# def __repr__(cls):
# return "<Specialized Class %s at 0x%x>" % (cls.__name__, id(cls))
return SpecializedMeta
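# Hedged sketch (illustrative only, not part of numba): with the metaclasses
# above, calling the unspecialized class yields a specialized instance, and
# indexing it with an attribute-type dict (AutojitMeta.__getitem__) retrieves
# an already-compiled specialization, raising KeyError if none exists yet.
def _example_lookup_specialization(unspecialized_cls, attribute_types):
    return unspecialized_cls[attribute_types]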
########NEW FILE########
__FILENAME__ = compileclass
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import pipeline
from numba import symtab
from numba import typesystem
from numba.exttypes import signatures
from numba.exttypes import utils
from numba.exttypes import extension_types
from numba.exttypes import methodtable
from numba.exttypes import attributetable
from numba.exttypes.types import methods
class ExtensionCompiler(object):
# [validators.MethodValidator]
method_validators = None
# [validators.ExtTypeValidator]
exttype_validators = None
def __init__(self, env, py_class, class_dict, ext_type, flags,
method_maker,
inheriter,
method_filter,
attrbuilder,
vtabbuilder,
methodwrapper):
self.env = env
self.py_class = py_class
self.class_dict = class_dict
self.ext_type = ext_type
self.flags = flags
self.inheriter = inheriter
self.method_filter = method_filter
self.attrbuilder = attrbuilder
self.vtabbuilder = vtabbuilder
self.method_maker = method_maker
self.methodwrapper = methodwrapper
# Partial function environments held after type inference has run
self.func_envs = {}
#------------------------------------------------------------------------
# Initialization and Inheritance
#------------------------------------------------------------------------
def init(self):
"""
Initialize:
1) Inherit attributes and methods
* Also build the vtab and attribute table types
2) Process class attribute types:
class Foo(object):
myattr = double
3) Process method signatures @void(double) etc
"""
self.class_dict['__numba_py_class'] = self.py_class
self.inheriter.inherit(self.ext_type)
process_class_attribute_types(self.ext_type, self.class_dict)
# Process method signatures and set self.methods to [Method]
self.methods = self.process_method_signatures()
# Build ext_type.symtab
build_extension_symtab(self.ext_type)
def process_method_signatures(self):
"""
Process all method signatures:
* Verify signatures
* Populate ext_type with method signatures (ExtMethodType)
"""
processor = signatures.MethodSignatureProcessor(self.class_dict,
self.ext_type,
self.method_maker,
self.method_validators)
methods = processor.get_method_signatures()
methods = self.method_filter.filter(methods, self.ext_type)
# Update ext_type and class dict with known Method objects
for method in methods:
self.ext_type.add_method(method)
self.class_dict[method.name] = method
return methods
#------------------------------------------------------------------------
# Type Inference
#------------------------------------------------------------------------
def infer(self):
"""
1) Infer extension attribute types from the __init__ method
2) Type infer all methods
"""
# Update ext_type.symtab
self.type_infer_init_method()
# Type infer the rest of the methods (with fixed attribute table!)
self.type_infer_methods()
def type_infer_method(self, method):
func_env = pipeline.compile2(self.env, method.py_func,
method.signature.return_type,
method.signature.args,
# don't use qualified name
name=method.name,
pipeline_name='type_infer',
**self.flags)
self.func_envs[method] = func_env
# Verify signature after type inference with registered
# (user-declared) signature
method.signature = methods.ExtMethodType(
method.signature.return_type,
method.signature.args,
method.name,
is_class_method=method.is_class,
is_static_method=method.is_static)
self.ext_type.add_method(method)
def type_infer_init_method(self):
initfunc = self.class_dict.get('__init__', None)
if initfunc is None:
return
self.type_infer_method(initfunc)
def type_infer_methods(self):
for method in self.methods:
if method.name not in ('__new__', '__init__') and method.signature:
self.type_infer_method(method)
#------------------------------------------------------------------------
# Finalize Tables
#------------------------------------------------------------------------
def finalize_tables(self):
"""
Finalize (fix) the attribute and method tables.
"""
self.attrbuilder.finalize(self.ext_type)
self.vtabbuilder.finalize(self.ext_type)
#------------------------------------------------------------------------
# Validate
#------------------------------------------------------------------------
def validate(self):
"""
Validate that we can build the extension type.
"""
for validator in self.exttype_validators:
validator.validate(self.ext_type)
#------------------------------------------------------------------------
# Compilation
#------------------------------------------------------------------------
def compile(self):
"""
Compile extension methods:
1) Process signatures such as @void(double)
2) Infer native attributes through type inference on __init__
3) Patch the extension type with a native attributes struct
4) Infer types for all other methods
5) Update the ext_type with a vtab type
6) Compile all methods
"""
self.compile_methods()
vtable = self.vtabbuilder.build_vtab(self.ext_type)
extclass = self.build_extension_type(vtable)
# Set the extension class on the type. We may instead want an
# ExtensionEnvironment associated with each ext_type, but this
# would be a global thing
self.ext_type.extclass = extclass
self.attrbuilder.build_descriptors(self.ext_type, extclass)
self.methodwrapper.build_method_wrappers(
self.env, extclass, self.ext_type)
return extclass
def compile_methods(self):
"""
Compile all methods, reuse function environments from type inference
stage.
∀ methods M: sets M.lfunc, M.lfunc_pointer and M.wrapper_func
"""
for i, method in enumerate(self.methods):
func_env = self.func_envs[method]
pipeline.run_env(self.env, func_env, pipeline_name='compile')
method.update_from_env(func_env)
def get_bases(self):
"""
Get base classes for the resulting extension type.
For jit types, these are simply the bases of the Python class we
decorated. For autojit-decorated classes we get a more complicated
inheritance scheme (see AutojitExtensionCompiler.get_bases).
"""
return self.py_class.__bases__
def get_metacls(self):
"""
Return the metaclass for the specialized extension type.
"""
return type
def build_extension_type(self, vtable):
"""
Build extension type from llvm methods and pointers and a populated
virtual method table.
"""
vtable_wrapper = self.vtabbuilder.wrap_vtable(vtable)
extension_type = extension_types.create_new_extension_type(
self.get_metacls(),
self.py_class.__name__, self.get_bases(), self.class_dict,
self.ext_type, vtable_wrapper)
return extension_type
#------------------------------------------------------------------------
# Attribute Inheritance
#------------------------------------------------------------------------
class AttributesInheriter(object):
"""
Inherit attributes and methods from parent classes:
For attributes and methods ...
1) Build a table type
2) Copy supertype slots into subclass table type
"""
def inherit(self, ext_type):
"Inherit attributes and methods from superclasses"
attr_table = self.build_attribute_table(ext_type)
ext_type.attribute_table = attr_table
vtable = self.build_method_table(ext_type)
ext_type.vtab_type = vtable
def build_attribute_table(self, ext_type):
bases = utils.get_numba_bases(ext_type.py_class)
parent_attrtables = [base.exttype.attribute_table for base in bases]
attr_table = attributetable.AttributeTable(
ext_type.py_class, parent_attrtables)
for base in bases:
self.inherit_attributes(attr_table, base.exttype)
return attr_table
def build_method_table(self, ext_type):
bases = utils.get_numba_bases(ext_type.py_class)
parent_vtables = [base.exttype.vtab_type for base in bases]
vtable = methodtable.VTabType(ext_type.py_class, parent_vtables)
for base in bases:
self.inherit_methods(vtable, base.exttype)
return vtable
def inherit_attributes(self, attr_table, base_ext_type):
"""
Inherit attributes from a parent class.
May be called multiple times for multiple bases.
"""
base_attrs = base_ext_type.attribute_table.attributedict
attr_table.inherited.update(base_attrs) # { attr_name }
attr_table.attributedict.update(base_attrs) # { attr_name : attr_type }
def inherit_methods(self, vtable, base_ext_type):
"""
Inherit methods from a parent class.
May be called multiple times for multiple bases.
"""
base_methods = base_ext_type.vtab_type.methoddict
vtable.inherited.update(base_methods) # { method_name }
vtable.methoddict.update(base_methods) # { method_name : Method }
#------------------------------------------------------------------------
# Extension Attribute Processing
#------------------------------------------------------------------------
def process_class_attribute_types(ext_type, class_dict):
"""
Process class attribute types:
@jit
class Foo(object):
attr = double
"""
table = ext_type.attribute_table
for name, value in class_dict.iteritems():
if isinstance(value, typesystem.Type):
table.attributedict[name] = value
def build_extension_symtab(ext_type):
"""
Create symbol table for all attributes of the extension type. These
are Variables which are used by the type inferencer and used to
type check attribute assignments.
New attribute assignments create new ExtensionAttributeVariable
variables in the symtab. These variables update the attribute table
during type inference:
class Foo(object):
value1 = double
def __init__(self, value2):
self.value2 = int_(value2)
Before type inference of __init__ we have:
symtab = { 'value1': Variable(double) }
and after type inference of __init__ we have:
symtab = {
'value1': Variable(double), # type is fixed
'value2': ExtensionAttributeVariable(int_), # type is inferred
}
"""
table = ext_type.attribute_table
for attr_name, attr_type in table.attributedict.iteritems():
ext_type.symtab[attr_name] = symtab.Variable(attr_type,
promotable_type=False)
#------------------------------------------------------------------------
# Build Attributes
#------------------------------------------------------------------------
class AttributeBuilder(object):
"""
Build attribute descriptors for Python-level access.
"""
def finalize(self, ext_type):
"Finalize the attribute table (and fix the order if necessary)"
def create_descr(self, attr_name):
"""
Create a descriptor that accesses the attribute from Python space.
"""
def build_descriptors(self, ext_type, extension_class):
"Cram descriptors into the class dict"
table = ext_type.attribute_table
for attr_name, attr_type in table.attributedict.iteritems():
descriptor = self.create_descr(attr_name)
setattr(extension_class, attr_name, descriptor)
#------------------------------------------------------------------------
# Build Method Wrappers
#------------------------------------------------------------------------
class MethodWrapperBuilder(object):
def build_method_wrappers(self, env, extclass, ext_type):
"""
Update the extension class with the function wrappers.
"""
self.process_typed_methods(env, extclass, ext_type)
def process_typed_methods(self, env, extclass, ext_type):
for method in ext_type.methoddict.itervalues():
setattr(extclass, method.name, method.get_wrapper())
#------------------------------------------------------------------------
# Filters
#------------------------------------------------------------------------
class Filterer(object):
def filter(self, iterable, *args):
return list(iterable)
########NEW FILE########
__FILENAME__ = entrypoints
import numba
from numba import error
from numba.exttypes import utils
from numba.exttypes import jitclass
from numba.exttypes import autojitclass
from numba.exttypes.autojitclass import autojit_class_wrapper
from llvm import core as _lc
#------------------------------------------------------------------------
# Build Extension Type (@jit)
#------------------------------------------------------------------------
def jit_extension_class(py_class, translator_kwargs, env):
llvm_module = translator_kwargs.get('llvm_module', None)
if llvm_module is None:
llvm_module = _lc.Module.new('tmp.extension_class.%X' % id(py_class))
translator_kwargs['llvm_module'] = llvm_module
return jitclass.create_extension(env, py_class, translator_kwargs)
#------------------------------------------------------------------------
# Build Dynamic Extension Type (@autojit)
#------------------------------------------------------------------------
def autojit_extension_class(env, py_class, flags, argtypes):
"""
Compile an extension class given the NumbaEnvironment and the Python
class that contains the functions that are to be compiled.
"""
return autojitclass.create_extension(env, py_class, flags, argtypes)
########NEW FILE########
__FILENAME__ = jitclass
"""
Compiling @jit extension classes works as follows:
* Create an extension Numba type holding a symtab
* Capture attribute types in the symtab ...
* ... from the class attributes:
@jit
class Foo(object):
attr = double
* ... from __init__
@jit
class Foo(object):
def __init__(self, attr):
self.attr = double(attr)
* Type infer all methods
* Compile all extension methods
* Process signatures such as @void(double)
* Infer native attributes through type inference on __init__
        * Patch the extension type with a native attributes struct
* Infer types for all other methods
* Update the ext_type with a vtab type
* Compile all methods
* Create descriptors that wrap the native attributes
* Create an extension type:
{
PyObject_HEAD
...
virtual function table (func **)
native attributes
}
The virtual function table (vtab) is a ctypes structure set as
attribute of the extension types. Objects have a direct pointer
for efficiency.
"""
from numba import typesystem
from numba.exttypes import virtual
from numba.exttypes import signatures
from numba.exttypes import validators
from numba.exttypes import compileclass
from numba.exttypes import ordering
from numba.exttypes import types as etypes
#------------------------------------------------------------------------
# Jit Extension Class Compiler
#------------------------------------------------------------------------
class JitExtensionCompiler(compileclass.ExtensionCompiler):
"""
Compile @jit extension classes.
"""
method_validators = validators.jit_validators
exttype_validators = validators.jit_type_validators
#------------------------------------------------------------------------
# Build Attributes Struct
#------------------------------------------------------------------------
class JitAttributeBuilder(compileclass.AttributeBuilder):
def finalize(self, ext_type):
ext_type.attribute_table.create_attribute_ordering(ordering.extending)
def create_descr(self, attr_name):
"""
Create a descriptor that accesses the attribute on the ctypes struct.
This is set by the extension type constructor __new__.
"""
def _get(self):
return getattr(self._numba_attrs, attr_name)
def _set(self, value):
return setattr(self._numba_attrs, attr_name, value)
return property(_get, _set)
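# Illustrative sketch (not part of the original source): with the descriptor
# above, for an instance 'obj' of a compiled @jit class,
#     obj.value          # reads  obj._numba_attrs.value
#     obj.value = 3.0    # writes obj._numba_attrs.value
# where '_numba_attrs' is the ctypes attribute struct set up by the extension
# type's __new__.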
#------------------------------------------------------------------------
# Build Extension Type
#------------------------------------------------------------------------
def create_extension(env, py_class, flags):
"""
Compile an extension class given the NumbaEnvironment and the Python
class that contains the functions that are to be compiled.
"""
flags.pop('llvm_module', None)
# ext_type = etypes.jit_exttype(py_class)
ext_type = typesystem.jit_exttype(py_class)
extension_compiler = JitExtensionCompiler(
env, py_class, dict(vars(py_class)), ext_type, flags,
signatures.JitMethodMaker(),
compileclass.AttributesInheriter(),
compileclass.Filterer(),
JitAttributeBuilder(),
virtual.StaticVTabBuilder(),
compileclass.MethodWrapperBuilder())
extension_compiler.init()
extension_compiler.infer()
extension_compiler.finalize_tables()
extension_compiler.validate()
extension_type = extension_compiler.compile()
return extension_type
########NEW FILE########
__FILENAME__ = methodtable
# -*- coding: utf-8 -*-
"""
Virtual method table types and ordering.
"""
from __future__ import print_function, division, absolute_import
import numba
from numba.exttypes import ordering
from numba.exttypes.types import methods
#------------------------------------------------------------------------
# Virtual Method Table Type
#------------------------------------------------------------------------
class VTabType(object):
"""
Virtual method table type.
"""
def __init__(self, py_class, parents):
self.py_class = py_class
# List of parent vtable types
self.parents = parents
# method_name -> Method
self.methoddict = {}
# method_name -> Method
self.untyped_methods = {}
# specialized methods (i.e. autojit method specializations)
# (method_name, method_argtypes) -> Method
self.specialized_methods = {}
# Set once create_method_ordering is called,
# list of ordered method names
self.methodnames = None
        # Set of inherited methods ({ method_name })
self.inherited = set()
def create_method_ordering(self, orderer=ordering.unordered):
"""
Create a consistent method ordering with the base types.
        ordering ∈ { unordered, extending, ... }
"""
self.methodnames = orderer(ordering.VTable(self))
def add_method(self, method):
"""
Add a method to the vtab type and verify it with any parent
method signatures.
"""
if method.name in self.methoddict:
# Patch current signature after type inference
signature = self.get_signature(method.name)
assert methods.equal_signature_args(method.signature, signature)
if signature.return_type is None:
signature.return_type = method.signature.return_type
else:
assert signature.return_type == method.signature.return_type, \
method.signature
self.methoddict[method.name] = method
def get_signature(self, method_name):
"Get the signature for the given method name. Returns ExtMethodType"
method = self.methoddict[method_name]
return method.signature
def to_struct(self):
return numba.struct([(m.name, m.signature.pointer())
for m in self.methods])
@property
def methods(self):
"Return methods in the order they were set in"
assert self.methodnames is not None
methods = map(self.methoddict.get, self.methodnames)
return list(methods) + self.specialized_methods.values()
@property
def llvm_methods(self):
for m in self.methods:
yield m.lfunc
@property
def method_pointers(self):
for m in self.methods:
yield m.lfunc_pointer
@classmethod
def empty(cls, py_class):
"Create an empty finalized vtable type"
vtable = cls(py_class, [])
vtable.create_method_ordering()
return vtable
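# Illustrative sketch (not part of the original source): for a vtable holding
# methods '__init__' and 'getvalue', to_struct() above produces roughly
#     numba.struct([('__init__', void(ext_type, double).pointer()),
#                   ('getvalue', double(ext_type).pointer())])
# i.e. a struct of function pointers in the order fixed by
# create_method_ordering().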
########NEW FILE########
__FILENAME__ = ordering
# -*- coding: utf-8 -*-
"""
This module defines ordering schemes for virtual methods and attributes.
If we use hash-based virtual (method/attribute) tables, we don't care about
the ordering. If we're using a C++ like virtual method/attribute table (like
normal Python extension types do for attributes), we need to have a layout
compatible with base classes (i.e. we may only add more attributes, but not
reorder any existing ones).
"""
from __future__ import print_function, division, absolute_import
from numba.traits import traits, Delegate
from numba import error
#------------------------------------------------------------------------
# Virtual Tables
#------------------------------------------------------------------------
@traits
class AbstractTable(object):
# Ordered attribute names
attributes = None
# Dict mapping attribute names to attribute entities
attrdict = None
py_class = Delegate('table')
def __init__(self, table):
self.table = table
@property
def parents(self):
cls = type(self)
return list(map(cls, self.table.parents))
@traits
class VTable(AbstractTable):
attributes = Delegate('table', 'methodnames')
attrdict = Delegate('table', 'methoddict')
@traits
class AttributeTable(AbstractTable):
attributes = Delegate('table', 'attributes')
attrdict = Delegate('table', 'attributedict')
#------------------------------------------------------------------------
# Table Entry Ordering (Virtual Method / Attribute Ordering)
#------------------------------------------------------------------------
def sort_parents(table):
"Sort parent tables by size"
return sorted(table.parents, key=lambda tab: len(tab.attrdict))
def unordered(table):
"Return table entities in a random order"
return list(table.attrdict)
def extending(table):
"""
Order the table entities according to the given parent tables, i.e.
we can only extend existing tables.
"""
if not table.parents:
return unordered(table)
parents = sort_parents(table)
biggest_table = parents[-1]
appending_attributes = set(table.attrdict) - set(biggest_table.attributes)
return biggest_table.attributes + list(appending_attributes)
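# Illustrative sketch (not part of the original source): if the largest parent
# table orders its attributes as ['value1', 'value2'] and the subclass adds
# 'value3', extending() returns ['value1', 'value2', 'value3']; the parent
# layout is kept intact and new entries are only appended at the end.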
# ______________________________________________________________________
# Validate Table Ordering
def validate_extending_order_compatibility(table):
parents = sort_parents(table)
tables = parents + [table]
for table_smaller, table_bigger in zip(tables, tables[1:]):
names1 = table_smaller.attributes
names2 = table_bigger.attributes[:len(table_smaller.attributes)]
if names1 != names2:
raise error.NumbaError(
"Cannot create compatible attribute or method ordering for "
"base classes '%s' and '%s'" % (
table_smaller.py_class.__name__,
table_bigger.py_class.__name__))
########NEW FILE########
__FILENAME__ = signatures
# -*- coding: utf-8 -*-
"""
Handle signatures of methods in @jit and @autojit classes.
"""
from __future__ import print_function, division, absolute_import
import types
import numba
from numba import *
from numba import error
from numba import typesystem
#------------------------------------------------------------------------
# Parse method signatures
#------------------------------------------------------------------------
class Method(object):
"""
py_func: the python 'def' function
"""
def __init__(self, py_func, name, signature, is_class, is_static,
nopython=False):
self.py_func = py_func
# py_func.live_objects = []
# Name of this function, py_func.__name__ is inherently unreliable
self.name = name
self.signature = signature
self.is_class = is_class
self.is_static = is_static
self.nopython = nopython
self.template_signature = None
# Filled out after extension method is compiled
# (ExtensionCompiler.compile_methods())
self.wrapper_func = None
self.lfunc = None
self.lfunc_pointer = None
def get_wrapper(self):
if self.is_class:
return classmethod(self.wrapper_func)
elif self.is_static:
return staticmethod(self.wrapper_func)
else:
return self.wrapper_func
def update_from_env(self, func_env):
self.lfunc = func_env.lfunc
self.lfunc_pointer = func_env.translator.lfunc_pointer
self.wrapper_func = func_env.numba_wrapper_func
def clone(self):
return type(self)(self.py_func, self.name, self.signature,
self.is_class, self.is_static, self.nopython)
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def get_classmethod_func(func):
"""
Get the Python function the classmethod or staticmethod is wrapping.
In Python2.6 classmethod and staticmethod don't have the '__func__'
attribute.
"""
if isinstance(func, classmethod):
return func.__get__(object()).__func__
else:
assert isinstance(func, staticmethod)
return func.__get__(object())
#------------------------------------------------------------------------
# Method Builders
#------------------------------------------------------------------------
class MethodMaker(object):
"""
Creates Methods from python functions and validates user-declared
signatures.
"""
def no_signature(self, method):
"Called when no signature is found for the method"
def default_signature(self, method, ext_type):
"""
Retrieve the default method signature for the given method if
no user-declared signature exists.
"""
if has_known_signature(method):
# We know the argument types, but we don't have a solid
            # infrastructure for inter-procedural type inference yet
# return typesystem.function(None, [])
return None
else:
return None
def make_method_type(self, method):
"Create a method type for the given Method and declared signature"
restype = method.signature.return_type
argtypes = method.signature.args
signature = typesystem.ExtMethodType(
return_type=restype, args=argtypes, name=method.name,
is_class_method=method.is_class, is_static_method=method.is_static)
return signature
def has_known_signature(method):
argcount = method.py_func.__code__.co_argcount
return ((method.is_static and argcount == 0) or
(not method.is_static and argcount == 1))
# ______________________________________________________________________
# Method processing for @jit classes
class JitMethodMaker(MethodMaker):
def no_signature(self, py_func):
if py_func.__name__ != '__init__':
raise error.NumbaError(
"Method '%s' does not have signature" % (py_func.__name__,))
def default_signature(self, method, ext_type):
if method.name == '__init__':
argtypes = [numba.object_] * (method.py_func.__code__.co_argcount - 1)
default_signature = numba.void(*argtypes)
return default_signature
else:
return super(JitMethodMaker, self).default_signature(
method, ext_type)
# ______________________________________________________________________
# Method processing for @autojit classes
class AutojitMethodMaker(MethodMaker):
def __init__(self, argtypes):
self.argtypes = argtypes
def default_signature(self, method, ext_type):
if method.name == '__init__':
default_signature = numba.void(*self.argtypes)
return default_signature
else:
return super(AutojitMethodMaker, self).default_signature(
method, ext_type)
#------------------------------------------------------------------------
# Method signature parsing
#------------------------------------------------------------------------
def method_argtypes(method, ext_type, argtypes):
if method.is_static:
leading_arg_types = ()
elif method.is_class:
leading_arg_types = (numba.object_,)
else:
leading_arg_types = (ext_type,)
return leading_arg_types + tuple(argtypes)
def process_signature(method, method_name, method_maker=MethodMaker()):
"""
Verify a method signature.
Returns a Method object and the resolved signature.
Returns None if the object isn't a method.
"""
signature = None
    is_static = False
    is_class = False
while True:
if isinstance(method, types.FunctionType):
# Process function
if signature is None:
method_maker.no_signature(method)
method = Method(method, method_name, signature,
is_class, is_static)
return method
elif isinstance(method, typesystem.Function):
# @double(...)
# def func(self, ...): ...
signature = method.signature
method = method.py_func
else:
# Process staticmethod and classmethod
if isinstance(method, staticmethod):
is_static = True
elif isinstance(method, classmethod):
is_class = True
else:
return None
method = get_classmethod_func(method)
assert False # Unreachable
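# Illustrative sketch (not part of the original source): the loop above peels
# wrappers until it reaches the plain function. For
#     @staticmethod
#     @double()
#     def static1(): ...
# it unwraps the staticmethod (setting is_static=True), records the double()
# signature from the typesystem.Function wrapper, and finally returns
# Method(static1, 'static1', double(), is_class=False, is_static=True).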
class MethodSignatureProcessor(object):
"""
Processes signatures of extension types.
"""
def __init__(self, class_dict, ext_type, method_maker, validators):
self.class_dict = class_dict
self.ext_type = ext_type
self.method_maker = method_maker
# List of method validators: [MethodValidator]
self.validators = validators
def update_signature(self, method):
"""
Update a method signature with the extension type for 'self'.
class Foo(object):
@void() # becomes: void(ext_type(Foo))
def method(self): ...
"""
argtypes = method_argtypes(method, self.ext_type, method.signature.args)
restype = method.signature.return_type
method.signature = typesystem.function(restype, argtypes)
method.signature = self.method_maker.make_method_type(method)
def get_method_signatures(self):
"""
Return [Method] for each decorated method in the class
"""
methods = []
for method_name, method in sorted(self.class_dict.iteritems()):
method = process_signature(method, method_name)
if method is None:
continue
for validator in self.validators:
validator.validate(method, self.ext_type)
if method.signature is None:
method.signature = self.method_maker.default_signature(
method, self.ext_type)
if method.signature is not None:
self.update_signature(method)
methods.append(method)
return methods
########NEW FILE########
__FILENAME__ = test_extension_class_specializations
# -*- coding: utf-8 -*-
"""
Test properties of specialized classes and test indexing.
"""
from __future__ import print_function, division, absolute_import
from numba import *
@autojit
class C(object):
def __init__(self, value):
self.value = value
obj = C(10.0)
print(type(obj).exttype)
specialized_cls = C[{'value': double}]
print(specialized_cls, C, specialized_cls is C)
assert issubclass(specialized_cls, C)
assert isinstance(obj, C)
try:
C[{'value': int_}]
except KeyError as e:
assert e.args[0] == {'value': int_}
else:
raise Exception
########NEW FILE########
__FILENAME__ = test_unannotated_extension_methods
from itertools import cycle
from numba import *
from numba.testing.test_support import parametrize, main
import numpy as np
# NOTE: We make these two separate classes because we don't want a set of
# NOTE: precompiled methods to affect the tests
del python, nopython # Make sure we run in *compiled* mode
def _make_list_func(self, A):
L = []
with nopython:
for i in range(A.shape[0]):
item = A[i]
with python:
L.append(item)
return L
def make_list_func(self, A):
return self._make_list(A)
@autojit
class Base1(object):
"""
Test numba calling autojit methods
"""
def __init__(self, myfloat):
self.value = myfloat
def getvalue(self):
return self.value
_make_list = _make_list_func
make_list = make_list_func
@autojit
class Base2(object):
"""
Test numba calling autojit methods
"""
def __init__(self, myfloat):
self.value = myfloat
def getvalue(self):
return self.value
_make_list = _make_list_func
make_list = make_list_func
@autojit
class Base3(object):
"""
Test Python calling autojit methods.
"""
def __init__(self, myfloat):
self.value = myfloat
def getvalue(self):
return self.value
make_list = _make_list_func
@autojit
def run(obj, array):
list = obj.make_list(array)
return list
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
obj1 = Base1(10.0)
obj2 = Base2(10.0)
obj3 = Base3(10.0)
assert obj1.value == obj2.value == obj3.value == 10.0
dtypes = (
np.float32, np.int32,
np.double, np.int64,
np.complex64, np.complex128,
)
params = (list(zip(cycle([obj1]), dtypes)) +
list(zip(cycle([obj3]), dtypes)))
# ______________________________________________________________________
# Parameterized tests
@parametrize(*params)
def test_python_specialize_method(param):
obj, dtype = param
A = np.arange(10, dtype=dtype)
L = obj.make_list(A)
assert np.all(A == L)
@parametrize(*zip(cycle([obj2]), dtypes))
def test_numba_func_use_method(param):
obj, dtype = param
A = np.arange(10, dtype=dtype)
L = run(obj, A)
assert np.all(A == L)
if __name__ == '__main__':
# obj, dtype = obj2, np.double
#
# A = np.arange(10, dtype=dtype)
# L = run(obj, A)
# assert np.all(A == L)
main()
########NEW FILE########
__FILENAME__ = test_unspecialized_extension_methods
from numba import *
from numba.testing.test_support import parametrize, main
from numba.exttypes.tests import test_extension_methods
Base1 = test_extension_methods.make_base(autojit)
@autojit
class Base2(object):
def __init__(self, myfloat):
self.value = myfloat
def getvalue(self):
return self.value
# @staticmethod
# def static1():
# return 10.0
#
# @staticmethod
# def static2(value):
# return value * 2
#
# @staticmethod
# def static3(value1, value2):
# return value1 * value2
#
# @staticmethod
# @double(double, double)
# def static4(value1, value2):
# return value1 * value2
#
# @classmethod
# @double()
# def class1(cls):
# return 10.0
#
# @double(double)
# @classmethod
# def class2(cls, value):
# return value * 2
#
# @double(double, double)
# @classmethod
# def class3(cls, value1, value2):
# return value1 * value2
#
# @classmethod
# @double(double, double)
# def class4(cls, value1, value2):
# return value1 * value2
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
obj1 = Base1(10.0)
specialized_class1 = Base1[{'value': double}]
obj2 = Base1(11)
@parametrize(Base1, specialized_class1, obj1, obj2)
def test_staticmethods(obj):
assert obj.static1() == 10.0
assert obj.static2(10.0) == 20.0
assert obj.static3(5.0, 6.0) == 30.0
assert obj.static4(5.0, 6.0) == 30.0
@parametrize(Base1, specialized_class1, obj1, obj2)
def test_classmethods(obj):
assert obj.class1() == 10.0
assert obj.class2(10.0) == 20.0
assert obj.class3(5.0, 6.0) == 30.0
assert obj.class4(5.0, 6.0) == 30.0
@parametrize(obj1)
def test_specialized_unbound(obj):
assert type(obj) is specialized_class1
assert specialized_class1.getvalue(obj) == 10.0
@parametrize(obj2)
def test_specialized_unbound2(obj):
assert issubclass(type(obj), Base1)
assert type(obj).getvalue(obj) == 11
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = test_extension_attributes
"""
Test class attributes.
"""
import numba
from numba import *
from numba.testing.test_support import parametrize, main
def make_base(compiler):
@compiler
class Base(object):
value1 = double
value2 = int_
@void(int_, double)
def __init__(self, value1, value2):
self.value1 = value1
self.value2 = value2
@void(int_)
def setvalue(self, value):
self.value1 = value
@double()
def getvalue1(self):
return self.value1
return Base
def make_derived(compiler):
Base = make_base(compiler)
@compiler
class Derived(Base):
value3 = float_
@void(int_)
def setvalue(self, value):
self.value3 = value
return Base, Derived
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
@parametrize(jit, autojit)
def test_baseclass_attrs(compiler):
Base = make_base(compiler)
assert Base(10, 11.0).value1 == 10.0
assert Base(10, 11.0).value2 == 11
obj = Base(10, 11.0)
obj.setvalue(12)
assert obj.getvalue1() == 12.0
@parametrize(jit) #, autojit)
def test_derivedclass_attrs(compiler):
Base, Derived = make_derived(compiler)
obj = Derived(10, 11.0)
obj.setvalue(9)
assert obj.value3 == 9.0
if __name__ == '__main__':
# test_derivedclass_attrs(autojit)
main()
########NEW FILE########
__FILENAME__ = test_extension_inheritance
"""
Test Python- and Numba-level inheritance.
"""
import numba
from numba import *
from numba.testing.test_support import parametrize, main
if not numba.PY3:
# The operation is valid in Python 3
__doc__ = """
>>> Base.py_method(object())
Traceback (most recent call last):
...
TypeError: unbound method numba_function_or_method object must be called with Base instance as first argument (got object instance instead)
"""
def format_str(msg, *values):
return msg % values
def make_base(compiler):
@compiler
class BaseClass(object):
@void(double)
def __init__(self, value):
self.value = value
@double()
def getvalue(self):
"Return value"
return self.value
@void(double)
def setvalue(self, value):
"Set value"
self.value = value
@double()
def method(self):
return self.getvalue()
@double()
def py_method(self):
return self.value
@object_()
def __repr__(self):
return format_str('Base(%s)', self.value)
return BaseClass
def make_derived(compiler):
BaseClass = make_base(compiler)
@compiler
class DerivedClass(BaseClass):
@void(double)
def __init__(self, value):
self.value = value
self.value2 = 2.0
@double()
def getvalue(self):
"Return value"
return self.value * self.value2
@void(double)
def setvalue2(self, value2):
"Set value"
self.value2 = value2
@object_()
def __repr__(self):
return format_str('Derived(%s)', self.value)
return BaseClass, DerivedClass
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
@parametrize(jit, autojit)
def test_baseclass(compiler):
Base = make_base(compiler)
assert str(Base(10.0)) == 'Base(10.0)'
assert Base(10.0).py_method() == 10.0
assert Base(4.0).method() == 4.0
assert Base(4.0).getvalue() == 4.0
try:
Base.py_method(object())
except TypeError as e:
assert e.args[0] == ('unbound method numba_function_or_method '
'object must be called with BaseClass '
'instance as first argument (got object '
'instance instead)'), e.args[0]
else:
raise Exception("Expected an exception")
@parametrize(jit) #, autojit)
def test_derivedclass(compiler):
Base, Derived = make_derived(compiler)
assert str(Derived(20.0)) == 'Derived(20.0)'
assert Derived(10.0).py_method() == 10.0
assert Derived(4.0).method() == 8.0
assert Derived(4.0).getvalue() == 8.0
obj = Derived(4.0)
obj.value2 = 3.0
result = obj.method()
assert result == 12.0, result
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = test_extension_methods
from numba import *
from numba.testing.test_support import parametrize, main
def make_base(compiler):
@compiler
class Base(object):
@void(double)
def __init__(self, myfloat):
self.value = myfloat
@double()
def getvalue(self):
return self.value
@staticmethod
@double()
def static1():
return 10.0
@double(double)
@staticmethod
def static2(value):
return value * 2
@double(double, double)
@staticmethod
def static3(value1, value2):
return value1 * value2
@staticmethod
@double(double, double)
def static4(value1, value2):
return value1 * value2
@classmethod
@double()
def class1(cls):
return 10.0
@double(double)
@classmethod
def class2(cls, value):
return value * 2
@double(double, double)
@classmethod
def class3(cls, value1, value2):
return value1 * value2
@classmethod
@double(double, double)
def class4(cls, value1, value2):
return value1 * value2
return Base
def make_derived(compiler):
Base = make_base(compiler)
@compiler
class Derived(Base):
pass
return Base, Derived
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
def run_staticmethods(obj):
assert obj.static1() == 10.0
assert obj.static2(10.0) == 20.0
assert obj.static3(5.0, 6.0) == 30.0
assert obj.static4(5.0, 6.0) == 30.0
def run_classmethods(obj):
assert obj.class1() == 10.0
assert obj.class2(10.0) == 20.0
assert obj.class3(5.0, 6.0) == 30.0
assert obj.class4(5.0, 6.0) == 30.0
# ______________________________________________________________________
# Parameterized tests
@parametrize(jit, autojit)
def test_base_staticmethods(compiler):
Base = make_base(compiler)
run_staticmethods(Base(2.0))
run_staticmethods(Base)
@parametrize(jit)
def test_derived_staticmethods(compiler):
Base, Derived = make_derived(compiler)
run_staticmethods(Derived(2.0))
run_staticmethods(Derived)
@parametrize(jit, autojit)
def test_base_classmethods(compiler):
Base = make_base(compiler)
run_classmethods(Base(2.0))
run_classmethods(Base)
@parametrize(jit)
def test_derived_classmethods(compiler):
Base, Derived = make_derived(compiler)
run_classmethods(Derived(2.0))
run_classmethods(Derived)
if __name__ == '__main__':
# Base = make_base(autojit)
# obj = Base(2.0)
# run_staticmethods(Base)
main()
########NEW FILE########
__FILENAME__ = test_extension_sizeof
import sys
from numba import *
@jit
class Base(object):
@void(double)
def __init__(self, myfloat):
self.value = myfloat
@double()
def getvalue(self):
"Return value"
return self.value
@jit
class Derived1(Base):
@void(double)
def __init__(self, value):
self.value = value
self.value2 = double(2.0)
def test_sizeof_extra_attr():
base = Base(10.0)
derived = Derived1(10.0)
base_size = sys.getsizeof(base)
derived_size = sys.getsizeof(derived)
assert base_size + 8 == derived_size, (base_size, derived_size)
@jit
class Derived2(Base):
@double()
def getvalue(self):
return self.value
def test_sizeof_extra_method():
base_size = sys.getsizeof(Base(10.0))
derived_size = sys.getsizeof(Derived2(10.0))
assert base_size == derived_size, (base_size, derived_size)
if __name__ == '__main__':
test_sizeof_extra_attr()
test_sizeof_extra_method()
########NEW FILE########
__FILENAME__ = test_extension_types
import ctypes
import numba
from numba import *
from numba.testing.test_support import parametrize, main
def format_str(msg, *values):
return msg % values
def make_myextension(compiler):
@compiler
class MyExtension(object):
@void(double)
def __init__(self, myfloat):
self.value = myfloat
@double()
def getvalue(self):
"Return value"
return self.value
@void(double)
def setvalue(self, value):
"Set value"
self.value = value
@object_()
def __repr__(self):
return format_str('MyExtension%s', self.value)
return MyExtension
def make_obj_extension(compiler):
@compiler
class ObjectAttrExtension(object):
def __init__(self, value1, value2):
self.value1 = object_(value1)
self.value2 = double(value2)
@object_()
def getvalue(self):
"Return value"
return self.value1
@void(double)
def setvalue(self, value):
"Set value"
self.value1 = value
@object_()
def method(self):
return self.getvalue()
@object_(int32)
def method2(self, new_value):
self.setvalue(new_value * 2)
result = self.method()
return result
return ObjectAttrExtension
def make_extattr_extension():
ObjectAttrExtension = make_obj_extension(jit)
exttype = ObjectAttrExtension.exttype
@jit
class ExtensionTypeAsAttribute(object):
def __init__(self, attr):
self.attr = exttype(attr)
return ExtensionTypeAsAttribute
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
@parametrize(jit, autojit)
def test_extension(compiler):
MyExtension = make_myextension(compiler)
# ______________________________________________________________________
# Test methods and attributes
obj = MyExtension(10.0)
assert obj.value == 10.0
assert obj._numba_attrs.value == 10.0
obj.setvalue(20.0)
assert obj.getvalue() == 20.0
assert obj.value == 20.0
    assert obj._numba_attrs._fields_ == [('value', ctypes.c_double)]
# ______________________________________________________________________
# Test stringifications
assert obj.getvalue.__name__ == 'getvalue'
assert obj.getvalue.__doc__ == 'Return value'
strmethod = str(type(obj.getvalue.__func__))
if numba.PY3:
assert strmethod == "<class 'numba_function_or_method'>"
else:
assert strmethod == "<type 'numba_function_or_method'>"
return MyExtension
@parametrize(jit, autojit)
def test_obj_attributes(compiler):
MyExtension = make_myextension(compiler)
ObjectAttrExtension = make_obj_extension(compiler)
# TODO: Disallow string <-> real coercions! These are conversions!
# try:
# obj = ObjectAttrExtension(10.0, 'blah')
# except TypeError as e:
# assert e.args[0] == 'a float is required'
# else:
# raise Exception
assert ObjectAttrExtension(10.0, 3.5).value1 == 10.0
obj = ObjectAttrExtension('hello', 9.3)
assert obj.value1 == 'hello'
obj.setvalue(20.0)
assert obj.getvalue() == 20.0
obj.value1 = MyExtension(10.0)
assert str(obj.value1) == "MyExtension10.0"
assert str(obj.getvalue()) == "MyExtension10.0"
assert str(obj.method()) == "MyExtension10.0"
assert obj.method2(15.0) == 30.0
@parametrize(jit)
def test_extension_attribute(compiler):
ExtensionTypeAsAttribute = make_extattr_extension()
assert (str(ExtensionTypeAsAttribute.exttype) ==
("<JitExtension ExtensionTypeAsAttribute("
"{'attr': <JitExtension ObjectAttrExtension>})>"))
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = test_extension_warnings
"""
>>> compile_class(False).__name__
'Base'
>>> compile_class(True).__name__
Warning ...: Unused argument '...'
Warning ...: Unused argument '...'
'Base'
"""
from numba import *
def compile_class(warn):
@jit(warn=warn, warnstyle='simple') # TODO: only issue error once !
class Base(object):
@void(int_)
def method(self, argument):
pass
return Base
if __name__ == '__main__':
# compile_class(True)
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_type_recognition
"""
>>> test_typeof()
"""
import numba
from numba import *
def make_base(compiler):
@compiler
class Base(object):
value1 = double
value2 = int_
@void(int_, double)
def __init__(self, value1, value2):
self.value1 = value1
self.value2 = value2
return Base
Base = make_base(jit)
@jit
class Derived(Base):
value3 = float_
@void(int_)
def setvalue(self, value):
self.value3 = value
@autojit
def base_typeof():
obj1 = Base(10, 11.0)
return numba.typeof(obj1.value1), numba.typeof(obj1.value2)
@autojit
def derived_typeof():
obj = Derived(10, 11.0)
return (numba.typeof(obj.value1),
numba.typeof(obj.value2),
numba.typeof(obj.value3))
def test_typeof():
pass
# TODO: type recognition of extension object instantiation
# assert base_typeof() == (double, int_), base_typeof()
# assert derived_typeof() == (double, int_, float_), derived_typeof()
#------------------------------------------------------------------------
# Test Specialized autojit typeof
#------------------------------------------------------------------------
AutoBase = make_base(autojit)
@autojit
def attrtypes(obj):
return numba.typeof(obj.value1), numba.typeof(obj.value2)
def test_autobase():
obj = AutoBase(10, 11.0)
assert attrtypes(obj) == (double, int_)
if __name__ == '__main__':
test_typeof()
test_autobase()
########NEW FILE########
__FILENAME__ = test_vtables
# -*- coding: utf-8 -*-
"""
Test hash-based virtual method tables.
"""
from __future__ import print_function, division, absolute_import
import itertools
import numba as nb
from numba import *
from numba import typesystem
from numba.exttypes import virtual
from numba.exttypes import methodtable
from numba.exttypes.signatures import Method
from numba.testing.test_support import parametrize, main
#------------------------------------------------------------------------
# Signature enumeration
#------------------------------------------------------------------------
class py_class(object):
pass
def myfunc1(a):
pass
def myfunc2(a, b):
pass
def myfunc3(a, b, c):
pass
types = list(nb.numeric) + [object_]
array_types = [t[:] for t in types]
array_types += [t[:, :] for t in types]
array_types += [t[:, :, :] for t in types]
all_types = types + array_types
def method(func, name, sig):
return Method(func, name, sig, False, False)
make_methods1 = lambda: [
method(myfunc1, 'method', typesystem.function(argtype, [argtype]))
for argtype in all_types]
make_methods2 = lambda: [
method(myfunc2, 'method', typesystem.function(argtype1, [argtype1, argtype2]))
for argtype1, argtype2 in itertools.product(all_types, all_types)]
#------------------------------------------------------------------------
# Table building
#------------------------------------------------------------------------
def make_table(methods):
table = methodtable.VTabType(py_class, [])
table.create_method_ordering()
for i, method in enumerate(methods):
key = method.name, method.signature.args
method.lfunc_pointer = i
table.specialized_methods[key] = method
assert len(methods) == len(table.specialized_methods)
return table
def make_hashtable(methods):
table = make_table(methods)
hashtable = virtual.build_hashing_vtab(table)
return hashtable
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
@parametrize(make_methods1(), make_methods2()[:6000])
def test_specializations(methods):
hashtable = make_hashtable(methods)
# print(hashtable)
for i, method in enumerate(methods):
key = virtual.sep201_signature_string(method.signature, method.name)
assert hashtable.find_method(key), (i, method, key)
if __name__ == '__main__':
# test_specializations(make_methods2())
main()
########NEW FILE########
__FILENAME__ = extensiontype
# -*- coding: utf-8 -*-
"""
Extension type types.
"""
from functools import partial
from numba.traits import traits, Delegate
from numba.typesystem import NumbaType
@traits
class ExtensionType(NumbaType):
"""
Extension type Numba type.
Available to users through MyExtensionType.exttype (or
numba.typeof(MyExtensionType).
"""
typename = "extension"
argnames = ["py_class"]
flags = ["object"]
is_final = False
methoddict = Delegate('vtab_type')
untyped_methods = Delegate('vtab_type')
specialized_methods = Delegate('vtab_type')
methodnames = Delegate('vtab_type')
add_method = Delegate('vtab_type')
attributedict = Delegate('attribute_table')
attributes = Delegate('attribute_table')
def __init__(self, py_class):
super(ExtensionType, self).__init__(py_class)
assert isinstance(py_class, type), ("Must be a new-style class "
"(inherit from 'object')")
self.name = py_class.__name__
self.py_class = py_class
self.extclass = None
self.symtab = {} # attr_name -> attr_type
self.compute_offsets(py_class)
self.attribute_table = None
self.vtab_type = None
self.parent_attr_struct = None
self.parent_vtab_type = None
self.parent_type = getattr(py_class, "__numba_ext_type", None)
def compute_offsets(self, py_class):
from numba.exttypes import extension_types
self.vtab_offset = extension_types.compute_vtab_offset(py_class)
self.attr_offset = extension_types.compute_attrs_offset(py_class)
# ______________________________________________________________________
# @jit
class jit_exttype(ExtensionType):
"Type for @jit extension types"
def __repr__(self):
return "<JitExtension %s>" % self.name
def __str__(self):
if self.attribute_table:
return "<JitExtension %s(%s)>" % (
self.name, self.attribute_table.strtable())
return repr(self)
# ______________________________________________________________________
# @autojit
class autojit_exttype(ExtensionType):
"Type for @autojit extension types"
def __repr__(self):
return "<AutojitExtension %s>" % self.name
def __str__(self):
if self.attribute_table:
return "<AutojitExtension %s(%s)>" % (
self.name, self.attribute_table.strtable())
return repr(self)
########NEW FILE########
__FILENAME__ = methods
# -*- coding: utf-8 -*-
"""
Extension method types.
"""
from __future__ import print_function, division, absolute_import
from numba.typesystem import types, numbatypes
#------------------------------------------------------------------------
# Extension Method Types
#------------------------------------------------------------------------
class ExtMethodType(types.function):
typename = "extmethod"
argnames = ["return_type", "args", ("name", None), ("is_vararg", False),
("is_class_method", False), ("is_static_method", False)]
flags = ["function", "object"]
@property
def is_bound_method(self):
return not (self.is_class_method or self.is_static_method)
class AutojitMethodType(types.NumbaType):
typename = "autojit_extmethod"
flags = ["object"]
#------------------------------------------------------------------------
# Method Signature Comparison
#------------------------------------------------------------------------
def drop_self(type):
if type.is_static_method or type.is_class_method:
return type.args
assert len(type.args) >= 1 and type.args[0].is_extension, type
return type.args[1:]
def equal_signature_args(t1, t2):
"""
Compare method signatures without regarding the 'self' type (which is
set to the base extension type in the base class, and the derived
extension type in the derived class).
"""
return (t1.is_static_method == t2.is_static_method and
t1.is_class_method == t2.is_class_method and
t1.is_bound_method == t2.is_bound_method and
drop_self(t1) == drop_self(t2))
def equal_signatures(t1, t2):
return (equal_signature_args(t1, t2) and
t1.return_type == t2.return_type)
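# Illustrative sketch (not part of the original source): a base method typed
# double(BaseType) and its override typed double(DerivedType) compare as equal
# here, since drop_self() strips the differing 'self' argument before the
# remaining argument types and the static/class flags are compared.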
def extmethod_to_function(ty):
return numbatypes.function(ty.return_type, ty.args, ty.name)
########NEW FILE########
__FILENAME__ = utils
"Simple utilities related to extension types"
#------------------------------------------------------------------------
# Type checking
#------------------------------------------------------------------------
def is_numba_class(py_class):
return (hasattr(py_class, '__numba_ext_type') or
is_autojit_class(py_class))
def is_autojit_class(py_class):
"Returns whether the given class is an unspecialized autojit class"
return hasattr(py_class, "__is_numba_autojit")
def get_all_numba_bases(py_class):
seen = set()
bases = []
for base in py_class.__mro__[::-1]:
if is_numba_class(base) and base.exttype not in seen:
seen.add(base.exttype)
bases.append(base)
return bases[::-1]
def get_numba_bases(py_class):
return list(filter(is_numba_class, py_class.__bases__))
def get_class_dict(unspecialized_autojit_py_class):
return unspecialized_autojit_py_class.__numba_class_dict
########NEW FILE########
__FILENAME__ = validators
# -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
from __future__ import print_function, division, absolute_import
import warnings
import inspect
from numba import error
from numba.exttypes import ordering
from numba.exttypes.types import methods
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance and Table Validators
#------------------------------------------------------------------------
class ExtTypeValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type):
"""
Validate an extension type with its parents.
"""
# ______________________________________________________________________
# Validate Table Ordering
class AttributeTableOrderValidator(ExtTypeValidator):
"Validate attribute table with static order (non-hash-based)."
def validate(self, ext_type):
ordering.validate_extending_order_compatibility(
ordering.AttributeTable(ext_type.attribute_table))
class MethodTableOrderValidator(ExtTypeValidator):
"Validate method table with static order (non-hash-based)."
def validate(self, ext_type):
ordering.validate_extending_order_compatibility(
ordering.VTable(ext_type.vtab_type))
# ______________________________________________________________________
# Validate Table Slot Types
def validate_type_table(table, comparer):
"""
    Determine the compatibility of this table with its parents given an
ordering.AbstractTable and a type compare function ((type1, type2) -> bool).
"""
for parent in table.parents:
for attr_name, attr_type in parent.attrdict.iteritems():
type1 = table.attrdict[attr_name]
if not comparer(type1, attr_type):
                raise error.NumbaError(
                    "Found incompatible slot for method or "
                    "attribute '%s': %s and %s" % (attr_name, type1, attr_type))
class AttributeTypeValidator(ExtTypeValidator):
"""
Validate attribute types in the table with attribute types in the parent
table.
E.g. if attribute 'foo' has type 'double' in the base class, then
it should also have type 'double' in the derived class.
"""
def validate(self, ext_type):
comparer = lambda t1, t2: t1 == t2
abstract_table = ordering.AttributeTable(ext_type.attribute_table)
validate_type_table(abstract_table, comparer)
class MethodTypeValidator(ExtTypeValidator):
"""
Validate method signatures in the vtable with method signatures
in the parent table.
"""
def validate(self, ext_type):
def comparer(method1, method2):
t1 = method1.signature
t2 = method2.signature
return methods.equal_signatures(t1, t2)
abstract_table = ordering.VTable(ext_type.vtab_type)
validate_type_table(abstract_table, comparer)
# Validators that validate the vtab/attribute struct order
extending_order_validators = [
AttributeTableOrderValidator(),
MethodTableOrderValidator()
]
type_validators = [
AttributeTypeValidator(),
MethodTypeValidator(),
]
jit_type_validators = extending_order_validators + type_validators
autojit_type_validators = type_validators
########NEW FILE########
__FILENAME__ = variable
"""
Extension attribute Variables used for attribute type inference.
See also compileclass.build_extension_symtab().
"""
from numba import symtab
class ExtensionAttributeVariable(symtab.Variable):
"""
Variable created during type inference for assignments to extension
attributes for which we don't know the type yet.
When the assignment happens, update ext_type.attributedict.
"""
def __init__(self, ext_type, attr_name, type, *args, **kwargs):
super(ExtensionAttributeVariable, self).__init__(type, *args, **kwargs)
self.ext_type = ext_type
self.attr_name = attr_name
def perform_assignment(self, rhs_type):
self.type = rhs_type
self.ext_type.attributedict[self.attr_name] = rhs_type
########NEW FILE########
__FILENAME__ = virtual
# -*- coding: utf-8 -*-
"""
Virtual methods using virtual method tables.
Note that for @jit classes, we do not support multiple inheritance with
incompatible base objects. We could use a dynamic offset to base classes,
and adjust object pointers for method calls, like in C++:
http://www.phpcompiler.org/articles/virtualinheritance.html
However, this is quite complicated, and still doesn't allow dynamic extension
for autojit classes. Instead we will use Dag Sverre Seljebotn's hash-based
virtual method tables:
https://github.com/numfocus/sep/blob/master/sep200.rst
https://github.com/numfocus/sep/blob/master/sep201.rst
"""
import ctypes
import numba
from numba.typesystem import *
from numba.exttypes import ordering
from numba.exttypes import extension_types
#------------------------------------------------------------------------
# Virtual Method Table Interface
#------------------------------------------------------------------------
class VTabBuilder(object):
"""
Build virtual method table for quick calling from Numba.
"""
def finalize(self, ext_type):
"Finalize the method table (and fix the order if necessary)"
    def build_vtab(self, ext_type):
"""
Build a virtual method table.
The result will be kept alive on the extension type.
"""
def wrap_vtable(self, vtable):
"""
Wrap the vtable such that users can get a pointer to the underlying
data (extension_types.{Static,Dynamic}VtableWrapper).
"""
#------------------------------------------------------------------------
# Static Virtual Method Tables
#------------------------------------------------------------------------
def vtab_name(field_name):
"Mangle method names for the vtab (ctypes doesn't handle this)"
if field_name.startswith("__") and field_name.endswith("__"):
field_name = '__numba_' + field_name.strip("_")
return field_name
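# Illustrative sketch (not part of the original source):
#     vtab_name('getvalue')  -> 'getvalue'
#     vtab_name('__init__')  -> '__numba_init'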
def build_static_vtab(vtable, vtab_struct):
"""
Create ctypes virtual method table.
    vtable: the vtab type (VTabType) holding the compiled methods
    vtab_struct: the vtab struct type (typesystem.struct)
"""
vtab_ctype = numba.struct(
[(vtab_name(field_name), field_type)
for field_name, field_type in vtab_struct.fields]).to_ctypes()
methods = []
for method, (field_name, field_type) in zip(vtable.methods,
vtab_struct.fields):
method_type_p = field_type.to_ctypes()
method_void_p = ctypes.c_void_p(method.lfunc_pointer)
cmethod = ctypes.cast(method_void_p, method_type_p)
methods.append(cmethod)
vtab = vtab_ctype(*methods)
return vtab
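# Illustrative sketch (not part of the original source): for a vtable with a
# single method 'getvalue' typed double(ext_type), the resulting ctypes
# structure has one field 'getvalue' holding a function pointer cast from the
# method's lfunc_pointer, so compiled callers can dispatch through the struct
# without a Python-level call.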
# ______________________________________________________________________
# Build Virtual Method Table
class StaticVTabBuilder(VTabBuilder):
def finalize(self, ext_type):
ext_type.vtab_type.create_method_ordering(ordering.extending)
def build_vtab(self, ext_type):
vtable = ext_type.vtab_type
return build_static_vtab(vtable, vtable.to_struct())
def wrap_vtable(self, vtable):
return extension_types.StaticVtableWrapper(vtable)
#------------------------------------------------------------------------
# Hash-based virtual method tables
#------------------------------------------------------------------------
# ______________________________________________________________________
# Type Definitions
# TODO: Use something like CFFI + type conversion to get these
# TODO: types automatically
PyCustomSlots_Entry = numba.struct([
('id', uint64),
('ptr', void.pointer()),
])
PyCustomSlots_Table = numba.struct([
('flags', uint64),
('m_f', uint64),
('m_g', uint64),
('entries', PyCustomSlots_Entry.pointer()),
('n', uint16),
('b', uint16),
('r', uint8),
('reserved', uint8),
# actually: uint16[b], 'b' trailing displacements
# ('d', numba.carray(uint16, 0)), #0xffff)),
# ('entries_mem', PyCustomSlot_Entry[n]), # 'n' trailing customslot entries
])
# ______________________________________________________________________
# Hash-table building
def initialize_interner():
from numba.pyextensibletype.extensibletype import intern
intern.global_intern_initialize()
def sep201_signature_string(functype, name):
functype = numba.function(functype.return_type, functype.args, name)
return str(functype)
def hash_signature(functype, name):
from numba.pyextensibletype.extensibletype import methodtable
initialize_interner()
sep201_hasher = methodtable.Hasher()
sigstr = sep201_signature_string(functype, name)
return sep201_hasher.hash_signature(sigstr)
def build_hashing_vtab(vtable):
"""
Build hash-based vtable.
"""
from numba.pyextensibletype.extensibletype import methodtable
n = len(vtable.methods)
ids = [sep201_signature_string(method.signature, method.name)
for method in vtable.methods]
flags = [0] * n
sep201_hasher = methodtable.Hasher()
vtab = methodtable.PerfectHashMethodTable(sep201_hasher)
vtab.generate_table(n, ids, flags, list(vtable.method_pointers))
# print(vtab)
return vtab
# ______________________________________________________________________
# Build Hash-based Virtual Method Table
class HashBasedVTabBuilder(VTabBuilder):
def finalize(self, ext_type):
ext_type.vtab_type.create_method_ordering(ordering.unordered)
def build_vtab(self, ext_type):
return build_hashing_vtab(ext_type.vtab_type)
def wrap_vtable(self, vtable):
return extension_types.DynamicVtableWrapper(vtable)
########NEW FILE########
__FILENAME__ = functions
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast, inspect, linecache, os
import logging
import textwrap
from collections import defaultdict
from numba import *
from numba import typesystem
from numba import numbawrapper
import llvm.core
logger = logging.getLogger(__name__)
try:
from meta.decompiler import decompile_func
except Exception as exn:
def decompile_func(*args, **kwargs):
raise Exception("Could not import Meta -- Cannot recreate source "
"from bytecode")
def fix_ast_lineno(tree):
# NOTE: A hack to fix assertion error in debug mode due to bad lineno.
# Lineno must increase monotonically for co_lnotab,
# the "line number table" to work correctly.
    # This script just sets all lineno to 1 and col_offset to 0.
# This makes it impossible to do traceback, but it is not possible
# anyway since we are dynamically changing the source code.
for node in ast.walk(tree):
        # only ast.expr and ast.stmt and their subclasses have lineno and col_offset.
# if isinstance(node, ast.expr) or isinstance(node, ast.stmt):
node.lineno = 1
node.col_offset = 0
return tree
## Fixme:
## This should be changed to visit the AST and fix-up where a None object
## is present as this will likely not work for all AST.
def _fix_ast(myast):
import _ast
# Remove Pass nodes from the end of the ast
while len(myast.body) > 0 and isinstance(myast.body[-1], _ast.Pass):
del myast.body[-1]
# Add a return node at the end of the ast if not present
if len(myast.body) < 1 or not isinstance(myast.body[-1], _ast.Return):
name = _ast.Name(id='None',ctx=_ast.Load(), lineno=0, col_offset=0)
myast.body.append(ast.Return(name))
# remove _decorator list which sometimes confuses ast visitor
try:
indx = myast._fields.index('decorator_list')
except ValueError:
return
else:
myast.decorator_list = []
def _get_ast(func, flags=0):
if (int(os.environ.get('NUMBA_FORCE_META_AST', 0)) or
func.__name__ == '<lambda>'):
func_def = decompile_func(func)
if isinstance(func_def, ast.Lambda):
func_def = ast.FunctionDef(name='<lambda>', args=func_def.args,
body=[ast.Return(func_def.body)],
decorator_list=[])
assert isinstance(func_def, ast.FunctionDef)
return func_def
try:
linecache.checkcache(inspect.getsourcefile(func))
source = inspect.getsource(func)
source_module = inspect.getmodule(func)
except IOError:
return decompile_func(func)
else:
# Split off decorators
# TODO: This is not quite correct, we can have comments or strings
# starting at column 0 and an indented function !
source = textwrap.dedent(source)
decorators = 0
while not source.lstrip().startswith('def'): # decorator can have multiple lines
assert source
decorator, sep, source = source.partition('\n')
decorators += 1
if (hasattr(source_module, "print_function") and
hasattr(source_module.print_function, "compiler_flag")):
flags |= source_module.print_function.compiler_flag
source_file = getattr(source_module, '__file__', '<unknown file>')
module_ast = compile(source, source_file, "exec",
ast.PyCF_ONLY_AST | flags, True)
lineoffset = func.__code__.co_firstlineno + decorators - 1
ast.increment_lineno(module_ast, lineoffset)
assert len(module_ast.body) == 1
func_def = module_ast.body[0]
_fix_ast(func_def)
assert isinstance(func_def, ast.FunctionDef)
return func_def
live_objects = [] # These are never collected
def keep_alive(py_func, obj):
"""
Keep an object alive for the lifetime of the translated unit.
This is a HACK. Make live objects part of the function-cache
NOTE: py_func may be None, so we can't make it a function attribute
"""
live_objects.append(obj)
class FunctionCache(object):
"""
Cache for compiler functions, declared external functions and constants.
"""
def __init__(self, context=None, env=None):
self.context = context
self.env = env
# All numba-compiled functions
# (py_func) -> (arg_types, flags) -> (signature, llvm_func, ctypes_func)
self.__compiled_funcs = defaultdict(dict)
# Faster caches we use directly from autojit to determine the
# specialization. (py_func) -> (NumbaFunction)
self.__local_caches = defaultdict(numbawrapper.AutojitFunctionCache)
def get_function(self, py_func, argtypes, flags):
'''Get a compiled function from the function cache.
The function must not be an external function.
For an external function, is_registered() must return False.
'''
result = None
assert argtypes is not None
flags = None # TODO: stub
argtypes_flags = tuple(argtypes), flags
if py_func in self.__compiled_funcs:
result = self.__compiled_funcs[py_func].get(argtypes_flags)
return result
def get_autojit_cache(self, py_func):
"""
Get the numbawrapper.AutojitFunctionCache that does a quick lookup
for the cached case.
"""
return self.__local_caches[py_func]
def is_registered(self, func):
'''Check if a function is registered to the FunctionCache instance.
'''
if isinstance(func, numbawrapper.NumbaWrapper):
return func.py_func in self.__compiled_funcs
return False
def register(self, func):
'''Register a function to the FunctionCache.
It is necessary before calling compile_function().
'''
return self.__compiled_funcs[func]
def register_specialization(self, func_env):
func = func_env.func
argtypes = func_env.func_signature.args
compiled = (
func_env.func_signature,
func_env.lfunc,
func_env.numba_wrapper_func,
)
# Sanity check
assert isinstance(func_env.func_signature, typesystem.function)
assert isinstance(func_env.lfunc, llvm.core.Function)
argtypes_flags = tuple(argtypes), None
self.__compiled_funcs[func][argtypes_flags] = compiled
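# Added commentary (illustrative sketch; py_func/func_env/argtypes are
# placeholders, not defined here). The flow this class supports is roughly:
#
#   cache.register(py_func)                     # announce the function
#   ...compilation produces a func_env...
#   cache.register_specialization(func_env)     # store (signature, lfunc, wrapper)
#   cache.get_function(py_func, argtypes, 0)    # -> that triple, or None
#
# Note that get_function() currently ignores its flags argument (see the
# "TODO: stub" above), so specializations are keyed on argument types only.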
########NEW FILE########
__FILENAME__ = function_util
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
def external_call(context, llvm_module, name, args=(), temp_name=None):
extfn = context.external_library.get(name)
return external_call_func(context, llvm_module, extfn, args, temp_name)
def utility_call(context, llvm_module, name, args=(), temp_name=None):
extfn = context.utility_library.get(name)
return external_call_func(context, llvm_module, extfn, args, temp_name)
def external_call_func(context, llvm_module, extfn, args=(), temp_name=None):
'''Build a call node to the specified external function.
context --- A numba context
llvm_module --- An LLVM module
extfn --- The external function to call
args --- [optional] arguments for the call
temp_name --- [optional] Name of the temporary value in LLVM IR.
'''
from numba import nodes
temp_name = temp_name or extfn.name
assert temp_name is not None
sig = extfn.signature
lfunc = extfn.declare_lfunc(context, llvm_module)
exc_check = dict(badval = extfn.badval,
goodval = extfn.goodval,
exc_msg = extfn.exc_msg,
exc_type = extfn.exc_type,
exc_args = extfn.exc_args)
result = nodes.NativeCallNode(sig, args, lfunc, name=temp_name, **exc_check)
if extfn.check_pyerr_occurred:
result = nodes.PyErr_OccurredNode(result)
return result
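# Added commentary (illustrative sketch): building a call to a registered
# external function, assuming 'strlen' is present in context.external_library
# (macros.py declares it that way); c_string_node is a placeholder AST node:
#
#   call_node = external_call(context, llvm_module, 'strlen',
#                             args=[c_string_node])
#
# The result is a nodes.NativeCallNode carrying the external function's
# signature and error-checking metadata (badval/goodval/exc_*), wrapped in a
# PyErr_OccurredNode when the declaration requests a PyErr check.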
########NEW FILE########
__FILENAME__ = intrinsic
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from numba.typesystem import function
from collections import namedtuple
Signature = namedtuple('Signature', ['return_type', 'arg_types'])
class Intrinsic(object):
_attributes = ('func_name', 'arg_types', 'return_type')
func_name = None
arg_types = None
return_type = None
linkage = llvm.core.LINKAGE_LINKONCE_ODR
is_vararg = False
# Unused?
# badval = None
# goodval = None
# exc_type = None
# exc_msg = None
# exc_args = None
def __init__(self, **kwargs):
if __debug__:
# Only accept keyword arguments defined in _attributes
for k, v in kwargs.items():
if k not in self._attributes:
raise TypeError("Invalid keyword arg %s -> %s" % (k, v))
vars(self).update(kwargs)
@property
def signature(self):
return function(self.return_type, self.arg_types, self.is_vararg)
@property
def name(self):
if self.func_name is None:
return type(self).__name__
else:
return self.func_name
def implementation(self, module, lfunc):
return None
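# Added commentary (illustrative sketch): concrete intrinsics subclass
# Intrinsic and fill in arg_types/return_type plus an implementation() that
# populates the given lfunc; see CStringSlice2Len in string_intrinsic.py for
# a real example, roughly:
#
#   class CStringSlice2Len(Intrinsic):
#       arg_types = [string_, size_t, Py_ssize_t, Py_ssize_t]
#       return_type = size_t
#       def implementation(self, module, lfunc):
#           ...emit the body of lfunc...
#           return lfunc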
class IntrinsicLibrary(object):
'''An intrinsic library maintains an LLVM module for holding the
intrinsics. These are functions used internally to implement
specific features.
'''
def __init__(self, context):
self._context = context
self._module = llvm.core.Module.new('intrlib.%X' % id(self))
# A lookup dictionary that matches (name) -> (intr)
self._functions = {}
# (name, args) -> (lfunc)
self._compiled = {}
# Build a function pass manager to optimize the intrinsics and reduce memory usage
from llvm.passes import FunctionPassManager, PassManagerBuilder
pmb = PassManagerBuilder.new()
pmb.opt_level = 2
self._fpm = FunctionPassManager.new(self._module)
pmb.populate(self._fpm)
def add(self, intr):
'''Add a new intrinsic.
intr --- an Intrinsic class
'''
if __debug__:
# Guard against duplicated intrinsic function names
if intr.__name__ in self._functions:
raise NameError("Duplicated intrinsic function: %s" \
% intr.__name__)
self._functions[intr.__name__] = intr
if intr.arg_types and intr.return_type:
# only if it defines arg_types and return_type
self.implement(intr())
def implement(self, intr):
'''Implement a new intrinsic.
intr --- an Intrinsic instance
'''
# implement the intrinsic
lfunc_type = intr.signature.to_llvm(self._context)
lfunc = self._module.add_function(lfunc_type, name=intr.name)
lfunc.linkage = intr.linkage
intr.implementation(lfunc.module, lfunc)
# optimize the function
self._fpm.run(lfunc)
# populate the lookup dictionary
key = type(intr).__name__, tuple(intr.arg_types)
sig = intr.signature.to_llvm(self._context)
self._compiled[key] = sig, lfunc
def declare(self, module, name, arg_types=(), return_type=None):
'''Create a declaration in the module.
'''
sig, intrlfunc = self.get(name, arg_types, return_type)
lfunc = module.get_or_insert_function(intrlfunc.type.pointee,
name=intrlfunc.name)
return sig, lfunc
def get(self, name, arg_types=(), return_type=None):
'''Get an intrinsic by name and signature.
name --- function name
arg_types --- function arg types
return_type --- function return type
Returns the function signature and an lfunc pointing to the compiled
intrinsic.
'''
if not arg_types and not return_type:
intr = self._functions[name]
sig, lfunc = self.get(name,
arg_types=intr.arg_types,
return_type=intr.return_type)
else:
key = name, tuple(arg_types)
try:
sig, lfunc = self._compiled[key]
except KeyError:
intr = self._functions[name]
if not intr.arg_types and not intr.return_type:
self.implement(intr(arg_types=arg_types,
return_type=return_type))
sig, lfunc = self._compiled[key]
return sig, lfunc
def link(self, module):
'''Link the intrinsic library into the target module.
'''
module.link_in(self._module, preserve=True)
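# Added commentary (illustrative sketch; module/builder and the c_string
# values are placeholders): macros.py obtains intrinsics from a context's
# intrinsic library like this:
#
#   sig, lfunc = context.intrinsic_library.declare(module, 'CStringSlice2Len')
#   out_len = builder.call(lfunc, [c_string, c_str_len, lb, ub])
#
# declare() only creates a declaration in the target module; link() merges
# this library's own module -- which holds the optimized implementations --
# into a target module.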
########NEW FILE########
__FILENAME__ = numba_intrinsic
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import string
import numba
from numba import *
from numba import nodes
from numba import pipeline
from numba import environment
from numba import numbawrapper
from numba.type_inference.module_type_inference import register_value
#------------------------------------------------------------------------
# Intrinsic Classes
#------------------------------------------------------------------------
class Intrinsic(object):
def __init__(self, func_signature, name):
self.func_signature = func_signature
self.name = name
# Register a type inference function for our intrinsic
register_infer_intrinsic(self)
# Build a function wrapper
self.jitted_func = make_intrinsic(self)
def __call__(self, *args):
return self.jitted_func(*args)
def emit_code(self, lfunc, builder, llvm_args):
raise NotImplementedError
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.name == other.name and
self.func_signature == other.func_signature)
def __hash__(self):
return hash((type(self), self.name, self.func_signature))
class NumbaInstruction(Intrinsic):
def emit_code(self, lfunc, builder, llvm_args):
return getattr(builder, self.name)(*llvm_args)
class NumbaIntrinsic(Intrinsic):
def emit_code(self, lfunc, builder, llvm_args):
raise NotImplementedError
def is_numba_intrinsic(value):
return isinstance(value, Intrinsic)
numbawrapper.add_hash_by_value_type(Intrinsic)
#------------------------------------------------------------------------
# Build Intrinsic Wrapper
#------------------------------------------------------------------------
cache = {}
env = environment.NumbaEnvironment.get_environment()
def make_intrinsic(intrinsic):
"""
Create an intrinsic function given an Intrinsic.
"""
if intrinsic in cache:
return cache[intrinsic]
# NOTE: don't use numba.jit() and 'exec', it will make inspect.getsource()
# NOTE: fail, and hence won't work in python 2.6 (since meta doesn't work
# NOTE: there)
# Build argument names
nargs = len(intrinsic.func_signature.args)
assert nargs < len(string.ascii_letters)
argnames = ", ".join(string.ascii_letters[:nargs])
# Build source code and environment
args = (intrinsic.name, argnames, argnames)
source = ("def %s(%s): return intrinsic(%s)\n" % args)
func_globals = {'intrinsic': intrinsic}
mod_ast = ast.parse(source)
func_ast = mod_ast.body[0]
# Compile
func_env, _ = pipeline.run_pipeline2(
env, func=None, func_ast=func_ast,
func_signature=intrinsic.func_signature,
function_globals=func_globals)
jitted_func = func_env.numba_wrapper_func
# Populate cache
cache[intrinsic] = jitted_func
return jitted_func
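# Added commentary (illustrative sketch, hypothetical intrinsic name): for an
# intrinsic called "myintr" taking two arguments, the wrapper source built
# above is simply
#
#   def myintr(a, b): return intrinsic(a, b)
#
# with 'intrinsic' bound via func_globals, so type inference sees a call to
# the intrinsic object and resolves it through register_infer_intrinsic()
# below.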
#------------------------------------------------------------------------
# Intrinsic Value Type Inference
#------------------------------------------------------------------------
class IntrinsicNode(nodes.ExprNode):
"AST Node representing a reference to an intrinsic"
_fields = ['args']
def __init__(self, intrinsic, args):
self.intrinsic = intrinsic
self.type = self.intrinsic.func_signature.return_type
self.args = list(args)
def register_infer_intrinsic(intrinsic):
def infer(*args):
return IntrinsicNode(intrinsic, args)
register_value(intrinsic, infer,
pass_in_types=False,
can_handle_deferred_types=True)
#------------------------------------------------------------------------
# User Exposed Functionality
#------------------------------------------------------------------------
def declare_intrinsic(func_signature, name):
"""
Declare an intrinsic, e.g.
>>> declare_intrinsic(void(), "llvm.debugtrap")
"""
return NumbaIntrinsic(func_signature, name)
def declare_instruction(func_signature, name):
"""
Declare an instruction, e.g.
>>> declare_instruction(int32(int32, int32), "add")
The llvm.core.Builder instruction with the given name will be used.
"""
return NumbaInstruction(func_signature, name)
########NEW FILE########
__FILENAME__ = string_intrinsic
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from .intrinsic import Intrinsic
from llpython.byte_translator import LLVMTranslator
__all__ = ['CStringSlice2',
'CStringSlice2Len']
class CStringSlice2 (Intrinsic):
arg_types = [string_, string_, size_t, Py_ssize_t, Py_ssize_t]
return_type = void
def implementation(self, module, lfunc):
# logger.debug((module, str(lfunc)))
def _py_c_string_slice(out_string, in_string, in_str_len, lower,
upper):
zero = lc_size_t(0)
if lower < zero:
lower += in_str_len
if upper < zero:
upper += in_str_len
elif upper > in_str_len:
upper = in_str_len
temp_len = upper - lower
if temp_len < zero:
temp_len = zero
strncpy(out_string, in_string + lower, temp_len)
out_string[temp_len] = li8(0)
return
LLVMTranslator(module).translate(_py_c_string_slice,
llvm_function = lfunc)
return lfunc
class CStringSlice2Len(Intrinsic):
arg_types = [string_, size_t, Py_ssize_t, Py_ssize_t]
return_type = size_t
def implementation(self, module, lfunc):
def _py_c_string_slice_len(in_string, in_str_len, lower, upper):
zero = lc_size_t(0)
if lower < zero:
lower += in_str_len
if upper < zero:
upper += in_str_len
elif upper > in_str_len:
upper = in_str_len
temp_len = upper - lower
if temp_len < zero:
temp_len = zero
return temp_len + lc_size_t(1)
LLVMTranslator(module).translate(_py_c_string_slice_len,
llvm_function = lfunc)
return lfunc
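# Added commentary: a worked example of the slice-length computation above.
# For in_str_len=5, lower=-2, upper=5 the negative lower bound wraps to 3,
# the upper bound stays at 5, temp_len becomes 2, and the function returns
# 3 -- the two sliced characters plus one byte for the terminating NUL that
# CStringSlice2 writes.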
########NEW FILE########
__FILENAME__ = build
import os
from .generator import build
root = os.path.dirname(os.path.abspath(__file__))
features = ['ast', 'transformer', 'visitor']
def build_normalized():
fn = os.path.join(root, "Normalized.asdl")
outdir = os.path.join(root, "normalized")
build.build_package(fn, features, outdir)
def build_untyped():
fn = os.path.join(root, "UntypedIR.asdl")
outdir = os.path.join(root, "untyped")
build.build_package(fn, features, outdir)
def build_typed():
fn = os.path.join(root, "TypedIR.asdl")
outdir = os.path.join(root, "typed")
build.build_package(fn, features, outdir)
if __name__ == '__main__':
build_normalized()
build_untyped()
# build_typed()
########NEW FILE########
__FILENAME__ = astgen
# -*- coding: utf-8 -*-
"""
Generate Python AST nodes and Cython pxd files.
Note: This file needs to be easy to externalize (factor out into another
project).
"""
from __future__ import print_function, division, absolute_import
import os
import ast
from textwrap import dedent
from functools import partial
from . import generator
from . import naming
from .classgen import Class, ClassCodegen
from .formatting import py_formatter, cy_formatter
root = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(root, "tests")
#------------------------------------------------------------------------
# Code Formatting
#------------------------------------------------------------------------
py_preamble = """
import cython
# ASDL builtin types
# identifier = str
# string = basestring
class TypedProperty(object):
'''Defines a class property that does a type check in the setter.'''
def __new__(cls, ty, doc, default=None):
rv = super(TypedProperty, cls).__new__(cls)
cls.__init__(rv, ty, doc, default)
return property(rv.getter, rv.setter, rv.deleter, doc)
def __init__(self, ty, typename, attrname, default=None):
self.propname = '_property_%d' % (id(self),)
self.default = default
if not isinstance(ty, tuple):
ty = (ty,)
self.ty = ty
self.typename = typename
self.attrname = attrname
def getter(self, obj):
return getattr(obj, self.propname, self.default)
def setter(self, obj, new_val):
if not isinstance(new_val, self.ty):
ty = tuple(t.__name__ for t in self.ty)
raise ValueError(
"Invalid type for attribute '%s.%s', "
"expected instance of type(s) %r "
"(got %r)." % (self.typename, self.attrname,
ty, type(new_val).__name__))
setattr(obj, self.propname, new_val)
def deleter(self, obj):
delattr(obj, self.propname)
"""
cy_preamble = """
cimport cython
from %s cimport GenericVisitor
# ctypedef str identifier
# ctypedef str string
# ctypedef bint bool
""" % (naming.interface,)
def format_field(classname, field):
type = py_formatter.format_type(field.type)
if field.opt:
# types.NoneType is not in Python 3, type(None) should work
# for both Python 2 and 3.
type = "(%s, type(None))" % (type,)
format_dict = dict(name=field.name, type=type, classname=classname)
return ('%(classname)s.%(name)s = TypedProperty(%(type)s, '
'"%(classname)s", "%(name)s")' % format_dict)
#------------------------------------------------------------------------
# Class Formatting
#------------------------------------------------------------------------
class PyClass(Class):
"""
Generate Python AST classes.
"""
def __str__(self):
fieldnames = [str(field.name) for field in self.fields]
fields = map(repr, fieldnames)
properties = [format_field(self.name, field)
for field in self.fields] or ["pass"]
if self.attributes is not None:
attributes = map(repr, self.attributes)
attributes = py_formatter.format_stats(",\n", 8, attributes)
attributes = "_attributes = (" + attributes + ")"
else:
attributes = "# inherit _attributes"
if fieldnames:
initialize = ["self.%s = %s" % (name, name) for name in fieldnames]
else:
initialize = ["pass"]
fmtstring = ", ".join("%s=%%s" % name for name in fieldnames)
fmtargs = "(%s)" % ", ".join(py_formatter.get_fields(self.fields))
format_dict = dict(
name=self.name, base=self.base, doc=self.doc,
fields = py_formatter.format_stats(",\n", 8, fields),
attributes = attributes,
properties = py_formatter.format_stats("\n", 4, properties),
params = ", ".join(fieldnames),
initialize = py_formatter.format_stats("\n", 8, initialize),
fmtstring = fmtstring,
fmtargs = fmtargs,
)
return dedent('''
class %(name)s(%(base)s):
"""
%(doc)s
"""
_fields = (
%(fields)s
)
%(attributes)s
def __init__(self, %(params)s):
%(initialize)s
def accept(self, visitor):
return visitor.visit_%(name)s(self)
def __repr__(self):
return "%(name)s(%(fmtstring)s)" %% %(fmtargs)s
if not cython.compiled:
# Properties
%(properties)s
''') % format_dict
class CyClass(Class):
"""
Generate classes in pxd overlay.
"""
def __str__(self):
f = cy_formatter
fields = ["cdef public %s %s" % (f.format_type(field.type), field.name)
for field in self.fields]
fmtdict = {
'name': self.name,
'base': self.base,
'fields': cy_formatter.format_stats("\n", 4, fields)
}
return dedent('''
cdef class %(name)s(%(base)s):
%(fields)s
cpdef accept(self, GenericVisitor visitor)
''') % fmtdict
#------------------------------------------------------------------------
# Global Exports
#------------------------------------------------------------------------
def make_root_class(Class):
return Class("AST", "object", doc="AST root node.", attributes=())
codegens = [
ClassCodegen(naming.nodes + '.py', py_preamble,
PyClass, make_root_class(PyClass)),
ClassCodegen(naming.nodes + '.pxd', cy_preamble,
CyClass, make_root_class(CyClass)),
]
if __name__ == '__main__':
schema_filename = os.path.join(testdir, "testschema1.asdl")
generator.generate_from_file(schema_filename, codegens, root)
########NEW FILE########
__FILENAME__ = build
# -*- coding: utf-8 -*-
"""
Generate a package with IR implementations and tools.
"""
from __future__ import print_function, division, absolute_import
import os
from textwrap import dedent
from itertools import chain
from . import generator
from . import formatting
from . import astgen
from . import visitorgen
from . import naming
#------------------------------------------------------------------------
# Tools Flags
#------------------------------------------------------------------------
cython = 1
#------------------------------------------------------------------------
# Tools Resolution
#------------------------------------------------------------------------
class Tool(object):
def __init__(self, codegens, flags=0, depends=[]):
self.codegens = codegens
self.flags = flags
self.depends = depends
def __repr__(self):
return "Tool(codegens=[%s])" % ", ".join(map(str, self.codegens))
def resolve_tools(tool_list, mask, tools=None, seen=None):
if tools is None:
tools = []
seen = set()
for tool in tool_list:
if not (tool.flags & mask) and tool not in seen:
seen.add(tool)
resolve_tools(tool.depends, mask, tools, seen)
tools.append(tool)
return tools
def enumerate_tools(feature_names, mask):
tool_set = set(chain(*[features[name] for name in feature_names]))
tools = resolve_tools(tool_set, mask)
return tools
def enumerate_codegens(feature_names, mask):
tools = enumerate_tools(feature_names, mask)
codegens = list(chain(*[tool.codegens for tool in tools]))
return codegens
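# Added commentary (illustrative sketch mirroring tests/test_build.py):
#
#   enumerate_codegens(['ast'], mask=0)
#       -> the nodes/interface code generators for both .py and .pxd files
#   enumerate_codegens(['ast'], mask=cython)
#       -> only the .py generators, since masking the cython flag drops the
#          pxd tools during dependency resolution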
#------------------------------------------------------------------------
# Tool Definitions
#------------------------------------------------------------------------
def make_codegen_dict(codegens):
return dict((codegen.out_filename, codegen) for codegen in codegens)
all_codegens = astgen.codegens + visitorgen.codegens
gens = make_codegen_dict(all_codegens)
pxd_ast_tool = Tool([gens[naming.nodes + ".pxd"]], flags=cython)
py_ast_tool = Tool([gens[naming.nodes + ".py"]])
pxd_interface_tool = Tool([gens[naming.interface + ".pxd"]], flags=cython,
depends=[pxd_ast_tool])
py_interface_tool = Tool([gens[naming.interface + ".py"]],
depends=[py_ast_tool])
pxd_visitor_tool = Tool([gens[naming.visitor + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_visitor_tool = Tool([gens[naming.visitor + ".py"]],
depends=[py_interface_tool, pxd_visitor_tool])
pxd_transform_tool = Tool([gens[naming.transformer + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_transformr_tool = Tool([gens[naming.transformer + ".py"]],
depends=[py_interface_tool, pxd_transform_tool])
pxd_ast_tool.depends.extend([pxd_interface_tool, py_interface_tool])
#------------------------------------------------------------------------
# Feature Definitions & Entry Points
#------------------------------------------------------------------------
features = {
'all': [py_ast_tool, py_visitor_tool, py_transformr_tool],
'ast': [py_ast_tool],
'visitor': [py_visitor_tool],
'transformer': [py_transformr_tool],
}
def build_package(schema_filename, feature_names, output_dir, mask=0):
"""
Build a package from the given schema and feature names in output_dir.
:param mask: indicates which features to mask, e.g. specifying
'mask=build.cython' disables Cython support.
"""
codegens = enumerate_codegens(feature_names, mask)
disk_allocator = generator.generate_from_file(
schema_filename, codegens, output_dir)
try:
_make_package(disk_allocator, codegens)
finally:
disk_allocator.close()
#------------------------------------------------------------------------
# Package Building Utilities
#------------------------------------------------------------------------
source_name = lambda fn: os.path.splitext(os.path.basename(fn))[0]
def _make_package(disk_allocator, codegens):
_make_init(disk_allocator, codegens)
# Make Cython dependency optional
# disk_allocator.open_sourcefile("cython.py")
fns = [c.out_filename for c in codegens if c.out_filename.endswith('.pxd')]
if fns:
_make_setup(disk_allocator, [source_name(fn) + '.py' for fn in fns])
def _make_init(disk_allocator, codegens):
init = disk_allocator.open_sourcefile("__init__.py")
init.write(dedent("""
# Horrid hack to work around circular cimports
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
"""))
for c in codegens:
if c.out_filename.endswith('.py'):
modname = source_name(c.out_filename)
init.write("from %s import *\n" % modname)
def _make_setup(disk_allocator, filenames):
setup = disk_allocator.open_sourcefile("setup.py")
ext_modules = ["Extension('%s', ['%s'])" % (source_name(fn), fn)
for fn in filenames]
setup.write(dedent("""
from distutils.core import setup
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
ext_modules = [
%s
]
setup(
# ext_modules=cythonize('*.pyx'),
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
)
""") % formatting.py_formatter.format_stats(",\n", 4, ext_modules))
########NEW FILE########
__FILENAME__ = classgen
# -*- coding: utf-8 -*-
"""
Generate Python classes and Cython pxd files.
"""
from __future__ import print_function, division, absolute_import
from . import generator, formatting
from numba.asdl.schema import verify_schema_keywords
class Class(object):
def __init__(self, name, base, doc, fields=(), attributes=None):
self.name = name
self.base = base
self.doc = doc
self.fields = fields
self.attributes = attributes
def doc_sumtype(sumtype, fields):
if fields:
return "%s(%s)" % (sumtype, formatting.format_fields(fields))
return sumtype
class ClassCodegen(generator.SimpleCodegen):
"""
Generate Python AST nodes.
"""
def __init__(self, out_filename, preamble, Class, rootclass):
super(ClassCodegen, self).__init__(out_filename)
self.preamble = preamble
self.rootclass = rootclass
self.Class = Class
def emit_preamble(self, emitter, schema):
verify_schema_keywords(schema)
emitter.emit(self.preamble)
emitter.emit(self.rootclass)
def emit_nonterminal(self, emitter, schema, rulename, rule):
"Emit code for a rule (a nonterminal)"
nonterminal_fields = schema.attributes[rulename]
attrs = tuple(map(repr, (field.name for field in nonterminal_fields)))
alignment = 5 + len(rulename)
sep = "\n%s| " % (" " * alignment)
doc = "%s = %s" % (rulename, sep.join(
doc_sumtype(t, schema.types[t]) for t in rule.fields))
c = self.Class(
rulename,
self.rootclass.name,
doc=doc,
# Attributes: Use None instead of empty tuple to inherit from base
attributes=attrs or None,
)
emitter.emit(c)
def emit_product(self, emitter, schema, rulename, rule):
doc = "%s = (%s)" % (rulename, formatting.format_fields(rule.fields))
emitter.emit(self.Class(rulename, self.rootclass.name,
doc=doc, fields=rule.fields))
def emit_sum(self, emitter, schema, rulename, rule, sumtype):
fields = schema.types[sumtype]
doc = doc_sumtype(sumtype, fields)
emitter.emit(self.Class(sumtype, rulename, doc=doc, fields=fields))
########NEW FILE########
__FILENAME__ = formatting
class Formatter(object):
def format_stats(self, pattern, indent, stats):
pattern = pattern + " " * indent
return pattern.join(stats)
def get_fields(self, fields, obj="self"):
fieldnames = (str(field.name) for field in fields)
return ["%s.%s" % (obj, name) for name in fieldnames]
class PythonFormatter(Formatter):
def format_type(self, asdl_type):
"""
ASDL's five builtin types are identifier, int, string, object, bool
"""
type = str(asdl_type)
defaults = {
'identifier': 'str',
'string': 'str',
}
return defaults.get(type, type)
class CythonFormatter(Formatter):
def format_type(self, asdl_type):
"""
ASDL's five builtin types are identifier, int, string, object, bool
"""
type = str(asdl_type)
defaults = {
'identifier': 'str',
'string': 'str',
'bool': 'bint',
}
return defaults.get(type, type)
def format_fields(fields):
return ", ".join("%s %s" % (f.type, f.name) for f in fields)
py_formatter = PythonFormatter()
cy_formatter = CythonFormatter()
########NEW FILE########
__FILENAME__ = generator
# -*- coding: utf-8 -*-
"""
Generate IR utilities from ASDL schemas.
"""
from __future__ import print_function, division, absolute_import
import os
import types
import codecs
import textwrap
from numba import PY3
from numba.asdl import asdl
from numba.asdl import schema
from numba.asdl import processor
from numba.asdl.asdl import pyasdl
if PY3:
import io
BytesIO = io.BytesIO
else:
import cStringIO
BytesIO = cStringIO.StringIO
root = os.path.dirname(os.path.abspath(__file__))
#------------------------------------------------------------------------
# Entry Points
#------------------------------------------------------------------------
# TODO: This needs to be parametrized when this is externalized
asdl_import_path = [os.path.join(root, os.pardir)]
import_processor = processor.ImportProcessor(pyasdl, asdl_import_path)
def parse(schema_name, schema_def, asdl_processor=import_processor):
"""
Parse a schema given a schema name and schema string (ASDL string).
"""
asdl_tree = get_asdl(schema_name, schema_def, asdl_processor)
schema_instance = schema.build_schema(asdl_tree)
return asdl_tree, schema_instance
def generate(schema_name, schema_str, codegens, file_allocator):
"""
schema: ASDL schema (str)
"""
asdl_tree, schema_instance = parse(schema_name, schema_str)
for codegen in codegens:
emitter = codegen.make_code_emitter(file_allocator)
codegen.generate(emitter, asdl_tree, schema_instance)
def generate_from_file(schema_filename, codegens, output_dir):
"""
Generate code files for the given schema, code generators and output
directory.
Returns a file allocator with the open disk files.
"""
schema_name = os.path.basename(schema_filename)
schema_str = open(schema_filename).read()
file_allocator = DiskFileAllocator(output_dir)
generate(schema_name, schema_str, codegens, file_allocator)
return file_allocator
def generate_module(file_allocator, name):
"""
Generate an in-memory module from a generated Python implementation.
"""
assert name in file_allocator.allocated_files
f = file_allocator.allocated_files[name]
f.seek(0)
data = f.read()
modname, _ = os.path.splitext(name)
d = {}
eval(compile(data, name, "exec"), d, d)
m = types.ModuleType(modname)
vars(m).update(d)
return m
#------------------------------------------------------------------------
# Code Generator Utilities
#------------------------------------------------------------------------
def get_asdl(schema_name, schema, asdl_processor=None):
parser, loader = asdl.load(schema_name, schema, pyasdl,
asdl_processor=asdl_processor)
asdl_tree = loader.load()
return asdl_tree
def is_simple(sum):
"""
Return True if a sum is simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
#------------------------------------------------------------------------
# File Handling
#------------------------------------------------------------------------
StreamWriter = codecs.getwriter('UTF-8')
class FileAllocator(object):
def __init__(self, output_dir=None):
self.output_dir = output_dir
# file_name -> file
self.allocated_files = {}
def open_sourcefile(self, name):
"Allocate a file and save in in allocated_files"
def close(self):
for file in self.allocated_files.itervalues():
file.close()
self.allocated_files.clear()
class DiskFileAllocator(FileAllocator):
def open_sourcefile(self, name):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
filename = os.path.join(self.output_dir, name)
file = codecs.open(filename, 'w', encoding='UTF-8')
self.allocated_files[name] = file
return file
class MemoryFileAllocator(FileAllocator):
def open_sourcefile(self, name):
file = StreamWriter(BytesIO())
self.allocated_files[name] = file
return file
#------------------------------------------------------------------------
# Code Generator Interface
#------------------------------------------------------------------------
class CodeEmitter(object):
def __init__(self, outfile):
self.outfile = outfile
def emit(self, s):
self.outfile.write(unicode(s))
class Codegen(object):
"""
Interface for code generators.
"""
def __init__(self, out_filename):
self.out_filename = out_filename
def make_code_emitter(self, file_allocator):
outfile = file_allocator.open_sourcefile(self.out_filename)
return CodeEmitter(outfile)
def generate(self, emitter, asdl_tree, schema_instance):
"""
Generate code for the given asdl tree. The ASDL tree is accompanied
by a corresponding schema.Schema, which is easier to deal with.
"""
class UtilityCodegen(Codegen):
"""
Generate some utility code:
UtilityCode("foo.py", "my python code")
"""
def __init__(self, out_filename, utility_code):
super(UtilityCodegen, self).__init__(out_filename)
self.utility_code = utility_code
def generate(self, emitter, asdl_tree, schema_instance):
emitter.emit(self.utility_code)
class SimpleCodegen(Codegen):
def generate(self, emitter, asdl_tree, schema):
self.emit_preamble(emitter, schema)
for rulename, rule in schema.dfns.iteritems():
if rule.is_sum:
self.emit_nonterminal(emitter, schema, rulename, rule)
for rulename, rule in schema.dfns.iteritems():
if rule.is_product:
self.emit_product(emitter, schema, rulename, rule)
for rulename, rule in schema.dfns.iteritems():
if rule.is_sum:
for sumtype in rule.fields:
self.emit_sum(emitter, schema, rulename, rule, sumtype)
def emit_preamble(self, emitter, schema):
pass
def emit_nonterminal(self, emitter, schema, rulename, rule):
pass
def emit_product(self, emitter, schema, rulename, rule):
pass
def emit_sum(self, emitter, schema, rulename, rule, sumtype):
pass
if __name__ == '__main__':
schema_def = textwrap.dedent("""
module MyModule version "0.1"
{
mod = Module(object leaf1, object leaf2)
| Foo(object leaf)
foo = Add | Mul
expr = X | Y
attributes (int lineno)
alias = (int foo, int bar)
}
""")
asdl_tree = get_asdl("MyASDL.asdl", schema_def)
print("asdl", asdl_tree)
print ("-------------")
s = schema.build_schema(asdl_tree)
print("dfns", s.dfns)
print("types", s.types)
########NEW FILE########
__FILENAME__ = naming
prefix = "__asdl_"
nodes = prefix + "nodes"
interface = prefix + "interface"
visitor = prefix + "visitor"
transformer = prefix + "transformer"
########NEW FILE########
__FILENAME__ = testutils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
from .. import generator
root = os.path.dirname(os.path.abspath(__file__))
def schema_filename(name):
return os.path.join(root, name)
def load_schema(name):
schema_fn = schema_filename(name)
schema_name = os.path.basename(schema_fn)
schema_str = open(schema_fn).read()
return schema_name, schema_str
def generate_in_memory(schema_name, codegens):
file_allocator = generator.MemoryFileAllocator()
schema_name, schema_str = load_schema(schema_name)
generator.generate(schema_name, schema_str, codegens, file_allocator)
return file_allocator
########NEW FILE########
__FILENAME__ = test_astgen
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from .testutils import generate_in_memory
from .. import generator, astgen, naming
def load_testschema1():
file_allocator = generate_in_memory("testschema1.asdl", astgen.codegens)
m = generator.generate_module(file_allocator, naming.nodes + '.py')
return m
def test_ast_generation():
"""
>>> test_ast_generation()
"""
m = load_testschema1()
# Nonterminals
assert issubclass(m.root, m.AST)
assert issubclass(m.expr, m.AST)
# Products
assert issubclass(m.myproduct, m.AST)
# Terminals
assert issubclass(m.Ham, m.root)
assert issubclass(m.Foo, m.root)
assert issubclass(m.SomeExpr, m.expr)
assert issubclass(m.Bar, m.expr)
def test_ast_attributes():
"""
>>> test_ast_attributes()
"""
m = load_testschema1()
assert m.root._attributes == ('foo', 'bar')
def test_valid_node_instantiation():
"""
>>> test_valid_node_instantiation()
Foo(leaf=Bar(e1=SomeExpr(n=10), e2=SomeExpr(n=11)))
Bar(e1=SomeExpr(n=10), e2=None)
Product(p=myproduct(foo=SomeExpr(n=10), bar=12))
"""
m = load_testschema1()
# Valid
e1 = m.SomeExpr(10)
e2 = m.SomeExpr(11)
# Sum
print(m.Foo(m.Bar(e1, e2)))
print(m.Bar(e1, None))
# Product
print(m.Product(m.myproduct(e1, 12)))
def test_invalid_node_instantiation():
"""
>>> m = load_testschema1()
>>> e2 = m.SomeExpr(10)
>>> m.Foo()
Traceback (most recent call last):
...
TypeError: ...
>>> m.Bar(None, e2)
Traceback (most recent call last):
...
ValueError: Invalid type for attribute 'Bar.e1', expected instance of type(s) ('expr',) (got 'NoneType').
>>> m.Product(e2)
Traceback (most recent call last):
...
ValueError: Invalid type for attribute 'Product.p', expected instance of type(s) ('myproduct',) (got 'SomeExpr').
"""
if __name__ == '__main__':
import doctest
optionflags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
try:
import cython
except ImportError:
print("Skipping test, cython not installed")
else:
sys.exit(0 if doctest.testmod(optionflags=optionflags).failed == 0 else 1)
########NEW FILE########
__FILENAME__ = test_build
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import tempfile
import subprocess
from .. import naming, build
root = os.path.dirname(os.path.abspath(__file__))
schema_filename = os.path.join(root, "testschema1.asdl")
def filenames(codegens):
return [c.out_filename for c in codegens]
all_features = [naming.interface + '.pxd', naming.nodes + '.pxd',
naming.visitor + '.pxd', naming.transformer + '.pxd',
naming.interface + '.py', naming.nodes + '.py',
naming.visitor + '.py', naming.transformer + '.py',]
def test_features():
"""
>>> test_features()
"""
codegens = build.enumerate_codegens(['ast'], mask=0)
fns = filenames(codegens)
assert set(fns) == set([naming.interface + '.pxd', naming.nodes + '.pxd',
naming.interface + '.py', naming.nodes + '.py',])
codegens = build.enumerate_codegens(['ast'], mask=build.cython)
fns = filenames(codegens)
assert set(fns) == set([naming.interface + '.py', naming.nodes + '.py',])
codegens = build.enumerate_codegens(
['ast', 'visitor', 'transformer'], mask=0)
fns = filenames(codegens)
assert set(fns) == set(all_features)
def test_package_building():
"""
>>> test_package_building()
"""
features = ['ast', 'visitor', 'transformer']
output_dir = tempfile.mkdtemp()
try:
build.build_package(schema_filename, features, output_dir)
for feature_file in all_features:
assert os.path.exists(os.path.join(output_dir, feature_file))
finally:
shutil.rmtree(output_dir)
def test_package_compilation():
"""
>>> test_package_compilation()
"""
features = ['ast', 'visitor', 'transformer']
output_dir = tempfile.mkdtemp()
try:
build.build_package(schema_filename, features, output_dir)
p = subprocess.Popen([sys.executable, "setup.py",
"build_ext", "--inplace"], cwd=output_dir)
assert p.wait() == 0, p.poll()
finally:
shutil.rmtree(output_dir)
if __name__ == '__main__':
import doctest
try:
import cython
except ImportError:
print("Skipping test, cython not installed")
else:
sys.exit(0 if doctest.testmod().failed == 0 else 1)
########NEW FILE########
__FILENAME__ = test_visitors
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from .testutils import generate_in_memory
from .. import generator, visitorgen, astgen, naming
try:
import cython
have_cython = True
except ImportError:
have_cython = False
#------------------------------------------------------------------------
# Load modules
#------------------------------------------------------------------------
def load_testschema1():
codegens = astgen.codegens + visitorgen.codegens
file_allocator = generate_in_memory("testschema1.asdl", codegens)
interface = generator.generate_module(file_allocator,
naming.interface + '.py')
sys.modules[naming.interface] = interface
nodes = generator.generate_module(file_allocator, naming.nodes + '.py')
sys.modules[naming.nodes] = nodes
visitor = generator.generate_module(file_allocator,
naming.visitor + '.py')
transformer = generator.generate_module(file_allocator,
naming.transformer + '.py')
return nodes, visitor, transformer
if have_cython:
nodes, visitor, transformer = load_testschema1()
else:
class visitor(object):
class Visitor(object):
pass
class transformer(object):
class Transformer(object):
pass
#------------------------------------------------------------------------
# Visitor testers
#------------------------------------------------------------------------
class TestVisitor(visitor.Visitor):
def visit_Bar(self, node):
print("Bar:", node.e1, node.e2)
self.generic_visit(node)
def visit_SomeExpr(self, node):
print("SomeExpr:", node)
class TestTransformer(transformer.Transformer):
def visit_SomeExpr(self, node):
return nodes.SomeOtherExpr(node.n)
#------------------------------------------------------------------------
# Test funcs
#------------------------------------------------------------------------
def test_visitor():
"""
>>> test_visitor()
Bar: Bar(e1=SomeExpr(n=10), e2=SomeExpr(n=11)) SomeExpr(n=12)
Bar: SomeExpr(n=10) SomeExpr(n=11)
SomeExpr: SomeExpr(n=10)
SomeExpr: SomeExpr(n=11)
SomeExpr: SomeExpr(n=12)
"""
e1 = nodes.SomeExpr(10)
e2 = nodes.SomeExpr(11)
expr = nodes.Bar(nodes.Bar(e1, e2), nodes.SomeExpr(12))
TestVisitor().visit(expr)
def test_transformer():
"""
>>> test_transformer()
Bar(e1=Bar(e1=SomeOtherExpr(n=10), e2=SomeOtherExpr(n=11)), e2=SomeOtherExpr(n=12))
"""
e1 = nodes.SomeExpr(10)
e2 = nodes.SomeExpr(11)
expr = nodes.Bar(nodes.Bar(e1, e2), nodes.SomeExpr(12))
result = TestTransformer().visit(expr)
print(result)
if __name__ == '__main__':
import doctest
if have_cython:
sys.exit(0 if doctest.testmod().failed == 0 else 1)
########NEW FILE########
__FILENAME__ = visitorgen
# -*- coding: utf-8 -*-
"""
Generate Python visitors and Cython pxd files.
"""
from __future__ import print_function, division, absolute_import
import os
from . import generator
from . import naming
from .formatting import py_formatter
#------------------------------------------------------------------------
# Code Formatting
#------------------------------------------------------------------------
interface_class = '''
def iter_fields(node):
"""
Return a list of ``(fieldname, value)`` tuples, one for each field in
``node._fields`` that is present on *node*.
"""
result = []
for field in node._fields:
try:
result.append((field, getattr(node, field)))
except AttributeError:
pass
return result
class GenericVisitor(object):
def visit(self, node):
return node.accept(self)
def generic_visit(self, node):
"""Called explicitly by the user from an overridden visitor method"""
raise NotImplementedError
'''
pxd_interface_class = """\
cimport %s
cdef class GenericVisitor(object):
cpdef generic_visit(self, node)
""" % (naming.nodes,)
# TODO: We can also make 'visitchildren' dispatch quickly
visitor_class = '''
from %s import GenericVisitor, iter_fields
from %s import AST
__all__ = ['Visitor']
class Visitor(GenericVisitor):
def generic_visit(self, node):
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
item.accept(self)
elif isinstance(value, AST):
value.accept(self)
''' % (naming.interface, naming.nodes)
transformer_class = """
from %s import GenericVisitor, iter_fields
from %s import AST
__all__ = ['Transformer']
class Transformer(GenericVisitor):
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = value.accept(self)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = old_value.accept(self)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
""" % (naming.interface, naming.nodes,)
pxd_visitor_class = """
from %s cimport GenericVisitor
cdef class Visitor(GenericVisitor):
pass
""" % (naming.interface,)
pxd_transformer_class = """
from %s cimport GenericVisitor
cdef class Transformer(GenericVisitor):
pass
""" % (naming.interface,)
#------------------------------------------------------------------------
# Code Formatting
#------------------------------------------------------------------------
def make_visit_stats(schema, fields, inplace):
stats = []
# ["self.attr_x"]
field_accessors = py_formatter.get_fields(fields, obj="node")
for field, field_access in zip(fields, field_accessors):
field_type = str(field.type)
if field_type not in schema.dfns and field_type not in schema.types:
# Not an AST node
continue
s = "%s.accept(self)" % field_access
if inplace:
# Mutate in-place (transform)
s = "%s = %s" % (field_access, s)
if field.opt:
# Guard for None
s = "if %s is not None: %s" % (field_access, s)
stats.append(s)
if inplace:
stats.append("return node")
return stats or ["pass"]
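# Added commentary (illustrative sketch): for an optional child field "e1"
# whose type is an AST type in the schema, the statements generated above are
#
#   visitor (inplace=False):     if node.e1 is not None: node.e1.accept(self)
#   transformer (inplace=True):  if node.e1 is not None: node.e1 = node.e1.accept(self)
#
# followed by a trailing "return node" in the transformer case; fields whose
# types are not in the schema are skipped entirely.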
#------------------------------------------------------------------------
# Method Generation
#------------------------------------------------------------------------
class Method(object):
def __init__(self, schema, name, fields):
self.schema = schema
self.name = name
self.fields = fields
class InterfaceMethod(Method):
def __str__(self):
return (
" def visit_%s(self, node):\n"
" raise NotImplementedError\n"
"\n"
) % (self.name,)
class PyMethod(Method):
inplace = None
def __str__(self):
stats = make_visit_stats(self.schema, self.fields, self.inplace)
return (
" def visit_%s(self, node):\n"
" %s\n"
"\n"
) % (self.name, py_formatter.format_stats("\n", 8, stats))
class PyVisitMethod(PyMethod):
inplace = False
class PyTransformMethod(PyVisitMethod):
inplace = True
class PxdMethod(Method):
def __str__(self):
return " cpdef visit_%s(self, %s.%s node)\n" % (self.name,
naming.nodes,
self.name)
#------------------------------------------------------------------------
# Code Generators
#------------------------------------------------------------------------
class VisitorCodegen(generator.SimpleCodegen):
"""
Generate visitor classes with one visit method per AST node type.
"""
def __init__(self, out_filename, preamble, Method):
super(VisitorCodegen, self).__init__(out_filename)
self.preamble = preamble
self.Method = Method
def emit_preamble(self, emitter, schema):
emitter.emit(self.preamble)
def emit_sum(self, emitter, schema, rulename, rule, sumtype):
fields = schema.types[sumtype]
emitter.emit(self.Method(schema, sumtype, fields))
#------------------------------------------------------------------------
# Global Exports
#------------------------------------------------------------------------
codegens = [
VisitorCodegen(naming.interface + '.py', interface_class, InterfaceMethod),
VisitorCodegen(naming.interface + '.pxd', pxd_interface_class, PxdMethod),
VisitorCodegen(naming.visitor + '.py', visitor_class, PyVisitMethod),
generator.UtilityCodegen(naming.visitor + '.pxd', pxd_visitor_class),
VisitorCodegen(naming.transformer + '.py', transformer_class, PyTransformMethod),
generator.UtilityCodegen(naming.transformer + '.pxd', pxd_transformer_class),
]
if __name__ == '__main__':
root = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(root, "tests")
schema_filename = os.path.join(testdir, "testschema1.asdl")
generator.generate_from_file(schema_filename, codegens, root)
########NEW FILE########
__FILENAME__ = lexing
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import warnings
from functools import partial
from numba.config import config
try:
import pygments
except ImportError as e:
pygments = None
else:
from pygments import highlight
from pygments.lexers import PythonLexer, LlvmLexer
from pygments.formatters import HtmlFormatter, TerminalFormatter
# ______________________________________________________________________
if pygments:
lexers = {
"python": PythonLexer,
"llvm": LlvmLexer,
}
formatters = {
"html": HtmlFormatter,
"console": partial(TerminalFormatter, bg=config.terminal_background),
}
def lex_source(code, lexer="python", output='html', inline_css=True):
"""
>>> lex_source("print 'hello world'", "python", "html")
<div ...> ... </div>
"""
if not config.colour:
return code
Lexer = lexers[lexer]
Formatter = formatters[output]
result = highlight(code, Lexer(), Formatter(noclasses=inline_css))
return result.rstrip()
else:
def lex_source(code, *args, **kwargs):
warnings.warn("Pygments not installed")
return code
########NEW FILE########
__FILENAME__ = llvm_types
# -*- coding: utf-8 -*-
'''llvm_types
Utility module containing common (to Numba) LLVM types.
'''
from __future__ import print_function, division, absolute_import
# ______________________________________________________________________
import ctypes
import struct as struct_
import llvm.core as lc
from numba import utils
from numba.typedefs import _trace_refs_, PyObject_HEAD
from numba.typesystem import numba_typesystem
import logging
logger = logging.getLogger(__name__)
# ______________________________________________________________________
_plat_bits = struct_.calcsize('@P') * 8
# Assuming sizeof(c_size_t) == sizeof(c_ssize_t) == sizeof(Py_ssize_t)...
_sizeof_py_ssize_t = ctypes.sizeof(
getattr(ctypes, 'c_ssize_t', getattr(ctypes, 'c_size_t')))
_int1 = lc.Type.int(1)
_int8 = lc.Type.int(8)
_int8_star = lc.Type.pointer(_int8)
_int32 = lc.Type.int(32)
_int64 = lc.Type.int(64)
_llvm_py_ssize_t = lc.Type.int(_sizeof_py_ssize_t * 8)
_llvm_size_t = _llvm_py_ssize_t
_intp = lc.Type.int(_plat_bits)
_intp_star = lc.Type.pointer(_intp)
_void_star = lc.Type.pointer(lc.Type.int(8))
_void_star_star = lc.Type.pointer(_void_star)
_float = lc.Type.float()
_double = lc.Type.double()
_complex64 = lc.Type.struct([_float, _float])
_complex128 = lc.Type.struct([_double, _double])
def to_llvm(type):
return numba_typesystem.convert("llvm", type)
# return type.to_llvm(utils.context)
_pyobject_head = [to_llvm(ty) for name, ty in PyObject_HEAD.fields]
_pyobject_head_struct = to_llvm(PyObject_HEAD)
_pyobject_head_struct_p = lc.Type.pointer(_pyobject_head_struct)
_head_len = len(_pyobject_head)
_numpy_struct = lc.Type.struct(_pyobject_head+\
[_void_star, # data
_int32, # nd
_intp_star, # dimensions
_intp_star, # strides
_void_star, # base
_void_star, # descr
_int32, # flags
_void_star, # weakreflist
])
_numpy_array = lc.Type.pointer(_numpy_struct)
_BASE_ARRAY_FIELD_OFS = len(_pyobject_head)
_numpy_array_field_ofs = {
'data' : _BASE_ARRAY_FIELD_OFS,
'ndim' : _BASE_ARRAY_FIELD_OFS + 1,
'shape' : _BASE_ARRAY_FIELD_OFS + 2,
'strides' : _BASE_ARRAY_FIELD_OFS + 3,
'base' : _BASE_ARRAY_FIELD_OFS + 4,
'descr' : _BASE_ARRAY_FIELD_OFS + 5,
}
def constant_int(value, type=_int32):
return lc.Constant.int(type, value)
# ______________________________________________________________________
class _LLVMCaster(object):
def __init__(self, builder):
self.builder = builder
def cast(self, lvalue, dst_ltype, *args, **kws):
src_ltype = lvalue.type
if src_ltype == dst_ltype:
return lvalue
return self.build_cast(self.builder, lvalue, dst_ltype, *args, **kws)
def build_pointer_cast(_, builder, lval1, lty2):
return builder.bitcast(lval1, lty2)
def build_int_cast(_, builder, lval1, lty2, unsigned = False):
width1 = lval1.type.width
width2 = lty2.width
ret_val = lval1
if width2 > width1:
if unsigned:
ret_val = builder.zext(lval1, lty2)
else:
ret_val = builder.sext(lval1, lty2)
elif width2 < width1:
# JDR: Compromise here on logging level...
logger.info("Warning: Perfoming downcast. May lose information.")
ret_val = builder.trunc(lval1, lty2)
return ret_val
def build_float_ext(_, builder, lval1, lty2):
return builder.fpext(lval1, lty2)
def build_float_trunc(_, builder, lval1, lty2):
logger.info("Warning: Perfoming downcast. May lose information.")
return builder.fptrunc(lval1, lty2)
def build_int_to_float_cast(_, builder, lval1, lty2, unsigned = False):
ret_val = None
if unsigned:
ret_val = builder.uitofp(lval1, lty2)
else:
ret_val = builder.sitofp(lval1, lty2)
return ret_val
def build_float_to_int_cast(_, builder, lval1, lty2, unsigned = False):
ret_val = None
if unsigned:
ret_val = builder.fptoui(lval1, lty2)
else:
ret_val = builder.fptosi(lval1, lty2)
return ret_val
CAST_MAP = {
lc.TYPE_POINTER : build_pointer_cast,
lc.TYPE_INTEGER: build_int_cast,
(lc.TYPE_FLOAT, lc.TYPE_DOUBLE) : build_float_ext,
(lc.TYPE_DOUBLE, lc.TYPE_FP128) : build_float_ext,
(lc.TYPE_DOUBLE, lc.TYPE_PPC_FP128) : build_float_ext,
(lc.TYPE_DOUBLE, lc.TYPE_X86_FP80) : build_float_ext,
(lc.TYPE_DOUBLE, lc.TYPE_FLOAT) : build_float_trunc,
(lc.TYPE_FP128, lc.TYPE_DOUBLE) : build_float_trunc,
(lc.TYPE_PPC_FP128, lc.TYPE_DOUBLE) : build_float_trunc,
(lc.TYPE_X86_FP80, lc.TYPE_DOUBLE) : build_float_trunc,
(lc.TYPE_INTEGER, lc.TYPE_FLOAT) : build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_DOUBLE) : build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_FP128) : build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_PPC_FP128): build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_X86_FP80) : build_int_to_float_cast,
(lc.TYPE_FLOAT, lc.TYPE_INTEGER) : build_float_to_int_cast,
(lc.TYPE_DOUBLE, lc.TYPE_INTEGER) : build_float_to_int_cast,
(lc.TYPE_FP128, lc.TYPE_INTEGER) : build_float_to_int_cast,
(lc.TYPE_PPC_FP128, lc.TYPE_INTEGER): build_float_to_int_cast,
(lc.TYPE_X86_FP80, lc.TYPE_INTEGER) : build_float_to_int_cast,
}
@classmethod
def build_cast(cls, builder, lval1, lty2, *args, **kws):
ret_val = lval1
lty1 = lval1.type
lkind1 = lty1.kind
lkind2 = lty2.kind
# This looks like the wrong place to enforce this
# TODO: We need to pass in the numba types instead
# if lc.TYPE_INTEGER in (lkind1, lkind2) and 'unsigned' not in kws:
# # Be strict about having `unsigned` defined when
# # we have integer types
# raise ValueError("Unknown signedness for integer type",
# '%s -> %s' % (lty1, lty2), args, kws)
if lkind1 == lkind2:
if lkind1 in cls.CAST_MAP:
ret_val = cls.CAST_MAP[lkind1](cls, builder, lval1, lty2,
*args, **kws)
else:
raise NotImplementedError(lkind1)
else:
map_index = (lkind1, lkind2)
if map_index in cls.CAST_MAP:
ret_val = cls.CAST_MAP[map_index](cls, builder, lval1, lty2,
*args, **kws)
else:
raise NotImplementedError('Unable to cast from %s to %s.' %
(str(lty1), str(lty2)))
return ret_val
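# Added commentary (illustrative sketch; 'builder' and 'val_i32' are
# placeholders for an llvm.core.Builder and an i32 value):
#
#   caster = _LLVMCaster(builder)
#   as_double = caster.cast(val_i32, lc.Type.double())
#
# Since the type kinds differ (integer -> double), CAST_MAP selects
# build_int_to_float_cast, which emits sitofp (or uitofp when unsigned=True
# is passed through).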
# ______________________________________________________________________
# End of llvm_types.py
########NEW FILE########
__FILENAME__ = macros
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba import typesystem
from . import llvm_types
import logging
logger = logging.getLogger(__name__)
# TODO: Create a subclass of
# llpython.byte_translator.LLVMTranslator that does macro
# expansion.
def c_string_slice_2 (context, builder, c_string, lb, ub = None):
module = builder.basic_block.function.module
logger.debug((context, builder, c_string, lb, ub))
_, CStringSlice2Len = context.intrinsic_library.declare(module,
'CStringSlice2Len')
_, CStringSlice2 = context.intrinsic_library.declare(module,
'CStringSlice2')
_, strlen = context.external_library.declare(module, 'strlen')
c_str_len = builder.call(strlen, [c_string])
if ub is None:
ub = c_str_len
out_len = builder.call(CStringSlice2Len, [c_string, c_str_len, lb, ub])
ret_val = builder.alloca_array(llvm_types._int8, out_len)
builder.call(CStringSlice2, [ret_val, c_string, c_str_len, lb, ub])
return ret_val
c_string_slice_2.__signature__ = typesystem.function(
return_type = char.pointer(),
args = (char.pointer(), Py_ssize_t, Py_ssize_t))
def c_string_slice_1 (context, builder, c_string, lb):
return c_string_slice_2(context, builder, c_string, lb)
c_string_slice_1.__signature__ = typesystem.function(
return_type = char.pointer(),
args = (char.pointer(), Py_ssize_t))
########NEW FILE########
__FILENAME__ = metadata
# -*- coding: utf-8 -*-
"""
Hold metadata for instructions.
"""
from __future__ import print_function, division, absolute_import
import llvm.core
from numba import *
def _typename(type):
typename = str(type)
typename = typename.replace("float", "float32")
typename = typename.replace("double", "float64")
typename = typename.replace("long double", "float128")
return typename
def typename(type):
"Get the TBAA type name"
if type.is_tbaa:
return type.name
else:
return _typename(type)
def is_tbaa_type(type):
return (type.is_pointer or type.is_tbaa or
type.is_object or type.is_array)
class TBAAMetadata(object):
"""
Type Based Alias Analysis metadata. This defines a type tree where
types in different branches are known not to alias each other. Only
the ancestors of a type node alias that node.
"""
def __init__(self, module):
self.module = module
self.metadata_cache = {}
self.initialize()
self.unique_number = 0
def initialize(self):
self.root = self.make_metadata("root", root=None)
self.char_pointer = self.make_metadata("char *", root=self.root)
self.metadata_cache[char.pointer()] = self.char_pointer
def make_metadata(self, typename, root, is_constant=False):
operands = [self.get_string(typename)]
if root is not None:
assert isinstance(root, llvm.core.MetaData)
operands.append(root)
if is_constant:
constant = llvm.core.Constant.int(llvm.core.Type.int(64), 1)
operands.append(constant)
node = llvm.core.MetaData.get(self.module, operands)
llvm.core.MetaData.add_named_operand(self.module, "tbaa", node)
# print "made metadata", self.module.id, typename # , node, root
return node
def make_unique_metadata(self, root, is_constant=False):
result = self.make_metadata("unique%d" % self.unique_number, root,
is_constant)
self.unique_number += 1
return result
def get_string(self, string):
return llvm.core.MetaDataString.get(self.module, string)
def find_root(self, type):
"""
Find the metadata root of a type. E.g. if we have tbaa(sometype).pointer(),
we want to return tbaa(sometype).root.pointer()
"""
# Find TBAA base type
n = 0
while type.is_pointer:
type = type.base_type
n += 1
if type.is_tbaa:
# Return TBAA base type pointer composition
root_type = type.root
for i in range(n):
root_type = root_type.pointer()
# Define root type metadata
root = self.get_metadata(root_type)
else:
# Not a TBAA root type, alias anything
root = self.char_pointer
return root
def get_metadata(self, type, typeroot=None):
if not is_tbaa_type(type):
return None
if type in self.metadata_cache:
return self.metadata_cache[type]
# Build metadata
if typeroot:
root = self.metadata_cache[typeroot]
elif type.is_tbaa:
if type.root in self.metadata_cache:
root = self.metadata_cache[type.root]
else:
root = self.get_metadata(type.root)
else:
root = self.find_root(type)
node = self.make_metadata(typename(type), root,
) #is_constant="const" in type.qualifiers)
# Cache result
self.metadata_cache[type] = node
return node
def set_metadata(self, load_instr, metadata):
load_instr.set_metadata("tbaa", metadata)
def set_tbaa(self, load_instr, type):
md = self.get_metadata(type)
if md is not None:
self.set_metadata(load_instr, md)
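# Added commentary (illustrative sketch; lmod and load_instr are placeholders
# for an llvm.core.Module and a load instruction):
#
#   tbaa = TBAAMetadata(lmod)
#   tbaa.set_tbaa(load_instr, int32.pointer())
#
# Pointer types that are not rooted in an explicit tbaa(...) type fall back
# to the "char *" root (see find_root), i.e. they are allowed to alias
# anything.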
########NEW FILE########
__FILENAME__ = codegen
# -*- coding: utf-8 -*-
"""
Code generator module. Subclass CodeGen to implement a code generator
as a visitor.
"""
from __future__ import print_function, division, absolute_import
import sys
import string
from . import minierror
from . import minitypes
from . import minivisitor
class CodeGen(minivisitor.TreeVisitor):
"""
Base class for code generators written as visitors.
"""
def __init__(self, context, codewriter):
super(CodeGen, self).__init__(context)
self.code = codewriter
def clone(self, context, codewriter):
cls = type(self)
kwds = dict(self.__dict__)
kwds.update(context=context, codewriter=codewriter)
result = cls(context, codewriter)
vars(result).update(kwds)
return result
def results(self, *nodes):
results = []
for childlist in nodes:
result = self.visit_childlist(childlist)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
return tuple(results)
def visitchild(self, node):
if node is None:
return
return self.visit(node)
class CodeGenCleanup(CodeGen):
"""
Perform cleanup for all nodes. This is invoked from an appropriate clean-
up point from an :py:class:`minivect.miniast.ErrorHandler`. Recursion
should hence stop at ErrorHandler nodes, since an ErrorHandler descendant
should handle its own descendants.
Users of minivect should subclass this to DECREF object temporaries, etc.
"""
def visit_Node(self, node):
self.visitchildren(node)
def visit_ErrorHandler(self, node):
# stop recursion here
pass
def get_printf_specifier(type):
format = None
if type.is_pointer:
format = "%p"
elif type.is_numeric:
if type.is_int or type.is_float:
format = {
minitypes.int_: "%i",
minitypes.long_: "%ld",
minitypes.longlong: "%lld",
minitypes.uint: "%u",
minitypes.ulong: "%lu",
minitypes.ulonglong: "%llu",
minitypes.float_: "%f",
minitypes.double: "%lf",
}.get(type, ["%lld", "%lf"][type.is_float])
elif type.is_c_string:
format = "%s"
return format
def format_specifier(node, astbuilder):
"Return a printf() format specifier for the type of the given AST node"
type = node.type
dst_type = None
format = get_printf_specifier(type)
if format is not None:
if dst_type:
node = astbuilder.cast(node, dst_type)
return format, node
else:
raise minierror.UnmappableFormatSpecifierError(type)
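# Illustrative sketch (not part of the original sources): how a PrintNode
# implementation might combine the helpers above into a single printf()
# format string. `nodes` and `astbuilder` are assumed inputs; the helper
# name is hypothetical.
def _build_printf_format(nodes, astbuilder):
    "Return (format_string, coerced_nodes) for the given expression nodes."
    specifiers, coerced = [], []
    for node in nodes:
        specifier, node = format_specifier(node, astbuilder)
        specifiers.append(specifier)
        coerced.append(node)
    return " ".join(specifiers) + "\n", coerced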
class CCodeGen(CodeGen):
"""
Generates C code from an AST, needs a
:py:class:`minivect.minicode.CCodeWriter`. To use the vectorized
specializations, use the :py:class:`VectorCodeGen` below.
"""
label_counter = 0
disposal_point = None
def __init__(self, context, codewriter):
super(CCodeGen, self).__init__(context, codewriter)
self.declared_temps = set()
self.temp_names = set()
def strip(self, expr_string):
# strip parentheses from C string expressions where unneeded
if expr_string and expr_string[0] == '(' and expr_string[-1] == ')':
return expr_string[1:-1]
return expr_string
def visit_FunctionNode(self, node):
code = self.code
self.specializer = node.specializer
self.function = node
name = code.mangle(node.mangled_name + node.specialization_name)
node.mangled_name = name
args = self.results(node.arguments + node.scalar_arguments)
args = (arg for arg in args if arg is not None)
proto = "static int %s(%s)" % (name, ", ".join(args))
code.proto_code.putln(proto + ';')
code.putln("%s {" % proto)
code.declaration_levels.append(code.insertion_point())
code.function_declarations = code.insertion_point()
code.before_loop = code.insertion_point()
self.visitchildren(node)
code.declaration_levels.pop()
code.putln("}")
def _argument_variables(self, variables):
return ", ".join("%s %s" % (v.type, self.visit(v))
for v in variables if v is not None)
def visit_FunctionArgument(self, node):
if node.used:
return self._argument_variables(node.variables)
def visit_ArrayFunctionArgument(self, node):
if node.used:
return self._argument_variables([node.data_pointer,
node.strides_pointer])
def visit_StatListNode(self, node):
self.visitchildren(node)
return node
def visit_ExprStatNode(self, node):
node.expr.is_statement = True
result = self.visit(node.expr)
if result:
self.code.putln(self.strip(result) + ';')
def visit_ExprNodeWithStatement(self, node):
self.visit(node.stat)
return self.visit(node.expr)
def visit_OpenMPLoopNode(self, node):
self.code.putln("#ifdef _OPENMP")
if_clause = self.visit(node.if_clause)
lastprivates = ", ".join(self.results(node.lastprivates))
privates = ""
if node.privates:
privates = " private(%s)" % ", ".join(self.results(node.privates))
pragma = "#pragma omp parallel for if(%s) lastprivate(%s)%s"
self.code.putln(pragma % (if_clause, lastprivates, privates))
self.code.putln("#endif")
self.visit(node.for_node)
def visit_OpenMPConditionalNode(self, node):
if node.if_body:
self.code.putln("#ifdef _OPENMP")
self.visit(node.if_body)
if node.else_body:
if not node.if_body:
self.code.putln("#ifndef _OPENMP")
else:
self.code.putln("#else")
self.visit(node.else_body)
self.code.putln("#endif")
def put_intel_pragmas(self, code):
"""
Insert Intel compiler specific pragmas. See "A Guide to Vectorization
with Intel(R) C++ Compilers".
"""
code.putln("#ifdef __INTEL_COMPILER")
# force auto-vectorization
code.putln("#pragma simd")
# ignore potential data dependencies
# code.putln("#pragma ivdep")
# vectorize even if the compiler doesn't think this will be beneficial
# code.putln("#pragma vector always")
code.putln("#endif")
def visit_PragmaForLoopNode(self, node):
self.put_intel_pragmas(self.code)
self.visit(node.for_node)
def visit_ForNode(self, node):
code = self.code
exprs = self.results(node.init, node.condition, node.step)
code.putln("for (%s; %s; %s) {" % tuple(self.strip(e) for e in exprs))
self.code.declaration_levels.append(code.insertion_point())
self.code.loop_levels.append(code.insertion_point())
self.visit(node.init)
self.visit(node.body)
self.code.declaration_levels.pop()
self.code.loop_levels.pop()
code.putln("}")
def visit_IfNode(self, node):
self.code.putln("if (%s) {" % self.results(node.cond))
self.visit(node.body)
if node.else_body:
self.code.putln("} else {")
self.visit(node.else_body)
self.code.putln("}")
def visit_PromotionNode(self, node):
# Use C rules for promotion
return self.visit(node.operand)
def visit_FuncCallNode(self, node):
return "%s(%s)" % (self.visit(node.func_or_pointer),
", ".join(self.results(node.args)))
def visit_FuncNameNode(self, node):
return node.name
def visit_FuncRefNode(self, node):
return node.function.mangled_name
def visit_ReturnNode(self, node):
self.code.putln("return %s;" % self.results(node.operand))
def visit_BinopNode(self, node):
op = node.operator
return "(%s %s %s)" % (self.visit(node.lhs),
op,
self.visit(node.rhs))
def visit_UnopNode(self, node):
return "(%s%s)" % (node.operator, self.visit(node.operand))
def _mangle_temp(self, node):
name = self.code.mangle(node.repr_name or node.name)
if name in self.temp_names:
name = "%s_%d" % (name.rstrip(string.digits),
len(self.declared_temps))
node.name = name
self.temp_names.add(name)
self.declared_temps.add(node)
def _declare_temp(self, node, rhs_result=None):
if node not in self.declared_temps:
self._mangle_temp(node)
code = self.code.declaration_levels[-1]
if rhs_result:
assignment = " = %s" % (rhs_result,)
else:
assignment = ""
code.putln("%s %s%s;" % (node.type, node.name, assignment))
def visit_TempNode(self, node):
if node not in self.declared_temps:
self._declare_temp(node)
return node.name
def visit_AssignmentExpr(self, node):
if (node.rhs.is_binop and node.rhs.operator == '+' and
node.lhs == node.rhs.lhs and
node.rhs.rhs.is_constant and node.rhs.rhs.value == 1):
return "%s++" % self.visit(node.rhs.lhs)
elif node.rhs.is_binop and node.lhs == node.rhs.lhs:
return "(%s %s= %s)" % (self.visit(node.lhs),
node.rhs.operator,
self.visit(node.rhs.rhs))
elif (node.is_statement and node.lhs.is_temp and
node.lhs not in self.declared_temps and node.may_reorder):
self._mangle_temp(node.lhs)
self._declare_temp(node.lhs, self.visit(node.rhs))
else:
return "(%s = %s)" % self.results(node.lhs, node.rhs)
def visit_IfElseExprNode(self, node):
return "(%s ? %s : %s)" % (self.results(node.cond, node.lhs, node.rhs))
def visit_CastNode(self, node):
return "((%s) %s)" % (node.type, self.visit(node.operand))
def visit_DereferenceNode(self, node):
return "(*%s)" % self.visit(node.operand)
def visit_SingleIndexNode(self, node):
return "%s[%s]" % self.results(node.lhs, node.rhs)
def visit_SizeofNode(self, node):
return "sizeof(%s)" % node.sizeof_type
def visit_ArrayAttribute(self, node):
return node.name
def visit_Variable(self, node):
if node.type.is_function:
return node.name
if not node.mangled_name:
node.mangled_name = self.code.mangle(node.name)
return node.mangled_name
def visit_NoopExpr(self, node):
return ""
def visit_ResolvedVariable(self, node):
return self.visit(node.element)
def visit_JumpNode(self, node):
self.code.putln("goto %s;" % self.results(node.label))
def visit_JumpTargetNode(self, node):
self.code.putln("%s:" % self.results(node.label))
def visit_LabelNode(self, node):
if node.mangled_name is None:
node.mangled_name = self.code.mangle("%s%d" % (node.name,
self.label_counter))
self.label_counter += 1
return node.mangled_name
def visit_ConstantNode(self, node):
if node.type.is_c_string:
return '"%s"' % node.value.encode('string-escape')
return str(node.value)
def visit_ErrorHandler(self, node):
# initialize the mangled names before generating code for the body
self.visit(node.error_label)
self.visit(node.cleanup_label)
self.visit(node.error_var_init)
self.visit(node.body)
self.visit(node.cleanup_jump)
self.visit(node.error_target_label)
self.visit(node.error_set)
self.visit(node.cleanup_target_label)
disposal_codewriter = self.code.insertion_point()
self.context.generate_disposal_code(disposal_codewriter, node.body)
#have_disposal_code = disposal_codewriter.getvalue()
self.visit(node.cascade)
return node
class VectorCodegen(CCodeGen):
"""
Generate C code for vectorized ASTs. As a subclass of :py:class:`CCodeGen`,
can write C code for any minivect AST.
"""
types = {
minitypes.VectorType(minitypes.float_, 4) : '_mm_%s_ps',
minitypes.VectorType(minitypes.float_, 8) : '_mm256_%s_ps',
minitypes.VectorType(minitypes.double, 4) : '_mm_%s_pd',
minitypes.VectorType(minitypes.double, 8) : '_mm256_%s_pd',
}
binops = {
'+': 'add',
'*': 'mul',
'-': 'sub',
'/': 'div',
# pow not supported
# floordiv not supported
# mod not supported
'<': 'cmplt',
'<=': 'cmple',
'==': 'cmpeq',
'!=': 'cmpneq',
'>=': 'cmpge',
'>': 'cmpgt',
}
def visit_VectorVariable(self, node):
return self.visit(node.variable)
def visit_VectorLoadNode(self, node):
load = self.types[node.type] % 'loadu'
return '%s(%s)' % (load, self.visit(node.operand))
def visit_VectorStoreNode(self, node):
# Assignment to data pointer
store = self.types[node.rhs.type] % 'storeu'
return '%s(%s, %s)' % (store, self.visit(node.lhs),
self.visit(node.rhs))
def visit_VectorBinopNode(self, node):
binop_name = self.binops[node.operator]
func_name = self.types[node.lhs.type] % binop_name
return '%s(%s, %s)' % (func_name, self.visit(node.lhs),
self.visit(node.rhs))
def visit_ConstantVectorNode(self, node):
func_template = self.types[node.type]
if node.constant == 0:
return func_template % 'setzero'
else:
func = func_template % 'set'
c = node.constant
return '%s(%s, %s, %s, %s)' % (func, c, c, c, c)
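# Illustrative sketch (not part of the original sources): for a 4-wide
# single-precision elementwise addition, the visitors above emit C along
# the lines of
#     _mm_storeu_ps(dst, _mm_add_ps(_mm_loadu_ps(lhs), _mm_loadu_ps(rhs)));
# i.e. unaligned SSE loads/stores with the operator mapped through `binops`.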
########NEW FILE########
__FILENAME__ = complex_support
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from .miniutils import ctypes
from .minitypes import *
### Taken from Numba ###
# NOTE: The following ctypes structures were inspired by Joseph
# Heller's response to a python-list question about ctypes complex
# support. In that response, he said these were only suitable for
# Linux. Might our mileage vary?
class ComplexMixin (object):
def _get(self):
# FIXME: Ensure there will not be a loss of precision here!
return self._numpy_ty_(self.real + (self.imag * 1j))
def _set(self, value):
self.real = value.real
self.imag = value.imag
value = property(_get, _set)
@classmethod
def from_param(cls, param):
ret_val = cls()
ret_val.value = param
return ret_val
@classmethod
def make_ctypes_prototype_wrapper(cls, ctypes_prototype):
'''This is a hack so that functions that return a complex type
will construct a new Python value from the result, making the
Numba compiled function a drop-in replacement for a Python
function.'''
# FIXME: See if there is some way of avoiding this additional
# wrapper layer.
def _make_complex_result_wrapper(in_func):
ctypes_function = ctypes_prototype(in_func)
def _complex_result_wrapper(*args, **kws):
# Return the value property, not the ComplexMixin
# instance built by ctypes.
result = ctypes_function(*args, **kws)
return result.value
return _complex_result_wrapper
return _make_complex_result_wrapper
class Complex64(ctypes.Structure, ComplexMixin):
_fields_ = [('real', ctypes.c_float), ('imag', ctypes.c_float)]
_numpy_ty_ = np.complex64
class Complex128(ctypes.Structure, ComplexMixin):
_fields_ = [('real', ctypes.c_double), ('imag', ctypes.c_double)]
_numpy_ty_ = np.complex128
if hasattr(np, 'complex256'):
class Complex256(ctypes.Structure, ComplexMixin):
_fields_ = [('real', ctypes.c_longdouble), ('imag', ctypes.c_longdouble)]
_numpy_ty_ = np.complex256
else:
Complex256 = None
### End Taken from Numba ###
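# Illustrative sketch (not part of the original sources): the mixin lets the
# ctypes structs round-trip Python complex values through the `value`
# property; the helper name is hypothetical.
def _complex128_roundtrip(number=1.5 + 2.5j):
    "Pack a Python complex into a Complex128 struct and read it back."
    boxed = Complex128.from_param(number)  # fills .real and .imag
    return boxed.value                     # numpy.complex128(1.5+2.5j)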
########NEW FILE########
__FILENAME__ = ctypes_conversion
# -*- coding: utf-8 -*-
"""
Convert a minivect type to a ctypes type and an llvm function to a
ctypes function.
"""
from __future__ import print_function, division, absolute_import
import math
import warnings
from .miniutils import ctypes
from .minitypes import *
try:
from ctypes import *
except ImportError:
pass
def convert_to_ctypes(type):
"""
Convert the minitype to a ctypes type
>>> from minitypes import *
>>> assert convert_to_ctypes(int32) == ctypes.c_int32
>>> assert convert_to_ctypes(int64) == ctypes.c_int64
>>> assert convert_to_ctypes(uint32) == ctypes.c_uint32
>>> assert convert_to_ctypes(uint64) == ctypes.c_uint64
>>> assert convert_to_ctypes(short) == ctypes.c_short
>>> assert convert_to_ctypes(int_) == ctypes.c_int
>>> assert convert_to_ctypes(long_) == ctypes.c_long
>>> assert convert_to_ctypes(float_) == ctypes.c_float
>>> assert convert_to_ctypes(double) == ctypes.c_double
>>> #convert_to_ctypes(complex64)
>>> #convert_to_ctypes(complex128)
>>> #convert_to_ctypes(complex256)
"""
from . import minitypes
if type.is_pointer:
return ctypes.POINTER(convert_to_ctypes(type.base_type))
elif type.is_object or type.is_array:
return ctypes.py_object
elif type.is_float:
if type.itemsize == 4:
return ctypes.c_float
elif type.itemsize == 8:
return ctypes.c_double
else:
return ctypes.c_longdouble
elif type.is_numpy_intp or type.is_py_ssize_t:
if minitypes._plat_bits == 32:
return ctypes.c_int32
else:
return ctypes.c_int64
elif type == minitypes.int_:
return ctypes.c_int
elif type == minitypes.uint:
return ctypes.c_uint
elif type == minitypes.long_:
return ctypes.c_long
elif type == minitypes.ulong:
return ctypes.c_ulong
# TODO: short, long long, etc
elif type.is_int:
item_idx = int(math.log(type.itemsize, 2))
if type.signed:
values = [ctypes.c_int8, ctypes.c_int16, ctypes.c_int32,
ctypes.c_int64]
else:
values = [ctypes.c_uint8, ctypes.c_uint16, ctypes.c_uint32,
ctypes.c_uint64]
return values[item_idx]
elif type.is_complex:
from . import complex_support
if type.itemsize == 8:
return complex_support.Complex64
elif type.itemsize == 16:
return complex_support.Complex128
else:
return complex_support.Complex256
elif type.is_c_string:
return ctypes.c_char_p
elif type.is_function:
return_type = convert_to_ctypes(type.return_type)
arg_types = tuple(convert_to_ctypes(arg_type)
for arg_type in type.args)
return ctypes.CFUNCTYPE(return_type, *arg_types)
elif type.is_void:
return None
elif type.is_carray:
return convert_to_ctypes(type.base_type) * type.size
elif type.is_struct:
class Struct(ctypes.Structure):
_fields_ = [(field_name, convert_to_ctypes(field_type))
for field_name, field_type in type.fields]
return Struct
else:
raise NotImplementedError(type)
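# Illustrative sketch (not part of the original sources): composite types are
# converted recursively as well; the helper name is hypothetical.
def _composite_conversion_examples():
    "Pointer and function types map onto ctypes.POINTER and ctypes.CFUNCTYPE."
    from . import minitypes
    int_ptr = convert_to_ctypes(minitypes.int32.pointer())
    signature = minitypes.FunctionType(return_type=minitypes.double,
                                       args=[minitypes.double])
    func_proto = convert_to_ctypes(signature)
    return int_ptr, func_proto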
def get_ctypes_func(func, llvm_func, llvm_execution_engine, context):
"Get a ctypes function from an llvm function"
ctypes_func_type = convert_to_ctypes(func.type)
p = llvm_execution_engine.get_pointer_to_function(llvm_func)
return ctypes_func_type(p)
def get_data_pointer(numpy_array, array_type):
"Get a ctypes typed data pointer for the numpy array with type array_type"
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def get_pointer(context, llvm_func):
"Get a pointer to the LLVM function (int)"
from numba.codegen.llvmcontext import LLVMContextManager
return LLVMContextManager().execution_engine.get_pointer_to_function(llvm_func)
if __name__ == '__main__':
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = graphviz
# -*- coding: utf-8 -*-
"""
Visitor to generate a Graphviz .dot file with an AST representation.
"""
from __future__ import print_function, division, absolute_import
from .pydot import pydot
from . import minivisitor
class GraphvizGenerator(minivisitor.PrintTree):
"""
Render a minivect AST as a graphviz tree.
"""
def __init__(self, context, name, node_color=None, edge_color=None,
node_fontcolor=None, edge_fontcolor=None):
super(GraphvizGenerator, self).__init__(context)
self.name = name
self.counter = 0
self.node_color = node_color
self.edge_color = edge_color
self.node_fontcolor = node_fontcolor
self.edge_fontcolor = edge_fontcolor
def create_node(self, node):
"Create a graphviz node from the miniast node"
label = '"%s"' % self.format_node(node, want_type_info=False)
self.counter += 1
pydot_node = pydot.Node(str(self.counter), label=label)
self.add_node(pydot_node)
return pydot_node
def add_node(self, pydot_node):
"Add a pydot node to the graph and set its colors"
if self.node_color is not None:
pydot_node.set_color(self.node_color)
if self.node_fontcolor is not None:
pydot_node.set_fontcolor(self.node_fontcolor)
self.graph.add_node(pydot_node)
def add_edge(self, source, dest, edge_label=None):
"Add an edge between two pydot nodes and set the colors"
edge = pydot.Edge(source, dest)
if edge_label is not None:
edge.set_label(edge_label)
if self.edge_color is not None:
edge.set_color(self.edge_color)
if self.edge_fontcolor is not None:
edge.set_fontcolor(self.edge_fontcolor)
self.graph.add_edge(edge)
def visit_Node(self, node, pydot_node=None):
"Visit children and add edges to their Graphviz nodes."
if pydot_node is None:
pydot_node = self.create_node(node)
nodes_dict = self.visitchildren(node)
attrs = self.context.getchildren(node)
for attr in attrs:
values = nodes_dict.get(attr, None)
if values is not None:
if isinstance(values, list):
for value in values:
self.add_edge(pydot_node, value)
else:
self.add_edge(pydot_node, values, attr)
return pydot_node
def visit_FunctionNode(self, node):
"Create a graphviz graph"
self.graph = pydot.Dot(self.name, graph_type='digraph')
pydot_function = self.create_node(node)
pydot_body = self.visit(node.body)
# Create artificial arguments for brevity
pydot_args = pydot.Node("Arguments (omitted)")
self.add_node(pydot_args)
self.add_edge(pydot_function, pydot_body)
self.add_edge(pydot_function, pydot_args)
return self.graph
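# Illustrative sketch (not part of the original sources): rendering is usually
# driven through Context.graphviz(), which instantiates this visitor and
# returns the .dot source as a string. The helper name is hypothetical.
def _write_dot(context, minifunction, filename="ast.dot"):
    "Dump a Graphviz representation of `minifunction` to `filename`."
    dot_source = context.graphviz(minifunction)
    with open(filename, "w") as dot_file:
        dot_file.write(dot_source)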
########NEW FILE########
__FILENAME__ = llvm_codegen
# -*- coding: utf-8 -*-
"""
Generate LLVM code for a minivect AST.
"""
from __future__ import print_function, division, absolute_import
import sys
try:
import llvm.core
import llvm.ee
import llvm.passes
except ImportError:
llvm = None
from . import codegen
from . import minierror
from . import minitypes
from . import minivisitor
from . import ctypes_conversion
def handle_struct_passing(builder, alloca_func, largs, signature):
"""
Handle signatures with structs. If signature.struct_by_reference
is set, we need to pass in structs by reference, and retrieve
struct return values by reference through an additional argument.
Structs are always loaded as pointers.
Complex numbers are always immediate struct values.
"""
for i, (arg_type, larg) in enumerate(zip(signature.args, largs)):
if minitypes.pass_by_ref(arg_type):
if signature.struct_by_reference:
if arg_type.is_complex or \
arg_type.is_datetime or \
arg_type.is_timedelta:
new_arg = alloca_func(arg_type)
builder.store(larg, new_arg)
larg = new_arg
largs[i] = larg
if (signature.struct_by_reference and
minitypes.pass_by_ref(signature.return_type)):
return_value = alloca_func(signature.return_type)
largs.append(return_value)
return return_value
return None
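# Illustrative sketch (not part of the original sources): the call sequence
# used by visit_FuncCallNode below, shown in isolation. For a signature such
# as `complex128 (complex128)` with struct_by_reference set, the immediate
# complex argument is spilled to an alloca and passed as a pointer, and the
# return value comes back through the extra out-pointer appended to `largs`.
def _call_with_struct_abi(builder, alloca_func, lfunc, largs, signature):
    "Apply the struct-passing rewrite, emit the call, and fetch the result."
    out_ptr = handle_struct_passing(builder, alloca_func, largs, signature)
    call = builder.call(lfunc, largs)
    return builder.load(out_ptr) if out_ptr is not None else call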
class LLVMCodeGen(codegen.CodeGen):
"""
Generate LLVM code for a minivect AST.
Takes a regular :py:class:`minivect.minicode.CodeWriter` to which it
writes the LLVM function and a ctypes function.
"""
in_lhs_expr = 0
def __init__(self, context, codewriter):
super(LLVMCodeGen, self).__init__(context, codewriter)
self.declared_temps = set()
self.temp_names = set()
self.astbuilder = context.astbuilder
self.blocks = []
self.symtab = {}
self.llvm_temps = {}
self.labels = {}  # LabelNode -> llvm basic block, used by visit_LabelNode
import llvm # raise an error at this point if llvm-py is not installed
self.init_binops()
self.init_comparisons()
# List of LLVM call instructions to inline
self.inline_calls = []
def append_basic_block(self, name='unnamed'):
"append a basic block and keep track of it"
idx = len(self.blocks)
bb = self.lfunc.append_basic_block('%s_%d' % (name, idx))
self.blocks.append(bb)
return bb
def inline_funcs(self):
for call_instr in self.inline_calls:
# print 'inlining...', call_instr
llvm.core.inline_function(call_instr)
def optimize(self):
"Run llvm optimizations on the generated LLVM code"
llvm_fpm = llvm.passes.FunctionPassManager.new(self.llvm_module)
# target_data = llvm.ee.TargetData(self.context.llvm_ee)
#llvm_fpm.add(self.context.llvm_ee.target_data.clone())
pmb = llvm.passes.PassManagerBuilder.new()
pmb.opt_level = 3
pmb.vectorize = True
pmb.populate(llvm_fpm)
llvm_fpm.run(self.lfunc)
def optimize2(self, opt=3, cg=3, inline=1000):
features = '-avx'
tm = self.__machine = llvm.ee.TargetMachine.new(
opt=cg, cm=llvm.ee.CM_JITDEFAULT, features=features)
has_loop_vectorizer = llvm.version >= (3, 2)
passmanagers = llvm.passes.build_pass_managers(
tm, opt=opt, inline_threshold=inline,
loop_vectorize=has_loop_vectorizer, fpm=False)
passmanagers.pm.run(self.llvm_module)
def visit_FunctionNode(self, node):
self.specializer = node.specializer
self.function = node
self.llvm_module = self.context.llvm_module
name = node.name + node.specialization_name
node.mangled_name = name
lfunc_type = node.type.to_llvm(self.context)
self.lfunc = self.llvm_module.add_function(lfunc_type, node.mangled_name)
# self.lfunc.linkage = llvm.core.LINKAGE_LINKONCE_ODR
self.entry_bb = self.append_basic_block('entry')
self.builder = llvm.core.Builder.new(self.entry_bb)
self.add_arguments(node)
self.visit(node.body)
# print self.lfunc
self.llvm_module.verify()
self.inline_funcs()
if self.context.optimize_llvm:
self.optimize2()
self.code.write(self.lfunc)
# from numba.codegen.llvmcontext import LLVMContextManager
# ctypes_func = ctypes_conversion.get_ctypes_func(
# node, self.lfunc, LLVMContextManager().execution_engine,
# self.context)
# self.code.write(ctypes_func)
def add_arguments(self, function):
"Insert function arguments into the symtab"
i = 0
for arg in function.arguments + function.scalar_arguments:
if arg.used:
for var in arg.variables:
llvm_arg = self.lfunc.args[i]
self.symtab[var.name] = llvm_arg
if var.type.is_pointer:
llvm_arg.add_attribute(llvm.core.ATTR_NO_ALIAS)
llvm_arg.add_attribute(llvm.core.ATTR_NO_CAPTURE)
i += 1
else:
for var in arg.variables:
self.symtab[var.name] = self.visit(var)
def visit_PrintNode(self, node):
pass
def visit_Node(self, node):
self.visitchildren(node)
return node
def visit_OpenMPConditionalNode(self, node):
"OpenMP is not yet implemented, only process the 'else' directives."
if node.else_body:
self.visit(node.else_body)
return node
def visit_ForNode(self, node):
'''
Generate the basic blocks for a simple C-style for loop (init; condition; step).
'''
bb_cond = self.append_basic_block('for.cond')
bb_incr = self.append_basic_block('for.incr')
bb_body = self.append_basic_block('for.body')
bb_exit = self.append_basic_block('for.exit')
# generate initializer
self.visit(node.init)
self.builder.branch(bb_cond)
# generate condition
self.builder.position_at_end(bb_cond)
cond = self.visit(node.condition)
self.builder.cbranch(cond, bb_body, bb_exit)
# generate increment
self.builder.position_at_end(bb_incr)
self.visit(node.step)
self.builder.branch(bb_cond)
# generate body
self.builder.position_at_end(bb_body)
self.visit(node.body)
self.builder.branch(bb_incr)
# move to exit block
self.builder.position_at_end(bb_exit)
def visit_IfNode(self, node):
cond = self.visit(node.cond)
bb_true = self.append_basic_block('if.true')
bb_endif = self.append_basic_block('if.end')
if node.else_body:
bb_false = self.append_basic_block('if.false')
else:
bb_false = bb_endif
self.builder.cbranch(cond, bb_true, bb_false)
# if cond
self.builder.position_at_end(bb_true)
self.visit(node.body)
self.builder.branch(bb_endif)
if node.else_body:
# else
self.builder.position_at_end(bb_false)
self.visit(node.else_body)
self.builder.branch(bb_endif)
# endif
self.builder.position_at_end(bb_endif)
def visit_ReturnNode(self, node):
self.builder.ret(self.visit(node.operand))
def visit_CastNode(self, node):
if node.type.is_pointer:
result = self.visit(node.operand)
dest_type = node.type.to_llvm(self.context)
# print result, dest_type
# node.print_tree(self.context)
return self.builder.bitcast(result, dest_type)
# return result.bitcast(node.type)
return self.visit_PromotionNode(node)
def visit_PromotionNode(self, node):
"""
Handle promotions as inserted by
:py:class:`minivect.type_promoter.TypePromoter`
"""
result = self.visit(node.operand)
type = node.type
op_type = node.operand.type
smaller = type.itemsize < op_type.itemsize
if type.is_int and op_type.is_int:
op = (('zext', 'sext'), ('trunc', 'trunc'))[smaller][type.signed]
elif type.is_float and op_type.is_float:
op = ('fpext', 'fptrunc')[smaller]
elif type.is_int and op_type.is_float:
op = ('fptoui', 'fptosi')[type.signed]
elif type.is_float and op_type.is_int:
op = ('uitofp', 'sitofp')[op_type.signed]
else:
raise NotImplementedError((type, op_type))
ltype = type.to_llvm(self.context)
return getattr(self.builder, op)(result, ltype)
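# For example, the rules above select:
#     int32  -> int64  : 'sext'     int64  -> int32  : 'trunc'
#     float  -> double : 'fpext'    double -> float  : 'fptrunc'
#     double -> int32  : 'fptosi'   int32  -> double : 'sitofp'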
def init_binops(self):
# (float_op, unsigned_int_op, signed_int_op)
self._binops = {
'+': ('fadd', 'add', 'add'),
'-': ('fsub', 'sub', 'sub'),
'*': ('fmul', 'mul', 'mul'),
'/': ('fdiv', 'udiv', 'sdiv'),
'%': ('frem', 'urem', 'srem'),
'&': (None, 'and_', 'and_'),
'|': (None, 'or_', 'or_'),
'^': (None, 'xor', 'xor'),
# TODO: other ops
}
def init_comparisons(self):
"""
Define binary operation LLVM instructions. Do this in a function in
case llvm-py is not installed.
"""
self._compare_mapping_float = {
'>': llvm.core.FCMP_OGT,
'<': llvm.core.FCMP_OLT,
'==': llvm.core.FCMP_OEQ,
'>=': llvm.core.FCMP_OGE,
'<=': llvm.core.FCMP_OLE,
'!=': llvm.core.FCMP_ONE,
}
self._compare_mapping_sint = {
'>': llvm.core.ICMP_SGT,
'<': llvm.core.ICMP_SLT,
'==': llvm.core.ICMP_EQ,
'>=': llvm.core.ICMP_SGE,
'<=': llvm.core.ICMP_SLE,
'!=': llvm.core.ICMP_NE,
}
self._compare_mapping_uint = {
'>': llvm.core.ICMP_UGT,
'<': llvm.core.ICMP_ULT,
'==': llvm.core.ICMP_EQ,
'>=': llvm.core.ICMP_UGE,
'<=': llvm.core.ICMP_ULE,
'!=': llvm.core.ICMP_NE,
}
def visit_BinopNode(self, node):
lhs = self.visit(node.lhs)
rhs = self.visit(node.rhs)
op = node.operator
if (node.type.is_int or node.type.is_float) and node.operator in self._binops:
idx = 0 if node.type.is_float else node.type.is_int + node.type.signed
llvm_method_name = self._binops[op][idx]
meth = getattr(self.builder, llvm_method_name)
if not lhs.type == rhs.type:
node.print_tree(self.context)
assert False, (node.lhs.type, node.rhs.type, lhs.type, rhs.type)
return meth(lhs, rhs)
elif node.operator in self._compare_mapping_float:
return self.generate_compare(node, op, lhs, rhs)
elif node.type.is_pointer:
if node.rhs.type.is_pointer:
lhs, rhs = rhs, lhs
return self.builder.gep(lhs, [rhs])
else:
raise minierror.CompileError(
node, "Binop %s (type=%s) not implemented for types (%s, %s)" % (
op, node.type, lhs.type, rhs.type))
def generate_compare(self, node, op, lhs_value, rhs_value):
op = node.operator
lop = None
if node.lhs.type.is_float and node.rhs.type.is_float:
lfunc = self.builder.fcmp
lop = self._compare_mapping_float[op]
elif node.lhs.type.is_int and node.rhs.type.is_int:
lfunc = self.builder.icmp
if node.lhs.type.signed and node.rhs.type.signed:
lop = self._compare_mapping_sint[op]
elif not (node.lhs.type.signed or node.rhs.type.signed):
lop = self._compare_mapping_uint[op]
if lop is None:
raise minierror.CompileError(
node, "%s for types (%s, %s)" % (node.operator,
node.lhs.type, node.rhs.type))
return lfunc(lop, lhs_value, rhs_value)
def visit_UnopNode(self, node):
result = self.visit(node.operand)
if node.operator == '-':
return self.builder.neg(result)
elif node.operator == '+':
return result
else:
raise NotImplementedError(node.operator)
def visit_TempNode(self, node):
if node not in self.declared_temps:
llvm_temp = self._declare_temp(node)
else:
llvm_temp = self.llvm_temps[node]
if self.in_lhs_expr:
return llvm_temp
else:
return self.builder.load(llvm_temp)
def _mangle_temp(self, node):
name = node.repr_name or node.name
if name in self.temp_names:
name = "%s%d" % (name, len(self.declared_temps))
node.name = name
self.temp_names.add(name)
self.declared_temps.add(node)
def _declare_temp(self, node, rhs_result=None):
if node not in self.declared_temps:
self._mangle_temp(node)
llvm_temp = self.alloca(node.type)
self.llvm_temps[node] = llvm_temp
return llvm_temp
def alloca(self, type, name=''):
bb = self.builder.basic_block
self.builder.position_at_beginning(self.entry_bb)
llvm_temp = self.builder.alloca(type.to_llvm(self.context), name)
self.builder.position_at_end(bb)
return llvm_temp
def visit_AssignmentExpr(self, node):
self.in_lhs_expr += 1
lhs = self.visit(node.lhs)
self.in_lhs_expr -= 1
rhs = self.visit(node.rhs)
return self.builder.store(rhs, lhs)
def visit_SingleIndexNode(self, node):
in_lhs_expr = self.in_lhs_expr
if in_lhs_expr:
self.in_lhs_expr -= 1
lhs = self.visit(node.lhs)
rhs = self.visit(node.rhs)
if in_lhs_expr:
self.in_lhs_expr += 1
result = self.builder.gep(lhs, [rhs])
if self.in_lhs_expr:
return result
else:
return self.builder.load(result)
def visit_DereferenceNode(self, node):
node = self.astbuilder.index(node.operand, self.astbuilder.constant(0))
return self.visit_SingleIndexNode(node)
def visit_SizeofNode(self, node):
return self.visit(self.astbuilder.constant(node.sizeof_type.itemsize,
node.type))
def visit_Variable(self, node):
value = self.symtab[node.name]
return value
def visit_ArrayAttribute(self, node):
return self.symtab[node.name]
def visit_NoopExpr(self, node):
pass
def visit_ResolvedVariable(self, node):
return self.visit(node.element)
def visit_JumpNode(self, node):
return self.builder.branch(self.visit(node.label))
def visit_JumpTargetNode(self, node):
basic_block = self.visit(node.label)
self.builder.branch(basic_block)
self.builder.position_at_end(basic_block)
def visit_LabelNode(self, node):
if node not in self.labels:
self.labels[node] = self.append_basic_block(node.name)
return self.labels[node]
def handle_string_constant(self, b, constant):
# Create a global string constant if it doesn't already exist.
# Based on code in Numba; easier than creating a stack variable.
string_constants = self.context.string_constants = getattr(
self.context, 'string_constants', {})
if constant in string_constants:
lvalue = string_constants[constant]
else:
lstring = llvm.core.Constant.stringz(constant)
lvalue = self.context.llvm_module.add_global_variable(
lstring.type, "__string_%d" % len(string_constants))
lvalue.initializer = lstring
lvalue.linkage = llvm.core.LINKAGE_INTERNAL
lzero = self.visit(b.constant(0))
lvalue = self.builder.gep(lvalue, [lzero, lzero])
string_constants[constant] = lvalue
return lvalue
def visit_ConstantNode(self, node):
b = self.astbuilder
ltype = node.type.to_llvm(self.context)
constant = node.value
if node.type.is_float:
lvalue = llvm.core.Constant.real(ltype, constant)
elif node.type.is_int:
lvalue = llvm.core.Constant.int(ltype, constant)
elif node.type.is_pointer and constant == 0:
lvalue = llvm.core.Constant.null(ltype)
elif node.type.is_c_string:
lvalue = self.handle_string_constant(b, constant)
else:
raise NotImplementedError("Constant %s of type %s" % (constant,
node.type))
return lvalue
def visit_FuncCallNode(self, node):
llvm_args = list(self.results(node.args))
llvm_func = self.visit(node.func_or_pointer)
signature = node.func_or_pointer.type
if signature.struct_by_reference:
result = handle_struct_passing(
self.builder, self.alloca, llvm_args, signature)
llvm_call = self.builder.call(llvm_func, llvm_args)
if node.inline:
self.inline_calls.append(llvm_call)
if (signature.struct_by_reference and
minitypes.pass_by_ref(signature.return_type)):
return self.builder.load(result)
else:
return llvm_call
def visit_FuncNameNode(self, node):
func_type = node.type.to_llvm(self.context)
func = self.context.llvm_module.get_or_insert_function(func_type,
node.name)
return func
def visit_FuncRefNode(self, node):
raise NotImplementedError
########NEW FILE########
__FILENAME__ = miniast
# -*- coding: utf-8 -*-
"""
This module provides the AST. Subclass :py:class:`Context` and override the
various methods to allow minivect visitors over the AST, to promote and map types,
etc. Subclass and override :py:class:`ASTBuilder`'s methods to provide alternative
AST nodes or different implementations.
"""
from __future__ import print_function, division, absolute_import
import copy
import string
import types
from . import minierror
from . import minitypes
from . import miniutils
from . import minivisitor
from . import specializers
from . import type_promoter
from . import minicode
from . import codegen
from . import llvm_codegen
from . import graphviz
try:
import llvm.core
import llvm.ee
import llvm.passes
except ImportError:
llvm = None
class UndocClassAttribute(object):
"Use this to document class attributes for Sphinx"
def __init__(self, cls):
self.cls = cls
def __call__(self, *args, **kwargs):
return self.cls(*args, **kwargs)
def make_cls(cls1, cls2):
"Fuse two classes together."
name = "%s_%s" % (cls1.__name__, cls2.__name__)
return type(name, (cls1, cls2), {})
class Context(object):
"""
A context that knows how to map ASTs back and forth, how to wrap nodes
and types, and how to instantiate a code generator for specialization.
An opaque_node or foreign node is a node that is not from our AST,
and a normal node is one that has an interface compatible with ours.
To provide custom functionality, set the following attributes, or
subclass this class.
:param astbuilder: the :py:class:`ASTBuilder` or ``None``
:param typemapper: the :py:class:`minivect.minitypes.Typemapper` or
``None`` for the default.
.. attribute:: codegen_cls
The code generator class that is used to generate code.
The default is :py:class:`minivect.codegen.CodeGen`
.. attribute:: cleanup_codegen_cls
The code generator that generates code to dispose of any
garbage (e.g. intermediate object temporaries).
The default is :py:class:`minivect.codegen.CodeGenCleanup`
.. attribute:: codewriter_cls
The code writer that the code generator writes its generated code
to. This may be strings or arbitrary objects.
The default is :py:class:`minivect.minicode.CodeWriter`, which accepts
arbitrary objects.
.. attribute:: codeformatter_cls
A formatter to format the generated code.
The default is :py:class:`minivect.minicode.CodeFormatter`,
which returns a list of objects written. Set this to
:py:class:`minivect.minicode.CodeStringFormatter`
to have the strings joined together.
.. attribute:: specializer_mixin_cls
A specializer mixin class that can override or intercept
functionality. This class should likely participate
cooperatively in MI.
.. attribute:: variable_resolving_mixin_cls
A specializer mixin class that resolves wrapped miniasts in a foreign
AST. This is only needed if you are using :py:class:`NodeWrapper`,
which wraps a miniast somewhere at the leaves.
.. attribute:: graphviz_cls
Visitor to generate a Graphviz graph. See the :py:mod:`graphviz`
module.
.. attribute:: minifunction
The current minifunction that is being translated.
Use subclass :py:class:`CContext` to get the defaults for C code generation.
"""
debug = False
debug_elements = False
use_llvm = False
optimize_llvm = True
optimize_broadcasting = True
shape_type = minitypes.Py_ssize_t.pointer()
strides_type = shape_type
astbuilder_cls = None
codegen_cls = UndocClassAttribute(codegen.VectorCodegen)
cleanup_codegen_cls = UndocClassAttribute(codegen.CodeGenCleanup)
codewriter_cls = UndocClassAttribute(minicode.CodeWriter)
codeformatter_cls = UndocClassAttribute(minicode.CodeFormatter)
graphviz_cls = UndocClassAttribute(graphviz.GraphvizGenerator)
specializer_mixin_cls = None
variable_resolving_mixin_cls = None
func_counter = 0
final_specializer = specializers.FinalSpecializer
def __init__(self):
self.init()
if self.use_llvm:
if llvm is None:
import llvm.core as llvm_py_not_available # llvm-py not available
self.llvm_module = llvm.core.Module.new('default_module')
# self.llvm_ee = llvm.ee.ExecutionEngine.new(self.llvm_module)
#self.llvm_ee = llvm.ee.EngineBuilder.new(self.llvm_module).force_jit().opt(3).create()
self.llvm_fpm = llvm.passes.FunctionPassManager.new(self.llvm_module)
self.llvm_fpm.initialize()
if not self.debug:
for llvm_pass in self.llvm_passes():
self.llvm_fpm.add(llvm_pass)
else:
self.llvm_ee = None
self.llvm_module = None
def init(self):
self.astbuilder = self.astbuilder_cls(self)
self.typemapper = minitypes.TypeMapper(self)
def run_opaque(self, astmapper, opaque_ast, specializers):
return self.run(astmapper.visit(opaque_ast), specializers)
def run(self, ast, specializer_classes, graphviz_outfile=None,
print_tree=False):
"""
Specialize the given AST with all given specializers and return
an iterable of generated code in the form of
``(specializer, new_ast, codewriter, code_obj)``
The code_obj is the generated code (e.g. a string of C code),
depending on the code formatter used.
"""
for specializer_class in specializer_classes:
self.init()
pipeline = self.pipeline(specializer_class)
specialized_ast = specializers.specialize_ast(ast)
self.astbuilder.minifunction = specialized_ast
for transform in pipeline:
specialized_ast = transform.visit(specialized_ast)
if print_tree:
specialized_ast.print_tree(self)
if graphviz_outfile is not None:
data = self.graphviz(specialized_ast)
graphviz_outfile.write(data)
codewriter = self.codewriter_cls(self)
codegen = self.codegen_cls(self, codewriter)
codegen.visit(specialized_ast)
specializer = pipeline[0]
yield (specializer, specialized_ast, codewriter,
self.codeformatter_cls().format(codewriter))
def run_simple(self, ast, specializer):
(_, _, _, code_result), = self.run(ast, [specializer])
return code_result
def debug_c(self, ast, specializer, astbuilder_cls=None):
"Generate C code (for debugging)"
context = CContext()
if astbuilder_cls:
context.astbuilder_cls = astbuilder_cls
else:
context.astbuilder_cls = self.astbuilder_cls
context.shape_type = self.shape_type
context.strides_type = self.strides_type
context.debug = self.debug
result = next(context.run(ast, [specializer]))
_, specialized_ast, _, (proto, impl) = result
return impl
def pipeline(self, specializer_class):
# add specializer mixin and run specializer
if self.specializer_mixin_cls:
specializer_class = make_cls(self.specializer_mixin_cls,
specializer_class)
specializer = specializer_class(self)
pipeline = [specializer]
# Add variable resolving mixin to the final specializer and run
# transform
final_specializer_cls = self.final_specializer
if final_specializer_cls:
if self.variable_resolving_mixin_cls:
final_specializer_cls = make_cls(
self.variable_resolving_mixin_cls,
final_specializer_cls)
pipeline.append(final_specializer_cls(self, specializer))
pipeline.append(type_promoter.TypePromoter(self))
return pipeline
def generate_disposal_code(self, code, node):
"Run the disposal code generator on an (sub)AST"
transform = self.cleanup_codegen_cls(self, code)
transform.visit(node)
#
### Override in subclasses where needed
#
def llvm_passes(self):
"Returns a list of LLVM optimization passes"
return []
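# NOTE: the early return above disables the explicit pass list below; it is
# kept only as a reference of which LLVM passes have been tried.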
return [
# llvm.passes.PASS_CFG_SIMPLIFICATION
llvm.passes.PASS_BLOCK_PLACEMENT,
llvm.passes.PASS_BASIC_ALIAS_ANALYSIS,
llvm.passes.PASS_NO_AA,
llvm.passes.PASS_SCALAR_EVOLUTION_ALIAS_ANALYSIS,
# llvm.passes.PASS_ALIAS_ANALYSIS_COUNTER,
llvm.passes.PASS_AAEVAL,
llvm.passes.PASS_LOOP_DEPENDENCE_ANALYSIS,
llvm.passes.PASS_BREAK_CRITICAL_EDGES,
llvm.passes.PASS_LOOP_SIMPLIFY,
llvm.passes.PASS_PROMOTE_MEMORY_TO_REGISTER,
llvm.passes.PASS_CONSTANT_PROPAGATION,
llvm.passes.PASS_LICM,
# llvm.passes.PASS_CONSTANT_MERGE,
llvm.passes.PASS_LOOP_STRENGTH_REDUCE,
llvm.passes.PASS_LOOP_UNROLL,
# llvm.passes.PASS_FUNCTION_ATTRS,
# llvm.passes.PASS_GLOBAL_OPTIMIZER,
# llvm.passes.PASS_GLOBAL_DCE,
llvm.passes.PASS_DEAD_CODE_ELIMINATION,
llvm.passes.PASS_INSTRUCTION_COMBINING,
llvm.passes.PASS_CODE_GEN_PREPARE,
]
def mangle_function_name(self, name):
name = "%s_%d" % (name, self.func_counter)
self.func_counter += 1
return name
def promote_types(self, type1, type2):
"Promote types in an arithmetic operation"
if type1 == type2:
return type1
return self.typemapper.promote_types(type1, type2)
def getchildren(self, node):
"Implement to allow a minivisitor.Visitor over a foreign AST."
return node.child_attrs
def getpos(self, opaque_node):
"Get the position of a foreign node"
filename, line, col = opaque_node.pos
return Position(filename, line, col)
def gettype(self, opaque_node):
"Get a type of a foreign node"
return opaque_node.type
def may_error(self, opaque_node):
"Return whether this node may result in an exception."
raise NotImplementedError
def declare_type(self, type):
"Return a declaration for a type"
raise NotImplementedError
def to_llvm(self, type):
"Return an LLVM type for the given minitype"
return self.typemapper.to_llvm(type)
def graphviz(self, node, graphviz_name="AST"):
visitor = self.graphviz_cls(self, graphviz_name)
graphviz_graph = visitor.visit(node)
return graphviz_graph.to_string()
def is_object(self, type):
return isinstance(type, minitypes.ObjectType)
class CContext(Context):
"Set defaults for C code generation."
codegen_cls = codegen.VectorCodegen
codewriter_cls = minicode.CCodeWriter
codeformatter_cls = minicode.CCodeStringFormatter
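# Illustrative sketch (not part of the original sources): the minimal way to
# turn an already-built minivect function AST into C source. The helper name
# is hypothetical; `specializer_class` would come from minivect.specializers.
def _generate_c(minifunction, specializer_class):
    "Return the generated C implementation for one specialization."
    class _Ctx(CContext):
        astbuilder_cls = ASTBuilder  # the default builder defined below
    return _Ctx().run_simple(minifunction, specializer_class)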
class LLVMContext(Context):
"Context with default for LLVM code generation"
use_llvm = True
codegen_cls = llvm_codegen.LLVMCodeGen
class ASTBuilder(object):
"""
This class is used to build up a minivect AST. It can be used by a user
from a transform or otherwise, but the important bit is that we use it
in our code to build up an AST that can be overridden by the user,
and which makes it convenient to build up complex ASTs concisely.
"""
# the 'pos' attribute is set for each visit to each node by
# the ASTMapper
pos = None
temp_reprname_counter = 0
def __init__(self, context):
"""
:param context: the :py:class:`Context`
"""
self.context = context
def _infer_type(self, value):
"Used to infer types for self.constant()"
if isinstance(value, (int, long)):
return minitypes.IntType()
elif isinstance(value, float):
return minitypes.FloatType()
elif isinstance(value, str):
return minitypes.CStringType()
else:
raise minierror.InferTypeError()
def create_function_type(self, function, strides_args=True):
arg_types = []
for arg in function.arguments + function.scalar_arguments:
if arg.used:
if arg.type and arg.type.is_array and not strides_args:
arg_types.append(arg.data_pointer.type)
arg.variables = [arg.data_pointer]
else:
for variable in arg.variables:
arg_types.append(variable.type)
function.type = minitypes.FunctionType(
return_type=function.success_value.type, args=arg_types)
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
"""
Create a new function.
:type name: str
:param name: name of the function
:type args: [:py:class:`FunctionArgument`]
:param args: all array and scalar arguments to the function, excluding
shape or position information.
:param shapevar: the :py:class:`Variable` for the total broadcast shape
If ``None``, a default of ``Py_ssize_t *`` is assumed.
:type posinfo: :py:class:`FunctionArgument`
:param posinfo: if given, this will be the second, third and fourth
arguments to the function ``(filename, lineno, column)``.
"""
if shapevar is None:
shapevar = self.variable(self.context.shape_type, 'shape')
arguments, scalar_arguments = [], []
for arg in args:
if arg.type.is_array:
arguments.append(arg)
else:
scalar_arguments.append(arg)
arguments.insert(0, self.funcarg(shapevar))
if posinfo:
arguments.insert(1, posinfo)
body = self.stats(self.nditerate(body))
error_value = self.constant(-1)
success_value = self.constant(0)
function = FunctionNode(self.pos, name, body,
arguments, scalar_arguments,
shapevar, posinfo,
error_value=error_value,
success_value=success_value,
omp_size=omp_size or self.constant(1024))
# prepending statements, used during specialization
function.prepending = self.stats()
function.body = self.stats(function.prepending, function.body)
self.create_function_type(function)
return function
def build_function(self, variables, body, name=None, shapevar=None):
"Convenience method for building a minivect function"
args = []
for var in variables:
if var.type.is_array:
args.append(self.array_funcarg(var))
else:
args.append(self.funcarg(var))
name = name or 'function'
return self.function(name, body, args, shapevar=shapevar)
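# Illustrative sketch (not part of the original sources): assembling a tiny
# elementwise kernel with this builder, assuming `a`, `b` and `out` are
# array Variables of the same dtype:
#     builder = context.astbuilder
#     body = builder.assign(out, builder.add(a, b))           # out = a + b
#     func = builder.build_function([out, a, b], body, name='add_kernel')
# `func` can then be specialized through Context.run()/run_simple().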
def function_from_numpy(self, name, ast, miniargs):
"""
Build a minivect function from the given ast and arguments. The
function takes a pointer to a numpy shape (npy_intp *).
"""
type = minitypes.npy_intp.pointer()
shape_variable = self.variable(type, 'shape')
return self.function(name, ast, miniargs, shapevar=shape_variable)
def funcarg(self, variable, *variables):
"""
Create a (compound) function argument consisting of one or multiple
argument Variables.
"""
if variable.type is not None and variable.type.is_array:
assert not variables
return self.array_funcarg(variable)
if not variables:
variables = [variable]
return FunctionArgument(self.pos, variable, list(variables))
def array_funcarg(self, variable):
"Create an array function argument"
return ArrayFunctionArgument(
self.pos, variable.type, name=variable.name,
variable=variable,
data_pointer=self.data_pointer(variable),
#shape_pointer=self.shapevar(variable),
strides_pointer=self.stridesvar(variable))
def incref(self, var, funcname='Py_INCREF'):
"Generate a Py_INCREF() statement"
functype = minitypes.FunctionType(return_type=minitypes.void,
args=[minitypes.object_])
py_incref = self.funcname(functype, funcname)
return self.expr_stat(self.funccall(py_incref, [var]))
def decref(self, var):
"Generate a Py_DECCREF() statement"
return self.incref(var, funcname='Py_DECREF')
def print_(self, *args):
"Print out all arguments to stdout"
return PrintNode(self.pos, args=list(args))
def funccall(self, func_or_pointer, args, inline=False):
"""
Generate a call to the given function (a :py:class:`FuncNameNode`) of
:py:class:`minivect.minitypes.function` or a
pointer to a function type and the given arguments.
"""
type = func_or_pointer.type
if type.is_pointer:
type = func_or_pointer.type.base_type
return FuncCallNode(self.pos, type.return_type,
func_or_pointer=func_or_pointer, args=args,
inline=inline)
def funcname(self, type, name, is_external=True):
assert type.is_function
return FuncNameNode(self.pos, type, name=name, is_external=is_external)
def nditerate(self, body):
"""
This node wraps the given AST expression in an :py:class:`NDIterate`
node, which will be expanded by the specializers to one or several
loops.
"""
return NDIterate(self.pos, body)
def for_(self, body, init, condition, step, index=None):
"""
Create a for loop node.
:param body: loop body
:param init: assignment expression
:param condition: boolean loop condition
:param step: step clause (assignment expression)
"""
return ForNode(self.pos, init, condition, step, body, index=index)
def for_range_upwards(self, body, upper, lower=None, step=None):
"""
Create a single upwards for loop, typically used from a specializer to
replace an :py:class:`NDIterate` node.
:param body: the loop body
:param upper: expression specifying an upper bound
"""
index_type = upper.type.unqualify("const")
if lower is None:
lower = self.constant(0, index_type)
if step is None:
step = self.constant(1, index_type)
temp = self.temp(index_type)
init = self.assign_expr(temp, lower)
condition = self.binop(minitypes.bool_, '<', temp, upper)
step = self.assign_expr(temp, self.add(temp, step))
result = self.for_(body, init, condition, step)
result.target = temp
return result
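# In C terms, for_range_upwards(body, upper) expands to roughly
#     for (temp = 0; temp < upper; temp = temp + 1) { <body> }
# with the loop counter exposed on the returned node as `result.target`.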
def omp_for(self, for_node, if_clause):
"""
Annotate the for loop with an OpenMP parallel for clause.
:param if_clause: the expression node that determines whether the
parallel section is executed or whether it is
executed sequentially (to avoid synchronization
overhead)
"""
if isinstance(for_node, PragmaForLoopNode):
for_node = for_node.for_node
return OpenMPLoopNode(self.pos, for_node=for_node,
if_clause=if_clause,
lastprivates=[for_node.init.lhs],
privates=[])
def omp_if(self, if_body, else_body=None):
return OpenMPConditionalNode(self.pos, if_body=if_body,
else_body=else_body)
def pragma_for(self, for_node):
"""
Annotate the for loop with pragmas.
"""
return PragmaForLoopNode(self.pos, for_node=for_node)
def stats(self, *statements):
"""
Wrap a bunch of statements in an AST node.
"""
return StatListNode(self.pos, list(statements))
def expr_stat(self, expr):
"Turn an expression into a statement"
return ExprStatNode(expr.pos, type=expr.type, expr=expr)
def expr(self, stats=(), expr=None):
"Evaluate a bunch of statements before evaluating an expression."
return ExprNodeWithStatement(self.pos, type=expr.type,
stat=self.stats(*stats), expr=expr)
def if_(self, cond, body):
"If statement"
return self.if_else(cond, body, None)
def if_else_expr(self, cond, lhs, rhs):
"If/else expression, resulting in lhs if cond else rhs"
type = self.context.promote_types(lhs.type, rhs.type)
return IfElseExprNode(self.pos, type=type, cond=cond, lhs=lhs, rhs=rhs)
def if_else(self, cond, if_body, else_body):
return IfNode(self.pos, cond=cond, body=if_body, else_body=else_body)
def promote(self, dst_type, node):
"Promote or demote the node to the given dst_type"
if node.type != dst_type:
if node.is_constant and node.type.kind == dst_type.kind:
node.type = dst_type
return node
return PromotionNode(self.pos, dst_type, node)
return node
def binop(self, type, op, lhs, rhs):
"""
Binary operation on two nodes.
:param type: the result type of the expression
:param op: binary operator
:type op: str
"""
return BinopNode(self.pos, type, op, lhs, rhs)
def add(self, lhs, rhs, result_type=None, op='+'):
"""
Shorthand for the + binop. Filters out adding 0 constants.
"""
if op == '+' and lhs.is_constant and lhs.value == 0:
return rhs
elif rhs.is_constant and rhs.value == 0:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def sub(self, lhs, rhs, result_type=None):
return self.add(lhs, rhs, result_type, op='-')
def mul(self, lhs, rhs, result_type=None, op='*'):
"""
Shorthand for the * binop. Filters out multiplication with 1 constants.
"""
if op == '*' and lhs.is_constant and lhs.value == 1:
return rhs
elif rhs.is_constant and rhs.value == 1:
return lhs
if result_type is None:
result_type = self.context.promote_types(lhs.type, rhs.type)
return self.binop(result_type, op, lhs, rhs)
def div(self, lhs, rhs, result_type=None):
return self.mul(lhs, rhs, result_type=result_type, op='/')
def min(self, lhs, rhs):
"""
Returns min(lhs, rhs) expression.
.. NOTE:: Make lhs and rhs temporaries if they should only be
evaluated once.
"""
type = self.context.promote_types(lhs.type, rhs.type)
cmp_node = self.binop(type, '<', lhs, rhs)
return self.if_else_expr(cmp_node, lhs, rhs)
def index(self, pointer, index, dest_pointer_type=None):
"""
Index a pointer with the given index node.
:param dest_pointer_type: if given, cast the result (*after* adding
the index) to the destination type and
dereference.
"""
if dest_pointer_type:
return self.index_multiple(pointer, [index], dest_pointer_type)
return SingleIndexNode(self.pos, pointer.type.base_type,
pointer, index)
def index_multiple(self, pointer, indices, dest_pointer_type=None):
"""
Same as :py:meth:`index`, but accepts multiple indices. This is
useful e.g. after multiplication of the indices with the strides.
"""
for index in indices:
pointer = self.add(pointer, index)
if dest_pointer_type is not None:
pointer = self.cast(pointer, dest_pointer_type)
return self.dereference(pointer)
def assign_expr(self, node, value, may_reorder=False):
"Create an assignment expression assigning ``value`` to ``node``"
assert node is not None
if not isinstance(value, Node):
value = self.constant(value)
return AssignmentExpr(self.pos, node.type, node, value,
may_reorder=may_reorder)
def assign(self, node, value, may_reorder=False):
"Assignment statement"
expr = self.assign_expr(node, value, may_reorder=may_reorder)
return self.expr_stat(expr)
def dereference(self, pointer):
"Dereference a pointer"
return DereferenceNode(self.pos, pointer.type.base_type, pointer)
def unop(self, type, operator, operand):
"Unary operation. ``type`` indicates the result type of the expression."
return UnopNode(self.pos, type, operator, operand)
def coerce_to_temp(self, expr):
"Coerce the given expression to a temporary"
type = expr.type
if type.is_array:
type = type.dtype
temp = self.temp(type)
return self.expr(stats=[self.assign(temp, expr)], expr=temp)
def temp(self, type, name=None):
"Allocate a temporary of a given type"
name = name or 'temp'
repr_name = '%s%d' % (name.rstrip(string.digits),
self.temp_reprname_counter)
self.temp_reprname_counter += 1
return TempNode(self.pos, type, name=name, repr_name=repr_name)
def constant(self, value, type=None):
"""
Create a constant from a Python value. If type is not given, it is
inferred (or it will raise a
:py:class:`minivect.minierror.InferTypeError`).
"""
if type is None:
type = self._infer_type(value)
return ConstantNode(self.pos, type, value)
def variable(self, type, name):
"""
Create a variable with a name and type. Variables
may refer to function arguments, functions, etc.
"""
return Variable(self.pos, type, name)
def resolved_variable(self, array_type, name, element):
"""
Creates a node that keeps the array operand information such as the
original array type, but references an actual element in the array.
:param type: original array type
:param name: original array's name
:param element: arbitrary expression that resolves some element in the
array
"""
return ResolvedVariable(self.pos, element.type, name,
element=element, array_type=array_type)
def cast(self, node, dest_type):
"Cast node to the given destination type"
return CastNode(self.pos, dest_type, node)
def return_(self, result):
"Return a result"
return ReturnNode(self.pos, result)
def data_pointer(self, variable):
"Return the data pointer of an array variable"
assert variable.type.is_array
return DataPointer(self.pos, variable.type.dtype.pointer(),
variable)
def shape_index(self, index, function):
"Index the shape of the array operands with integer `index`"
return self.index(function.shape, self.constant(index))
def extent(self, variable, index, function):
"Index the shape of a specific variable with integer `index`"
assert variable.type.is_array
offset = function.ndim - variable.type.ndim
return self.index(function.shape, self.constant(index + offset))
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
return StridePointer(self.pos, self.context.strides_type, variable)
def stride(self, variable, index):
"Return the stride of array operand `variable` at integer `index`"
return self.index(self.stridesvar(variable), self.constant(index))
def sizeof(self, type):
"Return the expression sizeof(type)"
return SizeofNode(self.pos, minitypes.size_t, sizeof_type=type)
def jump(self, label):
"Jump to a label"
return JumpNode(self.pos, label)
def jump_target(self, label):
"""
Return a target that can be jumped to given a label. The label is
shared between the jumpers and the target.
"""
return JumpTargetNode(self.pos, label)
def label(self, name):
"Return a label with a name"
return LabelNode(self.pos, name)
def raise_exc(self, posinfo, exc_var, msg_val, fmt_args):
"""
Raise an exception given the positional information (see the `posinfo`
method), the exception type (PyExc_*), a formatted message string and
a list of values to be used for the format string.
"""
return RaiseNode(self.pos, posinfo, exc_var, msg_val, fmt_args)
def posinfo(self, posvars):
"""
Return position information given a list of position variables
(filename, lineno, column). This can be used for raising exceptions.
"""
return PositionInfoNode(self.pos, posinfo=posvars)
def error_handler(self, node):
"""
Wrap the given node, which may raise exceptions, in an error handler.
An error handler allows the code to clean up before propagating the
error, and finally returning an error indicator from the function.
"""
return ErrorHandler(self.pos, body=node,
error_label=self.label('error'),
cleanup_label=self.label('cleanup'))
def wrap(self, opaque_node, specialize_node_callback, **kwds):
"""
Wrap a node and type and return a NodeWrapper node. This node
will have to be handled by the caller in a code generator. The
specialize_node_callback is called when the NodeWrapper is
specialized by a Specializer.
"""
type = minitypes.TypeWrapper(self.context.gettype(opaque_node),
self.context)
return NodeWrapper(self.context.getpos(opaque_node), type,
opaque_node, specialize_node_callback, **kwds)
#
### Vectorization Functionality
#
def _vector_type(self, base_type, size):
return minitypes.VectorType(element_type=base_type, vector_size=size)
def vector_variable(self, variable, size):
"Return a vector variable for a data pointer variable"
type = self._vector_type(variable.type.dtype, size)
if size == 4:
name = 'xmm_%s' % variable.name
else:
name = 'ymm_%s' % variable.name
return VectorVariable(self.pos, type, name, variable=variable)
def vector_load(self, data_pointer, size):
"Load a SIMD vector of size `size` given an array operand variable"
type = self._vector_type(data_pointer.type.base_type, size)
return VectorLoadNode(self.pos, type, data_pointer, size=size)
def vector_store(self, data_pointer, vector_expr):
"Store a SIMD vector of size `size`"
assert data_pointer.type.base_type == vector_expr.type.element_type
return VectorStoreNode(self.pos, None, "=", data_pointer, vector_expr)
def vector_binop(self, operator, lhs, rhs):
"Perform a binary SIMD operation between two operands of the same type"
assert lhs.type == rhs.type, (lhs.type, rhs.type)
type = lhs.type
return VectorBinopNode(self.pos, type, operator, lhs=lhs, rhs=rhs)
def vector_unop(self, type, operator, operand):
return VectorUnopNode(self.pos, type, operator, operand)
def vector_const(self, type, constant):
return ConstantVectorNode(self.pos, type, constant=constant)
def noop_expr(self):
return NoopExpr(self.pos, type=None)
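# Illustrative sketch (an assumption, not code shipped with the library): a
# specializer could combine the vectorization helpers above to SIMDize an
# elementwise assignment, with `b` an ASTBuilder and the pointer variables
# hypothetical:
#
#   lhs_vec = b.vector_load(lhs_data, 4)             # 4-wide load, e.g. __m128
#   rhs_vec = b.vector_load(rhs_data, 4)
#   result  = b.vector_binop('+', lhs_vec, rhs_vec)  # operands must share a type
#   store   = b.vector_store(out_data, result)       # dtype must match the vector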
class DynamicArgumentASTBuilder(ASTBuilder):
"""
Create a function with a dynamic number of arguments. This means the
signature looks like
func(int *shape, float *data[n_ops], int *strides[n_ops])
To create minivect kernels supporting this signature, set the
astbuilder_cls attribute of Context to this class.
"""
def data_pointer(self, variable):
if not hasattr(variable, 'data_pointer'):
temp = self.temp(variable.type.dtype.pointer(),
variable.name + "_data_temp")
variable.data_pointer = temp
return variable.data_pointer
def _create_data_pointer(self, function, argument, i):
variable = argument.variable
temp = self.data_pointer(variable)
p = self.index(function.data_pointers, self.constant(i))
p = self.cast(p, variable.type.dtype.pointer())
assmt = self.assign(temp, p)
function.body.stats.insert(0, assmt)
return temp
def stridesvar(self, variable):
"Return the strides variable for the given array operand"
if not hasattr(variable, 'strides_pointer'):
temp = self.temp(self.context.strides_type,
variable.name + "_stride_temp")
variable.strides_pointer = temp
return variable.strides_pointer
def _create_strides_pointer(self, function, argument, i):
variable = argument.variable
temp = self.stridesvar(variable)
strides = self.index(function.strides_pointers, self.constant(i))
function.body.stats.insert(0, self.assign(temp, strides))
return temp
def function(self, name, body, args, shapevar=None, posinfo=None,
omp_size=None):
function = super(DynamicArgumentASTBuilder, self).function(
name, body, args, shapevar, posinfo, omp_size)
function.data_pointers = self.variable(
minitypes.void.pointer().pointer(), 'data_pointers')
function.strides_pointers = self.variable(
function.shape.type.pointer(), 'strides_pointer')
i = len(function.arrays) - 1
for argument in function.arrays[::-1]:
data_p = self._create_data_pointer(function, argument, i)
strides_p = self._create_strides_pointer(function, argument, i)
argument.data_pointer = data_p
argument.strides_pointer = strides_p
argument.used = False
i -= 1
argpos = 1
if posinfo:
argpos = 4
function.arguments.insert(argpos,
self.funcarg(function.strides_pointers))
function.arguments.insert(argpos,
self.funcarg(function.data_pointers))
self.create_function_type(function)
# print function.type
# print self.context.debug_c(
# function, specializers.StridedSpecializer, type(self))
return function
Context.astbuilder_cls = UndocClassAttribute(ASTBuilder)
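# A minimal sketch (assumed usage) of opting into the dynamic-argument
# signature documented on DynamicArgumentASTBuilder above:
#
#   class DynamicContext(Context):                 # hypothetical subclass
#       astbuilder_cls = DynamicArgumentASTBuilder
#
# Kernels built through such a context receive packed data/strides pointer
# arrays instead of one data/strides argument pair per array operand.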
class Position(object):
"Each node has a position which is an instance of this type."
def __init__(self, filename, line, col):
self.filename = filename
self.line = line
self.col = col
def __str__(self):
return "%s:%d:%d" % (self.filename, self.line, self.col)
class Node(miniutils.ComparableObjectMixin):
"""
Base class for AST nodes.
"""
is_expression = False
is_statlist = False
is_constant = False
is_assignment = False
is_unop = False
is_binop = False
is_node_wrapper = False
is_data_pointer = False
is_jump = False
is_label = False
is_temp = False
is_statement = False
is_sizeof = False
is_variable = False
is_function = False
is_funcarg = False
is_array_funcarg = False
is_specialized = False
child_attrs = []
def __init__(self, pos, **kwds):
self.pos = pos
vars(self).update(kwds)
def may_error(self, context):
"""
Return whether something may go wrong and we need to jump to an
error handler.
"""
visitor = minivisitor.MayErrorVisitor(context)
visitor.visit(self)
return visitor.may_error
def print_tree(self, context):
visitor = minivisitor.PrintTree(context)
visitor.visit(self)
@property
def children(self):
return [getattr(self, attr) for attr in self.child_attrs
if getattr(self, attr) is not None]
@property
def comparison_objects(self):
type = getattr(self, 'type', None)
if type is None:
return self.children
return tuple(self.children) + (type,)
def __eq__(self, other):
# Don't use isinstance here, compare on exact type to be consistent
# with __hash__. Override where sensible
return (isinstance(self, type(other)) and
self.comparison_objects == other.comparison_objects)
def __hash__(self):
h = hash(type(self))
for obj in self.comparison_objects:
h = h ^ hash(obj)
return h
class ExprNode(Node):
"Base class for expressions. Each node has a type."
is_expression = True
hoistable = False
need_temp = False
def __init__(self, pos, type, **kwds):
super(ExprNode, self).__init__(pos, **kwds)
self.type = type
class FunctionNode(Node):
"""
Function node. error_value and success_value are returned in case of
exceptions and success respectively.
.. attribute:: shape
the broadcast shape for all operands
.. attribute:: ndim
the ndim of the total broadcast shape
.. attribute:: arguments
all array arguments
.. attribute:: scalar_arguments
all non-array arguments
.. attribute:: posinfo
the position variables we can write to in case of an exception
.. attribute:: omp_size
the threshold of minimum data size needed before starting a parallel
section. May be overridden at any time before specialization time.
"""
is_function = True
child_attrs = ['body', 'arguments', 'scalar_arguments']
def __init__(self, pos, name, body, arguments, scalar_arguments,
shape, posinfo, error_value, success_value, omp_size):
super(FunctionNode, self).__init__(pos)
self.type = None # see ASTBuilder.create_function_type
self.name = name
self.body = body
self.arrays = [arg for arg in arguments if arg.type and arg.type.is_array]
self.arguments = arguments
self.scalar_arguments = scalar_arguments
self.shape = shape
self.posinfo = posinfo
self.error_value = error_value
self.success_value = success_value
self.omp_size = omp_size
self.args = dict((v.name, v) for v in arguments)
self.ndim = max(arg.type.ndim for arg in arguments
if arg.type and arg.type.is_array)
class FuncCallNode(ExprNode):
"""
Call a function given a pointer or its name (FuncNameNode)
"""
inline = False
child_attrs = ['func_or_pointer', 'args']
class FuncNameNode(ExprNode):
"""
Load an external function by its name.
"""
name = None
class ReturnNode(Node):
"Return an operand"
child_attrs = ['operand']
def __init__(self, pos, operand):
super(ReturnNode, self).__init__(pos)
self.operand = operand
class RaiseNode(Node):
"Raise a Python exception. The callee must hold the GIL."
child_attrs = ['posinfo', 'exc_var', 'msg_val', 'fmt_args']
def __init__(self, pos, posinfo, exc_var, msg_val, fmt_args):
super(RaiseNode, self).__init__(pos)
self.posinfo = posinfo
self.exc_var, self.msg_val, self.fmt_args = (exc_var, msg_val, fmt_args)
class PositionInfoNode(Node):
"""
Node that holds a position of where an error occurred. This position
needs to be returned to the callee if the callee supports it.
"""
class FunctionArgument(ExprNode):
"""
Argument to the FunctionNode. Array arguments contain multiple
actual arguments, e.g. the data and stride pointer.
.. attribute:: variable
some argument to the function (array or otherwise)
.. attribute:: variables
the actual variables this operand should be unpacked into
"""
child_attrs = ['variables']
is_funcarg = True
used = True
def __init__(self, pos, variable, variables):
super(FunctionArgument, self).__init__(pos, variable.type)
self.variables = variables
self.variable = variable
self.name = variable.name
self.args = dict((v.name, v) for v in variables)
class ArrayFunctionArgument(ExprNode):
"Array operand to the function"
child_attrs = ['data_pointer', 'strides_pointer']
is_array_funcarg = True
used = True
def __init__(self, pos, type, data_pointer, strides_pointer, **kwargs):
super(ArrayFunctionArgument, self).__init__(pos, type, **kwargs)
self.data_pointer = data_pointer
self.strides_pointer = strides_pointer
self.variables = [data_pointer, strides_pointer]
class PrintNode(Node):
"Print node for some arguments"
child_attrs = ['args']
class NDIterate(Node):
"""
Iterate in N dimensions. See :py:class:`ASTBuilder.nditerate`
"""
child_attrs = ['body']
def __init__(self, pos, body):
super(NDIterate, self).__init__(pos)
self.body = body
class ForNode(Node):
"""
A for loop, see :py:class:`ASTBuilder.for_`
"""
child_attrs = ['init', 'condition', 'step', 'body']
is_controlling_loop = False
is_tiling_loop = False
should_vectorize = False
is_fixup = False
def __init__(self, pos, init, condition, step, body, index=None):
super(ForNode, self).__init__(pos)
self.init = init
self.condition = condition
self.step = step
self.body = body
self.index = index or init.lhs
class IfNode(Node):
"An 'if' statement, see A for loop, see :py:class:`ASTBuilder.if_`"
child_attrs = ['cond', 'body', 'else_body']
should_vectorize = False
is_fixup = False
class StatListNode(Node):
"""
A node to wrap multiple statements, see :py:class:`ASTBuilder.stats`
"""
child_attrs = ['stats']
is_statlist = True
def __init__(self, pos, statements):
super(StatListNode, self).__init__(pos)
self.stats = statements
class ExprStatNode(Node):
"Turn an expression into a statement, see :py:class:`ASTBuilder.expr_stat`"
child_attrs = ['expr']
is_statement = True
class ExprNodeWithStatement(Node):
child_attrs = ['stat', 'expr']
class NodeWrapper(ExprNode):
"""
Adapt an opaque node to provide a consistent interface. This has to be
handled by the user's specializer. See :py:class:`ASTBuilder.wrap`
"""
is_node_wrapper = True
is_constant_scalar = False
child_attrs = []
def __init__(self, pos, type, opaque_node, specialize_node_callback,
**kwds):
super(NodeWrapper, self).__init__(pos, type)
self.opaque_node = opaque_node
self.specialize_node_callback = specialize_node_callback
vars(self).update(kwds)
def __hash__(self):
return hash(self.opaque_node)
def __eq__(self, other):
if getattr(other, 'is_node_wrapper', False):
return self.opaque_node == other.opaque_node
return NotImplemented
def __deepcopy__(self, memo):
kwds = dict(vars(self))
kwds.pop('opaque_node')
kwds.pop('specialize_node_callback')
kwds = copy.deepcopy(kwds, memo)
opaque_node = self.specialize_node_callback(self, memo)
return type(self)(opaque_node=opaque_node,
specialize_node_callback=self.specialize_node_callback,
**kwds)
class BinaryOperationNode(ExprNode):
"Base class for binary operations"
child_attrs = ['lhs', 'rhs']
def __init__(self, pos, type, lhs, rhs, **kwds):
super(BinaryOperationNode, self).__init__(pos, type, **kwds)
self.lhs, self.rhs = lhs, rhs
class BinopNode(BinaryOperationNode):
"Node for binary operations"
is_binop = True
def __init__(self, pos, type, operator, lhs, rhs, **kwargs):
super(BinopNode, self).__init__(pos, type, lhs, rhs, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.lhs, self.rhs)
class SingleOperandNode(ExprNode):
"Base class for operations with one operand"
child_attrs = ['operand']
def __init__(self, pos, type, operand, **kwargs):
super(SingleOperandNode, self).__init__(pos, type, **kwargs)
self.operand = operand
class AssignmentExpr(BinaryOperationNode):
is_assignment = True
class IfElseExprNode(ExprNode):
child_attrs = ['cond', 'lhs', 'rhs']
class PromotionNode(SingleOperandNode):
pass
class UnopNode(SingleOperandNode):
is_unop = True
def __init__(self, pos, type, operator, operand, **kwargs):
super(UnopNode, self).__init__(pos, type, operand, **kwargs)
self.operator = operator
@property
def comparison_objects(self):
return (self.operator, self.operand)
class CastNode(SingleOperandNode):
is_cast = True
class DereferenceNode(SingleOperandNode):
is_dereference = True
class SingleIndexNode(BinaryOperationNode):
is_index = True
class ConstantNode(ExprNode):
is_constant = True
def __init__(self, pos, type, value):
super(ConstantNode, self).__init__(pos, type)
self.value = value
class SizeofNode(ExprNode):
is_sizeof = True
class Variable(ExprNode):
"""
Represents use of a function argument in the function.
"""
is_variable = True
mangled_name = None
hoisted = False
def __init__(self, pos, type, name, **kwargs):
super(Variable, self).__init__(pos, type, **kwargs)
self.name = name
self.array_type = None
def __eq__(self, other):
return isinstance(other, Variable) and self.name == other.name
def __hash__(self):
return hash(self.name)
class ResolvedVariable(Variable):
child_attrs = ['element']
def __eq__(self, other):
return (isinstance(other, ResolvedVariable) and
self.element == other.element)
class ArrayAttribute(Variable):
"Denotes an attribute of array operands, e.g. the data or stride pointers"
def __init__(self, pos, type, arrayvar):
super(ArrayAttribute, self).__init__(pos, type,
arrayvar.name + self._name)
self.arrayvar = arrayvar
class DataPointer(ArrayAttribute):
"Reference to the start of an array operand"
_name = '_data'
class StridePointer(ArrayAttribute):
"Reference to the stride pointer of an array variable operand"
_name = '_strides'
#class ShapePointer(ArrayAttribute):
# "Reference to the shape pointer of an array operand."
# _name = '_shape'
class TempNode(Variable):
"A temporary of a certain type"
is_temp = True
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(id(self))
class OpenMPLoopNode(Node):
"""
Execute a loop in parallel.
"""
child_attrs = ['for_node', 'if_clause', 'lastprivates', 'privates']
class OpenMPConditionalNode(Node):
"""
Execute if_body if _OPENMP, otherwise execute else_body.
"""
child_attrs = ['if_body', 'else_body']
class PragmaForLoopNode(Node):
"""
Generate compiler-specific pragmas to aid things like SIMDization.
"""
child_attrs = ['for_node']
class ErrorHandler(Node):
"""
A node to handle errors. If there is an error handler in the outer scope,
the specializer will first make this error handler generate disposal code
for the wrapped AST body, and then jump to the error label of the parent
error handler. At the outermost (function) level, the error handler simply
returns an error indication.
.. attribute:: error_label
point to jump to in case of an error
.. attribute:: cleanup_label
point to jump to in the normal case
It generates the following:
.. code-block:: c
error_var = 0;
...
goto cleanup;
error:
error_var = 1;
cleanup:
...
if (error_var)
goto outer_error_label;
"""
child_attrs = ['error_var_init', 'body', 'cleanup_jump',
'error_target_label', 'error_set', 'cleanup_target_label',
'cascade']
error_var_init = None
cleanup_jump = None
error_target_label = None
error_set = None
cleanup_target_label = None
cascade = None
class JumpNode(Node):
"A jump to a jump target"
child_attrs = ['label']
def __init__(self, pos, label):
Node.__init__(self, pos)
self.label = label
class JumpTargetNode(JumpNode):
"A point to jump to"
class LabelNode(ExprNode):
"A goto label or memory address that we can jump to"
def __init__(self, pos, name):
super(LabelNode, self).__init__(pos, None)
self.name = name
self.mangled_name = None
class NoopExpr(ExprNode):
"Do nothing expression"
#
### Vectorization Functionality
#
class VectorVariable(Variable):
child_attrs = ['variable']
class VectorLoadNode(SingleOperandNode):
"Load a SIMD vector"
class VectorStoreNode(BinopNode):
"Store a SIMD vector"
class VectorBinopNode(BinopNode):
"Binary operation on SIMD vectors"
class VectorUnopNode(SingleOperandNode):
"Unary operation on SIMD vectors"
class ConstantVectorNode(ExprNode):
"Load the constant into the vector register"
########NEW FILE########
__FILENAME__ = minicode
# -*- coding: utf-8 -*-
"""
Code writers and formatters. Subclass CodeWriter to suit the needs of
a certain code generator backend.
"""
from __future__ import print_function, division, absolute_import
try:
from Cython.Compiler import Tempita as tempita
except ImportError:
try:
import tempita
except ImportError:
tempita = None
class CodeWriter(object):
"""
Write code as objects for later assembly.
.. attribute:: loop_levels
CodeWriter objects just before the start of each loop
.. attribute:: tiled_loop_levels
same as loop_levels, but takes into account tiled loop patterns
.. attribute:: cleanup_levels
CodeWriter objects just after the end of each loop
.. attribute:: declaration_levels
same as loop_levels, but a valid insertion point for C89 declarations
"""
error_handler = None
def __init__(self, context, buffer=None):
self.buffer = buffer or _CodeTree()
self.context = context
self.loop_levels = []
self.tiled_loop_levels = []
self.declaration_levels = []
@classmethod
def clone(cls, other, context, buffer):
return cls(context, buffer)
def insertion_point(self):
"""
Create an insertion point for the code writer. Any code written
to this insertion point (later on) is inserted in the output code at
the point where this method was called.
"""
result = self.clone(self, self.context, self.buffer.insertion_point())
result.loop_levels = list(self.loop_levels)
result.tiled_loop_levels = list(self.tiled_loop_levels)
result.declaration_levels = list(self.declaration_levels)
return result
def write(self, value):
self.buffer.output.append(value)
def put_label(self, label):
"Insert a label in the code"
self.write(label)
def put_goto(self, label):
"Jump to a label. Implement in subclasses"
class CCodeWriter(CodeWriter):
"""
Code writer to write C code. Has both a prototype buffer and an
implementation buffer. The prototype buffer will contain the C
prototypes, and the implementation buffer the actual function
code.
"""
def __init__(self, context, buffer=None, proto_code=None):
super(CCodeWriter, self).__init__(context, buffer)
if proto_code is None:
self.proto_code = type(self)(context, proto_code=False)
self.indent = 0
def put_label(self, label):
"Insert a C label"
self.putln('%s:' % self.mangle(label.name))
def put_goto(self, label):
"Jump to (goto) a label"
self.putln("goto %s;" % self.mangle(label.name))
def putln(self, s):
"Write a code string as a line. Also performs indentation"
self.indent -= s.count('}')
self.write("%s%s\n" % (self.indent * ' ', s))
self.indent += s.count('{')
def mangle(self, s):
"Mangle symbol names"
return "__mini_mangle_%s" % s
@classmethod
def clone(cls, other, context, buffer):
result = super(CCodeWriter, cls).clone(other, context, buffer)
result.indent = other.indent
return result
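# Illustrative sketch (assumed usage): putln tracks the brace nesting of the
# emitted C, so a hypothetical snippet such as
#
#   w = CCodeWriter(context)
#   w.putln("if (i < n) {")
#   w.putln("data[i] = 0;")    # written one indentation level deeper
#   w.putln("}")               # dedented again before being written
#
# yields properly indented output when formatted with CCodeStringFormatter.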
def sub_tempita(s, context, file=None, name=None):
"Run the tempita template engine on string the given string."
if not s:
return None
if file:
context['__name'] = "%s:%s" % (file, name)
elif name:
context['__name'] = name
if tempita is None:
raise RuntimeError("Tempita was not installed")
return tempita.sub(s, **context)
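# For example (illustrative, assuming Tempita is installed):
#   sub_tempita("hello {{name}}", {'name': 'world'})   # -> 'hello world'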
class TempitaCodeWriter(CodeWriter):
"""
Code writer which supports writing Tempita strings. See
http://pythonpaste.org/tempita/ for documentation on Tempita.
"""
def putln(self, string, context_dict):
self.write(sub_tempita(string, context_dict) + '\n')
class CodeFormatter(object):
"""
Default code formatting, which returns the formatted code as a list
of objects (the ones written to the :py:class:`minivect.codegen.CodeWriter`)
"""
def format(self, codewriter):
return codewriter.buffer.getvalue()
class CodeStringFormatter(CodeFormatter):
"Format code as strings"
def format(self, codewriter):
return "".join(codewriter.buffer.getvalue())
class CCodeStringFormatter(CodeStringFormatter):
"Format the prototype and code implementation"
def format(self, codewriter):
return ("".join(codewriter.proto_code.buffer.getvalue()),
"".join(codewriter.buffer.getvalue()))
class _CodeTree(object):
"""
See Cython/StringIOTree
"""
def __init__(self, output=None, condition=None):
self.prepended_children = []
self.output = output or []
def _getvalue(self, result):
for child in self.prepended_children:
child._getvalue(result)
result.extend(self.output)
def getvalue(self):
result = []
self._getvalue(result)
return result
def clone(self, output=None):
return type(self)(output)
def commit(self):
if self.output:
self.prepended_children.append(self.clone(self.output))
self.output = []
def insertion_point(self):
self.commit()
ip = self.clone()
self.prepended_children.append(ip)
return ip
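# Illustrative sketch (assumed usage) of insertion points: anything written to
# an insertion point later still ends up at the position where it was created.
#
#   w = CodeWriter(context)           # `context` is a minivect Context
#   w.write("A")
#   ip = w.insertion_point()
#   w.write("C")
#   ip.write("B")
#   CodeStringFormatter().format(w)   # -> "ABC"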
########NEW FILE########
__FILENAME__ = minierror
# -*- coding: utf-8 -*-
"""
Define some errors that may be raised by the compiler.
"""
from __future__ import print_function, division, absolute_import
class Error(Exception):
"Base exception class"
def __repr__(self):
return '%s()' % type(self).__name__
class InferTypeError(Error):
"Raised when types of values cannot be inferred"
class UnmappableTypeError(Error):
"Raised when a type cannot be mapped"
class UnpromotableTypeError(Error):
"Raised when the compiler does not know how to promote two types."
class UnmappableFormatSpecifierError(Error):
"Raised when a type cannot be mapped to a (printf) format specifier"
class InvalidTypeSpecification(Error):
"Raised when a type is sliced incorrectly."
class CompileError(Error):
"Raised for miscellaneous errors"
def __init__(self, node, msg):
self.node = node
self.msg = msg
def __str__(self):
if self.node.pos is not None:
return "%s:%s:%s: %s" % self.node.pos + self.msg
return self.msg
########NEW FILE########
__FILENAME__ = minitypes
# -*- coding: utf-8 -*-
"""
This module provides a minimal type system, and ways to promote types, as
well as ways to convert to an LLVM type system. A set of predefined types are
defined. Types may be sliced to turn them into array types, in the same way
as the memoryview syntax.
>>> char
char
>>> int8[:, :, :]
int8[:, :, :]
>>> int8.signed
True
>>> uint8
uint8
>>> uint8.signed
False
>>> char.pointer()
char *
>>> int_[:, ::1]
int[:, ::1]
>>> int_[::1, :]
int[::1, :]
>>> double[:, ::1, :]
Traceback (most recent call last):
...
InvalidTypeSpecification: Step may only be provided once, and only in the first or last dimension.
"""
from __future__ import print_function, division, absolute_import
__all__ = ['Py_ssize_t', 'void', 'char', 'uchar', 'short', 'ushort',
'int_', 'uint', 'long_', 'ulong', 'longlong', 'ulonglong',
'size_t', 'npy_intp', 'c_string_type', 'bool_', 'object_',
'float_', 'double', 'longdouble', 'float32', 'float64', 'float128',
'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64',
'complex64', 'complex128', 'complex256', 'struct', 'Py_uintptr_t']
import sys
import math
import copy
import struct as struct_
import types
import textwrap
from . import miniutils
from . import minierror
from .miniutils import *
from .miniutils import have_ctypes, ctypes
_plat_bits = struct_.calcsize('@P') * 8
if struct_.pack('i', 1)[0] == '\1':
nbo = '<' # little endian
else:
nbo = '>' # big endian
class TypeMapper(object):
"""
Maps foreign types to minitypes. Users of minivect should implement
this and pass it to :py:class:`minivect.miniast.Context`.
>>> import miniast
>>> context = miniast.Context()
>>> miniast.typemapper = TypeMapper(context)
>>> tm = context.typemapper
>>> tm.promote_types(int8, double)
double
>>> tm.promote_types(int8, uint8)
uint8
>>> tm.promote_types(int8, complex128)
complex128
>>> tm.promote_types(int8, object_)
PyObject *
>>> tm.promote_types(int64, float32)
float32
>>> tm.promote_types(int64, complex64)
complex64
>>> tm.promote_types(float32, float64)
float64
>>> tm.promote_types(float32, complex64)
complex64
>>> tm.promote_types(complex64, complex128)
complex128
>>> tm.promote_types(complex256, object_)
PyObject *
>>> tm.promote_types(float32.pointer(), Py_ssize_t)
float32 *
>>> tm.promote_types(float32.pointer(), Py_ssize_t)
float32 *
>>> tm.promote_types(float32.pointer(), uint8)
float32 *
>>> tm.promote_types(float32.pointer(), float64.pointer())
Traceback (most recent call last):
...
UnpromotableTypeError: (float32 *, float64 *)
>>> tm.promote_types(float32[:, ::1], float32[:, ::1])
float32[:, ::1]
>>> tm.promote_types(float32[:, ::1], float64[:, ::1])
float64[:, ::1]
>>> tm.promote_types(float32[:, ::1], float64[::1, :])
float64[:, :]
>>> tm.promote_types(float32[:, :], complex128[:, :])
complex128[:, :]
>>> tm.promote_types(int_[:, :], object_[:, ::1])
PyObject *[:, :]
>>> tm.promote_types(npy_intp, float_)
float
>>> tm.promote_types(npy_intp, float32)
float32
>>> float32 == float_
True
>>> float64 == double
True
>>> float128 == longdouble
True
"""
def __init__(self, context):
self.context = context
def map_type(self, opaque_type):
"Map a foreign type to a minitype"
if opaque_type.is_int:
return int_
elif opaque_type.is_float:
return float_
elif opaque_type.is_double:
return double
elif opaque_type.is_pointer:
return PointerType(self.map_type(opaque_type.base_type))
elif opaque_type.is_py_ssize_t:
return Py_ssize_t
elif opaque_type.is_char:
return char
else:
raise minierror.UnmappableTypeError(opaque_type)
def to_llvm(self, type):
"Return an LLVM type for the given type."
raise NotImplementedError(type)
def from_python(self, value):
"Get a type from a python value"
np = sys.modules.get('numpy', None)
if isinstance(value, float):
return double
elif isinstance(value, bool):
return bool_
elif isinstance(value, (int, long)):
if abs(value) < 1:
bits = 0
else:
bits = math.ceil(math.log(abs(value), 2))
if bits < 32:
return int_
elif bits < 64:
return int64
else:
raise ValueError("Cannot represent %s as int32 or int64", value)
elif isinstance(value, complex):
return complex128
elif isinstance(value, str):
return c_string_type
elif np and isinstance(value, np.ndarray):
dtype = map_dtype(value.dtype)
return ArrayType(dtype, value.ndim,
is_c_contig=value.flags['C_CONTIGUOUS'],
is_f_contig=value.flags['F_CONTIGUOUS'])
else:
return object_
# raise minierror.UnmappableTypeError(type(value))
def promote_numeric(self, type1, type2):
"Promote two numeric types"
type = max([type1, type2], key=lambda type: type.rank)
if type1.kind != type2.kind:
def itemsize(type):
return type.itemsize // 2 if type.is_complex else type.itemsize
size = max(itemsize(type1), itemsize(type2))
if type.is_complex:
type = find_type_of_size(size * 2, complextypes)
elif type.is_float:
type = find_type_of_size(size, floating)
else:
assert type.is_int
type = find_type_of_size(size, integral)
return type
def promote_arrays(self, type1, type2):
"Promote two array types in an expression to a new array type"
equal_ndim = type1.ndim == type2.ndim
return ArrayType(self.promote_types(type1.dtype, type2.dtype),
ndim=max((type1.ndim, type2.ndim)),
is_c_contig=(equal_ndim and type1.is_c_contig and
type2.is_c_contig),
is_f_contig=(equal_ndim and type1.is_f_contig and
type2.is_f_contig))
def promote_types(self, type1, type2):
"Promote two arbitrary types"
string_types = c_string_type, char.pointer()
if type1.is_pointer and type2.is_int_like:
return type1
elif type2.is_pointer and type2.is_int_like:
return type2
elif type1.is_object or type2.is_object:
return object_
elif type1.is_numeric and type2.is_numeric:
return self.promote_numeric(type1, type2)
elif type1.is_array and type2.is_array:
return self.promote_arrays(type1, type2)
elif type1 in string_types and type2 in string_types:
return c_string_type
elif type1.is_bool and type2.is_bool:
return bool_
else:
raise minierror.UnpromotableTypeError((type1, type2))
def map_dtype(dtype):
"""
Map a NumPy dtype to a minitype.
>>> import numpy as np
>>> map_dtype(np.dtype(np.int32))
int32
>>> map_dtype(np.dtype(np.int64))
int64
>>> map_dtype(np.dtype(np.object))
PyObject *
>>> map_dtype(np.dtype(np.float64))
float64
>>> map_dtype(np.dtype(np.complex128))
complex128
"""
import numpy as np
if dtype.byteorder not in ('=', nbo, '|') and dtype.kind in ('iufbc'):
raise minierror.UnmappableTypeError(
"Only native byteorder is supported", dtype)
item_idx = int(math.log(dtype.itemsize, 2))
if dtype.kind == 'i':
return [int8, int16, int32, int64][item_idx]
elif dtype.kind == 'u':
return [uint8, uint16, uint32, uint64][item_idx]
elif dtype.kind == 'f':
if dtype.itemsize == 2:
pass # half floats not supported yet
elif dtype.itemsize == 4:
return float32
elif dtype.itemsize == 8:
return float64
elif dtype.itemsize == 16:
return float128
elif dtype.kind == 'b':
return int8
elif dtype.kind == 'c':
if dtype.itemsize == 8:
return complex64
elif dtype.itemsize == 16:
return complex128
elif dtype.itemsize == 32:
return complex256
elif dtype.kind == 'V':
fields = [(name, map_dtype(dtype.fields[name][0]))
for name in dtype.names]
is_aligned = dtype.alignment != 1
return struct(fields, packed=not getattr(dtype, 'isalignedstruct',
is_aligned))
elif dtype.kind == 'O':
return object_
def create_dtypes():
import numpy as np
minitype2dtype = {
int8 : np.int8,
int16 : np.int16,
int32 : np.int32,
int64 : np.int64,
uint8 : np.uint8,
uint16 : np.uint16,
uint32 : np.uint32,
uint64 : np.uint64,
longdouble: np.longdouble,
double : np.float64,
float_ : np.float32,
short : np.dtype('h'),
int_ : np.dtype('i'),
long_ : np.dtype('l'),
longlong : np.longlong,
ushort : np.dtype('H'),
uint : np.dtype('I'),
ulong : np.dtype('L'),
ulonglong: np.ulonglong,
complex64: np.complex64,
complex128: np.complex128,
complex256: getattr(np, 'complex256', None),
bool_ : np.bool,
object_ : np.object,
}
return dict((k, np.dtype(v)) for k, v in minitype2dtype.iteritems())
_dtypes = None
def map_minitype_to_dtype(type):
global _dtypes
if type.is_struct:
import numpy as np
fields = [(field_name, map_minitype_to_dtype(field_type))
for field_name, field_type in type.fields]
return np.dtype(fields, align=not type.packed)
if _dtypes is None:
_dtypes = create_dtypes()
if type.is_array:
type = type.dtype
dtype = _dtypes[type]
assert dtype is not None, "dtype not supported in this numpy build"
return dtype
def find_type_of_size(size, typelist):
for type in typelist:
if type.itemsize == size:
return type
assert False, "Type of size %d not found: %s" % (size, typelist)
NONE_KIND = 0
BOOL_KIND = 1
INT_KIND = 2
FLOAT_KIND = 3
COMPLEX_KIND = 4
class Type(miniutils.ComparableObjectMixin):
"""
Base class for all types.
.. attribute:: subtypes
The list of subtypes to allow comparing and hashing them recursively
"""
is_array = False
is_pointer = False
is_typewrapper = False
is_bool = False
is_numeric = False
is_py_ssize_t = False
is_char = False
is_int = False
is_float = False
is_c_string = False
is_object = False
is_function = False
is_int_like = False
is_complex = False
is_void = False
kind = NONE_KIND
subtypes = []
mutated = True
_ctypes_type = None
def __init__(self, **kwds):
vars(self).update(kwds)
self.qualifiers = kwds.get('qualifiers', frozenset())
def qualify(self, *qualifiers):
"Qualify this type with a qualifier such as ``const`` or ``restrict``"
qualifiers = list(qualifiers)
qualifiers.extend(self.qualifiers)
attribs = dict(vars(self), qualifiers=qualifiers)
return type(self)(**attribs)
def unqualify(self, *unqualifiers):
"Remove the given qualifiers from the type"
unqualifiers = set(unqualifiers)
qualifiers = [q for q in self.qualifiers if q not in unqualifiers]
attribs = dict(vars(self), qualifiers=qualifiers)
return type(self)(**attribs)
def pointer(self):
"Get a pointer to this type"
return PointerType(self)
@property
def subtype_list(self):
return [getattr(self, subtype) for subtype in self.subtypes]
@property
def comparison_type_list(self):
return self.subtype_list
def _is_object(self, context):
return context.is_object(self)
def __eq__(self, other):
# Don't use isinstance here, compare on exact type to be consistent
# with __hash__. Override where sensible
cmps = self.comparison_type_list
if not cmps:
return id(self) == id(other)
return (isinstance(self, type(other)) and
cmps == other.comparison_type_list)
def __ne__(self, other):
return not self == other
def __hash__(self):
cmps = self.comparison_type_list
if not cmps:
return hash(id(self))
h = hash(type(self))
for subtype in cmps:
h = h ^ hash(subtype)
return h
def __getitem__(self, item):
"""
Support array type creation by slicing, e.g. double[:, :] specifies
a 2D strided array of doubles. The syntax is the same as for
Cython memoryviews.
"""
assert isinstance(item, (tuple, slice))
def verify_slice(s):
if s.start or s.stop or s.step not in (None, 1):
raise minierror.InvalidTypeSpecification(
"Only a step of 1 may be provided to indicate C or "
"Fortran contiguity")
if isinstance(item, tuple):
step_idx = None
for idx, s in enumerate(item):
verify_slice(s)
if s.step and (step_idx or idx not in (0, len(item) - 1)):
raise minierror.InvalidTypeSpecification(
"Step may only be provided once, and only in the "
"first or last dimension.")
if s.step == 1:
step_idx = idx
return ArrayType(self, len(item),
is_c_contig=step_idx == len(item) - 1,
is_f_contig=step_idx == 0)
else:
verify_slice(item)
return ArrayType(self, 1, is_c_contig=bool(item.step))
def declare(self):
return str(self)
def to_llvm(self, context):
"Get a corresponding llvm type from this type"
return context.to_llvm(self)
def to_ctypes(self):
"""
Convert type to ctypes. The result may be cached!
"""
if self._ctypes_type is None:
from . import ctypes_conversion
self._ctypes_type = ctypes_conversion.convert_to_ctypes(self)
self.mutated = False
assert not self.mutated
return self._ctypes_type
def get_dtype(self):
return map_minitype_to_dtype(self)
def is_string(self):
return self.is_c_string or self == char.pointer()
def __getattr__(self, attr):
if attr.startswith('is_'):
return False
return getattr(type(self), attr)
def __call__(self, *args):
"""Return a function with return_type and args set
"""
if len(args) == 1 and not isinstance(args[0], Type):
# Cast in Python space
# TODO: Create proxy object
# TODO: Fully customizable type system (do this in Numba, not
# minivect)
return args[0]
return FunctionType(self, args)
class KeyHashingType(Type):
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
return hasattr(other, 'key') and self.key == other.key
class ArrayType(Type):
"""
An array type. ArrayType may be sliced to obtain a subtype:
>>> double[:, :, ::1][1:]
double[:, ::1]
>>> double[:, :, ::1][:-1]
double[:, :]
>>> double[::1, :, :][:-1]
double[::1, :]
>>> double[::1, :, :][1:]
double[:, :]
"""
is_array = True
subtypes = ['dtype']
def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,
inner_contig=False, broadcasting=None):
super(ArrayType, self).__init__()
assert dtype is not None
self.dtype = dtype
self.ndim = ndim
self.is_c_contig = is_c_contig
self.is_f_contig = is_f_contig
if ndim == 1 and (is_c_contig or is_f_contig):
self.is_c_contig = True
self.is_f_contig = True
self.inner_contig = inner_contig or is_c_contig or is_f_contig
self.broadcasting = broadcasting
@property
def comparison_type_list(self):
return [self.dtype, self.is_c_contig, self.is_f_contig,
self.inner_contig, self.ndim]
def pointer(self):
raise Exception("You probably want a pointer type to the dtype")
def to_llvm(self, context):
# raise Exception("Obtain a pointer to the dtype and convert that "
# "to an LLVM type")
return context.to_llvm(self)
def __repr__(self):
axes = [":"] * self.ndim
if self.is_c_contig and self.ndim > 0:
axes[-1] = "::1"
elif self.is_f_contig and self.ndim > 0:
axes[0] = "::1"
return "%s[%s]" % (self.dtype, ", ".join(axes))
def copy(self, **kwargs):
if 'dtype' in kwargs:
assert kwargs['dtype'] is not None
array_type = copy.copy(self)
vars(array_type).update(kwargs)
return array_type
@property
def strided(self):
type = self.copy()
type.is_c_contig = False
type.is_f_contig = False
type.inner_contig = False
type.broadcasting = None
return type
def __getitem__(self, index):
assert isinstance(index, slice)
assert index.step is None
assert index.start is not None or index.stop is not None
start = 0
stop = self.ndim
if index.start is not None:
start = index.start
if index.stop is not None:
stop = index.stop
ndim = len(range(self.ndim)[start:stop])
if ndim == 0:
type = self.dtype
elif ndim > 0:
type = self.strided
type.ndim = ndim
type.is_c_contig = self.is_c_contig and stop == self.ndim
type.is_f_contig = self.is_f_contig and start == 0
type.inner_contig = type.is_c_contig or type.is_f_contig
if type.broadcasting:
type.broadcasting = self.broadcasting[start:stop]
else:
raise IndexError(index, ndim)
return type
class PointerType(Type):
is_pointer = True
subtypes = ['base_type']
def __init__(self, base_type, **kwds):
super(PointerType, self).__init__(**kwds)
self.base_type = base_type
self.itemsize = struct_.calcsize("P")
def __repr__(self):
return "%s *%s" % (self.base_type, " ".join(self.qualifiers))
def to_llvm(self, context):
if self.base_type.is_void:
llvm_base_type = int_.to_llvm(context)
else:
llvm_base_type = self.base_type.to_llvm(context)
return llvm.core.Type.pointer(llvm_base_type)
class CArrayType(Type):
is_carray = True
subtypes = ['base_type']
def __init__(self, base_type, size, **kwds):
super(CArrayType, self).__init__(**kwds)
self.base_type = base_type
self.size = size
def __repr__(self):
return "%s[%d]" % (self.base_type, self.size)
def to_llvm(self, context):
return llvm.core.Type.array(self.base_type.to_llvm(context), self.size)
class TypeWrapper(Type):
is_typewrapper = True
subtypes = ['opaque_type']
def __init__(self, opaque_type, context, **kwds):
super(TypeWrapper, self).__init__(**kwds)
self.opaque_type = opaque_type
self.context = context
def __repr__(self):
return self.context.declare_type(self)
def __deepcopy__(self, memo):
return self
class NamedType(Type):
name = None
def __eq__(self, other):
return isinstance(other, NamedType) and self.name == other.name
__hash__ = Type.__hash__ # !@#$ py3k
def __repr__(self):
if self.qualifiers:
return "%s %s" % (self.name, " ".join(self.qualifiers))
return str(self.name)
class NumericType(NamedType):
"""
Base class for numeric types.
.. attribute:: name
name of the type
.. attribute:: itemsize
sizeof(type)
.. attribute:: rank
ordering of numeric types
"""
is_numeric = True
class IntType(NumericType):
is_int = True
is_int_like = True
name = "int"
signed = True
rank = 4
itemsize = 4
typecode = None
kind = INT_KIND
def __init__(self, typecode=None, **kwds):
super(IntType, self).__init__(**kwds)
self.typecode = typecode
if typecode is not None:
self.itemsize = struct_.calcsize(typecode)
def to_llvm(self, context):
if self.itemsize == 1:
return lc.Type.int(8)
elif self.itemsize == 2:
return lc.Type.int(16)
elif self.itemsize == 4:
return lc.Type.int(32)
else:
assert self.itemsize == 8, self
return lc.Type.int(64)
def declare(self):
if self.name.endswith(('16', '32', '64')):
return self.name + "_t"
else:
return str(self)
class BoolType(IntType):
is_bool = True
name = "bool"
kind = BOOL_KIND
rank = 0
def __repr__(self):
return ("int %s" % " ".join(self.qualifiers)).rstrip()
def __str__(self):
return ("bool %s" % " ".join(self.qualifiers)).rstrip()
def to_llvm(self, context):
return llvm.core.Type.int(1)
class FloatType(NumericType):
is_float = True
kind = FLOAT_KIND
def declare(self):
if self.itemsize == 4:
return "float"
elif self.itemsize == 8:
return "double"
else:
return str(self)
@property
def comparison_type_list(self):
return self.subtype_list + [self.itemsize]
def __eq__(self, other):
return isinstance(other, FloatType) and self.itemsize == other.itemsize
__hash__ = NumericType.__hash__
def to_llvm(self, context):
if self.itemsize == 4:
return lc.Type.float()
elif self.itemsize == 8:
return lc.Type.double()
else:
is_ppc, is_x86 = get_target_triple()
if self.itemsize == 16:
if is_ppc:
return lc.Type.ppc_fp128()
else:
return lc.Type.fp128()
else:
assert self.itemsize == 10 and is_x86
return lc.Type.x86_fp80()
class ComplexType(NumericType):
is_complex = True
subtypes = ['base_type']
kind = COMPLEX_KIND
class Py_ssize_t_Type(IntType):
is_py_ssize_t = True
name = "Py_ssize_t"
rank = 9
signed = True
def __init__(self, **kwds):
super(Py_ssize_t_Type, self).__init__(**kwds)
if have_ctypes:
if hasattr(ctypes, 'c_ssize_t'):
self.itemsize = ctypes.sizeof(ctypes.c_ssize_t)
else:
self.itemsize = size_t.itemsize
else:
self.itemsize = _plat_bits // 8
class NPyIntp(IntType):
is_numpy_intp = True
name = "npy_intp"
rank = 10
def __init__(self, **kwds):
super(NPyIntp, self).__init__(**kwds)
ctypes_array = np.empty(0).ctypes.strides
self.itemsize = ctypes.sizeof(ctypes_array._type_)
class CharType(IntType):
is_char = True
name = "char"
rank = 1
signed = True
def to_llvm(self, context):
return lc.Type.int(8)
class CStringType(Type):
is_c_string = True
def __repr__(self):
return "const char *"
def to_llvm(self, context):
return char.pointer().to_llvm(context)
class VoidType(NamedType):
is_void = True
name = "void"
def to_llvm(self, context):
return lc.Type.void()
class ObjectType(Type):
is_object = True
itemsize = VoidType().pointer().itemsize
def __repr__(self):
return "PyObject *"
def pass_by_ref(type):
return type.is_struct or type.is_complex or type.is_datetime or type.is_timedelta
class Function(object):
"""
Function types may be called with Python functions to create a Function
object. This may be used by minivect users for their own purposes, e.g.
@double(double, double)
def myfunc(...):
...
"""
def __init__(self, signature, py_func):
self.signature = signature
self.py_func = py_func
def __call__(self, *args, **kwargs):
"""
Implement this to pass the callable test for classmethod/staticmethod.
E.g.
@classmethod
@void()
def m(self):
...
"""
raise minierror.Error("Not a callable function")
class FunctionType(Type):
subtypes = ['return_type', 'args']
is_function = True
is_vararg = False
struct_by_reference = False
def __init__(self, return_type, args, name=None, is_vararg=False, **kwds):
super(FunctionType, self).__init__(**kwds)
self.return_type = return_type
self.args = tuple(args)
self.name = name
self.is_vararg = is_vararg
def to_llvm(self, context):
assert self.return_type is not None
self = self.actual_signature
arg_types = [arg_type.pointer() if arg_type.is_function else arg_type
for arg_type in self.args]
return lc.Type.function(self.return_type.to_llvm(context),
[arg_type.to_llvm(context)
for arg_type in arg_types],
self.is_vararg)
def __repr__(self):
args = [str(arg) for arg in self.args]
if self.is_vararg:
args.append("...")
if self.name:
namestr = self.name
else:
namestr = ''
return "%s (*%s)(%s)" % (self.return_type, namestr, ", ".join(args))
@property
def actual_signature(self):
"""
Passing structs by value is not properly supported for different
calling conventions in LLVM, so we take an extra argument
pointing to a caller-allocated struct value.
"""
if self.struct_by_reference:
args = []
for arg in self.args:
if pass_by_ref(arg):
arg = arg.pointer()
args.append(arg)
return_type = self.return_type
if pass_by_ref(self.return_type):
return_type = void
args.append(self.return_type.pointer())
self = FunctionType(return_type, args)
return self
@property
def struct_return_type(self):
# Function returns a struct.
return self.return_type.pointer()
def __call__(self, *args):
if len(args) != 1 or isinstance(args[0], Type):
return super(FunctionType, self).__call__(*args)
assert self.return_type is not None
assert self.args is not None
func, = args
return Function(self, func)
class VectorType(Type):
subtypes = ['element_type']
is_vector = True
vector_size = None
def __init__(self, element_type, vector_size, **kwds):
super(VectorType, self).__init__(**kwds)
assert ((element_type.is_int or element_type.is_float) and
element_type.itemsize in (4, 8)), element_type
self.element_type = element_type
self.vector_size = vector_size
self.itemsize = element_type.itemsize * vector_size
def to_llvm(self, context):
return lc.Type.vector(self.element_type.to_llvm(context),
self.vector_size)
@property
def comparison_type_list(self):
return self.subtype_list + [self.vector_size]
def __repr__(self):
itemsize = self.element_type.itemsize
if self.element_type.is_float:
if itemsize == 4:
return '__m128'
else:
return '__m128d'
else:
if itemsize == 4:
return '__m128i'
else:
raise NotImplementedError
def _sort_types_key(field_type):
if field_type.is_complex:
return field_type.base_type.rank * 2
elif field_type.is_numeric or field_type.is_struct:
return field_type.rank
elif field_type.is_vector:
return _sort_types_key(field_type.element_type) * field_type.vector_size
elif field_type.is_carray:
return _sort_types_key(field_type.base_type) * field_type.size
elif field_type.is_pointer or field_type.is_object or field_type.is_array:
return 8
else:
return 1
def _sort_key(keyvalue):
field_name, field_type = keyvalue
return _sort_types_key(field_type)
def sort_types(types_dict):
# reverse sort on rank, forward sort on name
d = {}
for field in types_dict.iteritems():
key = _sort_key(field)
d.setdefault(key, []).append(field)
def key(keyvalue):
field_name, field_type = keyvalue
return field_name
fields = []
for rank in sorted(d, reverse=True):
fields.extend(sorted(d[rank], key=key))
return fields
class struct(Type):
"""
Create a struct type. Fields may be ordered or unordered. Unordered fields
will be ordered from big types to small types (for better alignment).
>>> struct([('a', int_), ('b', float_)], name='Foo') # ordered struct
struct Foo { int a, float b }
>>> struct(a=int_, b=float_, name='Foo') # unordered struct
struct Foo { float b, int a }
>>> struct(a=int32, b=int32, name='Foo') # unordered struct
struct Foo { int32 a, int32 b }
>>> S = struct(a=complex128, b=complex64, c=struct(f1=double, f2=double, f3=int32))
>>> S
struct { struct { double f1, double f2, int32 f3 } c, complex128 a, complex64 b }
>>> S.offsetof('a')
24
"""
is_struct = True
def __init__(self, fields=(), name=None, readonly=False, packed=False, **kwargs):
super(struct, self).__init__()
if fields and kwargs:
raise minierror.InvalidTypeSpecification(
"The struct must be either ordered or unordered")
if kwargs:
fields = sort_types(kwargs)
self.fields = list(fields)
self.name = name
self.readonly = readonly
self.fielddict = dict(self.fields)
self.packed = packed
self.update_mutated()
def copy(self):
return struct(self.fields, self.name, self.readonly, self.packed)
def __eq__(self, other):
return other.is_struct and self.fields == other.fields
def __repr__(self):
if self.name:
name = self.name + ' '
else:
name = ''
return 'struct %s{ %s }' % (
name, ", ".join("%s %s" % (field_type, field_name)
for field_name, field_type in self.fields))
def to_llvm(self, context):
if self.packed:
lstruct = llvm.core.Type.packed_struct
else:
lstruct = llvm.core.Type.struct
return lstruct([field_type.to_llvm(context)
for field_name, field_type in self.fields])
@property
def comparison_type_list(self):
return self.fields
@property
def subtype_list(self):
return [field[1] for field in self.fields]
def __hash__(self):
return hash(tuple(self.fields))
def is_prefix(self, other_struct):
other_fields = other_struct.fields[:len(self.fields)]
return self.fields == other_fields
def add_field(self, name, type):
assert name not in self.fielddict
self.fielddict[name] = type
self.fields.append((name, type))
self.mutated = True
def update_mutated(self):
self.rank = sum([_sort_key(field) for field in self.fields])
self.mutated = False
def offsetof(self, field_name):
"""
Compute the offset of a field. Must be used only after mutation has
finished.
"""
ctype = self.to_ctypes()
return getattr(ctype, field_name).offset
def getsize(ctypes_name, default):
try:
return ctypes.sizeof(getattr(ctypes, ctypes_name))
except ImportError:
return default
def get_target_triple():
target_machine = llvm.ee.TargetMachine.new()
is_ppc = target_machine.triple.startswith("ppc")
is_x86 = target_machine.triple.startswith("x86")
return is_ppc, is_x86
#
### Internal types
#
c_string_type = CStringType()
void = VoidType()
#
### Public types
#
try:
npy_intp = NPyIntp()
except ImportError:
npy_intp = None
size_t = IntType(name="size_t", rank=8.5,
itemsize=getsize('c_size_t', _plat_bits // 8), signed=False)
Py_ssize_t = Py_ssize_t_Type()
Py_uintptr_t = IntType(name='Py_uintptr_t',
itemsize=getsize('c_void_p', Py_ssize_t.itemsize),
rank=8.5)
char = CharType(name="char", typecode='b')
short = IntType(name="short", rank=2, typecode='h')
int_ = IntType(name="int", rank=4, typecode='i')
long_ = IntType(name="long", rank=5, typecode='l')
longlong = IntType(name="PY_LONG_LONG", rank=8, typecode='q')
uchar = CharType(name="unsigned char", signed=False, typecode='B')
ushort = IntType(name="unsigned short", rank=2.5,
typecode='H', signed=False)
uint = IntType(name="unsigned int", rank=4.5, typecode='I', signed=False)
ulong = IntType(name="unsigned long", rank=5.5, typecode='L', signed=False)
ulonglong = IntType(name="unsigned PY_LONG_LONG", rank=8.5,
typecode='Q', signed=False)
float_ = FloatType(name="float", rank=20, itemsize=4)
double = FloatType(name="double", rank=21, itemsize=8)
longdouble = FloatType(name="long double", rank=22,
itemsize=ctypes.sizeof(ctypes.c_longdouble))
bool_ = BoolType()
object_ = ObjectType()
int8 = IntType(name="int8", rank=1, itemsize=1)
int16 = IntType(name="int16", rank=2, itemsize=2)
int32 = IntType(name="int32", rank=4, itemsize=4)
int64 = IntType(name="int64", rank=8, itemsize=8)
uint8 = IntType(name="uint8", rank=1.5, signed=False, itemsize=1)
uint16 = IntType(name="uint16", rank=2.5, signed=False, itemsize=2)
uint32 = IntType(name="uint32", rank=4.5, signed=False, itemsize=4)
uint64 = IntType(name="uint64", rank=8.5, signed=False, itemsize=8)
float32 = FloatType(name="float32", rank=20, itemsize=4)
float64 = FloatType(name="float64", rank=21, itemsize=8)
float128 = FloatType(name="float128", rank=22, itemsize=16)
complex64 = ComplexType(name="complex64", base_type=float32,
rank=30, itemsize=8)
complex128 = ComplexType(name="complex128", base_type=float64,
rank=31, itemsize=16)
complex256 = ComplexType(name="complex256", base_type=float128,
rank=32, itemsize=32)
integral = []
native_integral = []
floating = []
complextypes = []
for typename in __all__:
minitype = globals()[typename]
if minitype is None:
continue
if minitype.is_int:
integral.append(minitype)
elif minitype.is_float:
floating.append(minitype)
elif minitype.is_complex:
complextypes.append(minitype)
numeric = integral + floating + complextypes
native_integral.extend((Py_ssize_t, size_t))
integral.sort(key=_sort_types_key)
native_integral = [minitype for minitype in integral
if minitype.typecode is not None]
floating.sort(key=_sort_types_key)
complextypes.sort(key=_sort_types_key)
def get_utility():
import numpy
return textwrap.dedent("""\
#include <stdint.h>
#ifndef HAVE_LONGDOUBLE
#define HAVE_LONGDOUBLE %d
#endif
typedef struct {
float real;
float imag;
} complex64;
typedef struct {
double real;
double imag;
} complex128;
#if HAVE_LONGDOUBLE
typedef struct {
long double real;
long double imag;
} complex256;
#endif
typedef float float32;
typedef double float64;
#if HAVE_LONGDOUBLE
typedef long double float128;
#endif
""" % hasattr(numpy, 'complex256'))
if __name__ == '__main__':
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = miniutils
# -*- coding: utf-8 -*-
"""
Miscellaneous (convenience) utilities.
"""
from __future__ import print_function, division, absolute_import
__all__ = ['ctypes', 'np', 'llvm', 'lc', 'MiniFunction']
try:
import __builtin__ as builtins
except ImportError:
import builtins
class UnavailableImport(object):
def __init__(self, import_name):
self.import_name = import_name
def __getattr__(self, attr):
__import__(self.import_name)
try:
import ctypes
have_ctypes = True
except ImportError:
ctypes = UnavailableImport("ctypes")
have_ctypes = False
try:
import numpy as np
except ImportError:
np = UnavailableImport("np")
try:
import llvm.core
from llvm import core as lc
except ImportError:
llvm = UnavailableImport("llvm")
lc = UnavailableImport("llvm.core")
from . import treepath
#
### Convenience utilities
#
def build_kernel_call(func_name, signature, miniargs, builder):
"""
Build the inner-loop statement that calls the scalar kernel `func_name`
with scalar arguments and assigns the result to the output operand.
"""
# Build the kernel function signature
funcname = builder.funcname(signature, func_name, is_external=False)
# Generate 'lhs[i, j] = kernel(A[i, j], B[i, j])'
lhs = miniargs[0].variable
kernel_args = [arg.variable for arg in miniargs[1:]]
funccall = builder.funccall(funcname, kernel_args, inline=True)
assmt = builder.assign(lhs, funccall)
if lhs.type.is_object and not lhs.type.is_array:
assmt = builder.stats(builder.decref(lhs), assmt)
return assmt
def specialize(context, specializer_cls, ast, print_tree=False):
"Specialize an AST with given specializer and compile"
context = context or getcontext()
specializers = [specializer_cls]
result = next(iter(context.run(ast, specializers, print_tree=print_tree)))
_, specialized_ast, _, code_result = result
if not context.use_llvm:
prototype, code_result = code_result
return specialized_ast, code_result
class MiniFunction(object):
"""
Convenience class to compile a function using LLVM and to invoke the
function with ctypes given numpy arrays as input.
"""
def __init__(self, context, specializer, variables, expr, name=None):
self.b = context.astbuilder
self.context = context
self.specializer = specializer
self.variables = variables
self.minifunc = self.b.build_function(variables, expr, name)
self.specialized_ast, (self.lfunc, self.ctypes_func) = specialize(
context, specializer, self.minifunc)
def get_ctypes_func_and_args(self, arrays):
from .ctypes_conversion import get_data_pointer
first_array = arrays[0]
shape = first_array.shape
for variable, array in zip(self.variables, arrays):
for dim, extent in enumerate(array.shape):
if extent != shape[dim] and extent != 1:
raise ValueError("Differing extents in dim %d (%s, %s)" %
(dim, extent, shape[dim]))
args = [first_array.ctypes.shape]
for variable, array in zip(self.variables, arrays):
if variable.type.is_array:
data_pointer = get_data_pointer(array, variable.type)
args.append(data_pointer)
if not self.specializer.is_contig_specializer:
args.append(array.ctypes.strides)
else:
raise NotImplementedError
return args
def __call__(self, *args, **kwargs):
import numpy as np
# print self.minifunc.ndim
# self.minifunc.print_tree(self.context)
# print self.context.debug_c(self.minifunc, self.specializer)
out = kwargs.pop('out', None)
assert not kwargs, kwargs
if out is None:
from . import minitypes
dtype = minitypes.map_minitype_to_dtype(self.variables[0].type)
broadcast = np.broadcast(*args)
out = np.empty(broadcast.shape, dtype=dtype)
arrays = [out]
arrays.extend(args)
assert len(arrays) == len(self.variables)
args = self.get_ctypes_func_and_args(arrays)
self.ctypes_func(*args)
return out
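# Illustrative usage sketch (the variable types, the expression construction
# and the specializer are assumptions; see ASTBuilder and the specializers
# module for the real expression-building API):
#
#   b = context.astbuilder
#   out = b.variable(double[:, :], 'out')
#   x = b.variable(double[:, :], 'x')
#   y = b.variable(double[:, :], 'y')
#   expr = ...                        # some elementwise expression over x and y
#   func = MiniFunction(context, specializer_cls, [out, x, y], expr)
#   result = func(x_array, y_array)   # `out` is allocated from the broadcast shape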
def xpath(ast, expr):
return treepath.find_all(ast, expr)
# Compatibility with Python 2.4
def any(it):
for obj in it:
if obj:
return True
return False
def all(it):
for obj in it:
if not obj:
return False
return True
class ComparableObjectMixin(object):
"Make sure subclasses implement comparison and hashing methods"
def __hash__(self):
"Implement in subclasses"
raise NotImplementedError
def __eq__(self, other):
"Implement in subclasses"
return NotImplemented
########NEW FILE########
__FILENAME__ = minivisitor
# -*- coding: utf-8 -*-
"""
Adapted from Cython/Compiler/Visitor.py, see this module for detailed
explanations.
"""
from __future__ import print_function, division, absolute_import
import inspect
miniast = None # avoid circular import AttributeError for sphinx-apidoc
from . import treepath
class TreeVisitor(object):
"""
Non-mutating visitor. Subclass and implement visit_MyNode methods.
A user can traverse a foreign AST by implementing
:py:class:`minivect.miniast.Context.getchildren`
"""
want_access_path = False
def __init__(self, context):
self.context = context
self.dispatch_table = {}
if self.want_access_path:
self.access_path = []
else:
self._visitchild = self.visit
def _find_handler(self, obj):
# to resolve, try entire hierarchy
cls = type(obj)
pattern = "visit_%s"
mro = inspect.getmro(cls)
handler_method = None
for mro_cls in mro:
handler_method = getattr(self, pattern % mro_cls.__name__, None)
if handler_method is not None:
return handler_method
raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
def visit(self, obj, *args):
"Visit a single child."
try:
handler_method = self.dispatch_table[type(obj)]
except KeyError:
handler_method = self._find_handler(obj)
self.dispatch_table[type(obj)] = handler_method
return handler_method(obj)
def _visitchild(self, child, parent, attrname, idx):
self.access_path.append((parent, attrname, idx))
result = self.visit(child)
self.access_path.pop()
return result
def visit_childlist(self, child, parent=None, attr=None):
if isinstance(child, list):
childretval = [self._visitchild(child_node, parent, attr, idx)
for idx, child_node in enumerate(child)]
else:
childretval = self._visitchild(child, parent, attr, None)
if isinstance(childretval, list):
raise RuntimeError(
                'Cannot insert list here: %s in %r' % (attr, parent))
return childretval
def visitchildren(self, parent, attrs=None):
"Visits the children of the given node."
if parent is None:
return None
if attrs is None:
attrs = self.context.getchildren(parent)
result = {}
for attr in attrs:
child = getattr(parent, attr)
if child is not None:
result[attr] = self.visit_childlist(child, parent, attr)
return result
def treepath(self, node, xpath_expr):
return treepath.iterfind(node, xpath_expr)
def treepath_first(self, node, xpath_expr):
return treepath.find_first(node, xpath_expr)
def p(self, node):
node.print_tree(self.context)
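# Minimal example visitor (added for exposition): count Variable nodes by
# subclassing TreeVisitor and relying on the visit_<ClassName> dispatch above.
# The context argument is expected to be a miniast.Context (or compatible).
#
#     class CountVariables(TreeVisitor):
#         def __init__(self, context):
#             super(CountVariables, self).__init__(context)
#             self.count = 0
#         def visit_Variable(self, node):
#             self.count += 1
#             self.visitchildren(node)
#         def visit_Node(self, node):             # fallback for all other nodes
#             self.visitchildren(node)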
class VisitorTransform(TreeVisitor):
"""
Mutating transform. Each attribute is replaced by the result of the
corresponding visit_MyNode method.
"""
def visitchildren(self, parent, attrs=None):
result = super(VisitorTransform, self).visitchildren(parent, attrs)
for attr, newnode in result.iteritems():
if not isinstance(newnode, list):
setattr(parent, attr, newnode)
else:
# Flatten the list one level and remove any None
newlist = []
for x in newnode:
if x is not None:
if isinstance(x, list):
newlist += x
else:
newlist.append(x)
setattr(parent, attr, newlist)
return result
class GenericVisitor(TreeVisitor):
"Generic visitor that automatically visits children"
def visit_Node(self, node):
self.visitchildren(node)
return node
class GenericTransform(VisitorTransform, GenericVisitor):
"Generic transform that automatically visits children"
class MayErrorVisitor(TreeVisitor):
"""
Determine whether code generated by an AST can raise exceptions.
"""
may_error = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_NodeWrapper(self, node):
self.may_error = (self.may_error or
self.context.may_error(node.opaque_node))
def visit_ForNode(self, node):
self.visit(node.init)
self.visit(node.condition)
self.visit(node.step)
class PrintTree(TreeVisitor):
"""
Print an AST, see also :py:class:`minivect.miniast.Node.print_tree`.
"""
indent = 0
want_access_path = True
def format_value(self, node):
from . import miniast
if node.is_temp:
format_value = node.repr_name
elif (isinstance(node, miniast.Variable) or
isinstance(node, miniast.FuncNameNode) or
node.is_funcarg):
format_value = node.name
elif node.is_binop or node.is_unop:
format_value = node.operator
elif node.is_constant:
format_value = node.value
elif node.is_sizeof:
format_value = str(node.type)
else:
return None
return format_value
def format_node(self, node, want_type_info=True):
result = type(node).__name__
format_value = self.format_value(node)
if node.is_expression and want_type_info:
if format_value is not None:
format_value = "%s, type=%s" % (format_value, node.type)
else:
format_value = "type=%s" % (node.type,)
if format_value:
return "%s(%s)" % (result, format_value)
else:
return result
def visit_Node(self, node):
if self.access_path:
parent, attr, idx = self.access_path[-1]
else:
attr = "(root)"
idx = None
prefix = "%s%s" % (self.indent * " ", attr)
if idx is not None:
prefix = "%s[%d]" % (prefix, idx)
print(("%s: %s" % (prefix, self.format_node(node))))
self.indent += 1
self.visitchildren(node)
self.indent -= 1
########NEW FILE########
__FILENAME__ = optimize
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# -*- encoding: UTF-8 -*-
from . import minivisitor
from . import miniutils
from . import minitypes
from . import specializers
def admissible(broadcasting_tuple, n_loops):
"""
Check for admissibility. Indicates whether partial hoisting is the most
efficient thing to perform. See also partially_hoistable()
"""
if len(broadcasting_tuple) < n_loops:
        # In this situation, we pad with leading broadcasting dimensions.
# This means we have to hoist all the way
return False
# Filter leading False values
i = 0
for i, broadcasting in enumerate(broadcasting_tuple):
if broadcasting:
break
# Check for all trailing values (at least one) being True
return broadcasting_tuple[i:] and miniutils.all(broadcasting_tuple[i:])
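# Worked examples for admissible() (added for exposition); these mirror the cases
# discussed in the HoistBroadcastingExpressions docstring below, with n_loops == 3:
#
#     admissible((False, True, True), 3)    # -> True:  hoist just before the j loop
#     admissible((True, True, False), 3)    # -> False: requires an array temporary
#     admissible((True, False, True), 3)    # -> False: repeated computation remains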
def partially_hoistable(broadcasting_tuple, n_loops):
"""
This function indicates, when admissible() returns false, whether an
expression is partially hoistable. This means the caller must establish
whether repeated computation or an array temporary will be more beneficial.
    If the expression is a variable, there is no repeated computation, and
it should be hoisted as far as possible.
"""
return broadcasting_tuple[-1]
def broadcasting(broadcasting_tuple1, broadcasting_tuple2):
return broadcasting_tuple1 != broadcasting_tuple2
class HoistBroadcastingExpressions(specializers.BaseSpecializer):
"""
This transform hoists out part of sub-expressions which are broadcasting.
There are two cases:
1) We can hoist out the sub-expression and store it in a scalar for
broadcasting
2) We have to hoist the sub-expression out entirely and store it in
a temporary array
As an alternative to 2), we could swap axes to return to situation 1),
i.e. reorder the element-wise traversal order. We do not choose this
option, since the loop order is tailored to cache-friendliness.
We determine where to hoist sub-expression to based on broadcasting
information. Each type has a broadcasting tuple with a true/false value
for each dimension, specifying whether it will broadcast in that dimension
(i.e. broadcasting is not optional in that dimension).
We make the following observations:
1) Trailing truth values mean we can hoist the sub-expression out just
before the first truth value in the consecutive sequence of truth
values
Example ``(False, True, True)``::
            A[:, None, None] * A[:, None, None] * B[:, :, :]
becomes::
for i in shape[0]:
temp = A[i, 0, 0] * A[i, 0, 0]
for j in shape[1]:
for k in shape[2]:
temp * B[i, j, k]
2) If not all consecutive leading values are false, we have to assign
to a temporary array (i.e., hoist out all the way)
Example ``(True, True, False)``::
            A[None, None, :] * A[None, None, :] * B[:, :, :]
becomes::
allocate temp
for k in shape[2]:
temp[k] = A[0, 0, k] * A[0, 0, k]
for i in shape[0]:
for j in shape[1]:
for k in shape[2]:
temp[k] * B[i, j, k]
deallocate temp
More generally, if the index sequence of array A is not an admissible prefix
of the total index sequence, we have situation 2). For instance,
``(True, False, True)`` would mean we could hoist out the expression one
level, but we would still have repeated computation. What we could do in
this case, in addition to 2), is reduce indexing overhead, i.e. generate::
for j in shape[1]:
temp[j] = A[0, j, 0] * A[0, j, 0]
for i in shape[0]:
for j in shape[1]:
temp_scalar = temp[j]
for k in shape[2]:
temp_scalar * B[i, j, k]
This is bonus points.
"""
def visit_FunctionNode(self, node):
self.function = node
inner_loop = node.for_loops[-1]
self.visitchildren(inner_loop)
return node
def visit_Variable(self, node):
type = node.type
if type.is_array and type.broadcasting is not None:
n_loops = len(self.function.for_loops)
if admissible(type.broadcasting, n_loops):
node.hoistable = True
elif partially_hoistable(type.broadcasting, n_loops):
# TODO: see whether `node` should be fully (array temporary)
# TODO: or partially hoisted
node.hoistable = True
elif miniutils.any(type.broadcasting):
pass # enable when temporaries are implemented in minivect
# node.need_temp = True
node.broadcasting = type.broadcasting
return node
    def visit_ArrayAttribute(self, node):
return node
def _hoist_binop_operands(self, b, node):
        # perform hoisting. Does not handle all expressions correctly yet.
if not node.lhs.hoistable or not node.rhs.hoistable:
if node.lhs.hoistable:
node.lhs = self.hoist(node.lhs)
else:
node.rhs = self.hoist(node.rhs)
return node
lhs_hoisting_level = self.hoisting_level(node.lhs)
rhs_hoisting_level = self.hoisting_level(node.rhs)
if lhs_hoisting_level == rhs_hoisting_level:
node.hoistable = True
node.broadcasting = node.lhs.broadcasting
return node
def binop():
result = b.binop(node.type, node.operator, node.lhs, node.rhs)
result.broadcasting = broadcasting
result.hoistable = True
return result
if lhs_hoisting_level < rhs_hoisting_level:
broadcasting = node.rhs.broadcasting
node.lhs = self.hoist(node.lhs)
return self.hoist(binop())
else: # lhs_hoisting_level > rhs_hoisting_level
broadcasting = node.lhs.broadcasting
node.rhs = self.hoist(node.rhs)
return self.hoist(binop())
def _make_temp_binop_operands(self, node):
if broadcasting(node.lhs.broadcasting, node.rhs.broadcasting):
node.need_temp = True
else:
if node.lhs.need_temp:
node.lhs = self.make_temp(node.lhs)
if node.rhs.need_temp:
node.rhs = self.make_temp(node.rhs)
def visit_BinaryOperationNode(self, node):
b = self.astbuilder
self.visitchildren(node)
node.broadcasting = None
if node.lhs.need_temp or node.rhs.need_temp:
return self._make_temp_binop_operands(node)
elif node.lhs.hoistable or node.rhs.hoistable:
return self._hoist_binop_operands(b, node)
return node
def visit_ForNode(self, node):
self.visitchildren(node)
self.handle_pending_stats(node)
return node
def visit_BinopNode(self, node):
# used in superclass, override here
return self.visit_BinaryOperationNode(node)
def visit_AssignmentExpr(self, node):
rhs = self.visit(node.rhs)
node.rhs = self.process_expr(rhs)
return node
def visit_UnopNode(self, node):
o = node.operand = self.visit(node.operand)
node.hoistable = o.hoistable
node.need_temp = o.need_temp
node.broadcasting = o.broadcasting
return node
def process_expr(self, expr):
if expr.hoistable:
return self.hoist(expr)
elif expr.need_temp:
return self.make_temp(expr)
else:
return expr
def make_temp(self, node):
"Not implemented yet"
return node
def hoisting_level(self, node):
i = 0
for i, broadcasting in enumerate(node.broadcasting[::-1]):
if not broadcasting:
break
return self.function.ndim - 1 - i
def hoist(self, node):
if not node.hoistable:
return node
b = self.astbuilder
hoisting_level = self.hoisting_level(node)
if hoisting_level < 0:
for_loop = self.function
else:
for_loop = self.function.for_loops[hoisting_level]
temp = b.temp(node.type.dtype, name='hoisted_temp')
temp.broadcasting = None
# TODO: keep track of the variables
for variable in self.treepath(node, '//Variable'):
variable.hoisted = True
stat = b.assign(temp, node, may_reorder=False)
for_loop.body = b.stats(stat, for_loop.body)
return self.visit(temp)
########NEW FILE########
__FILENAME__ = pydot
# -*- coding: utf-8 -*-
"""Graphviz's dot language Python interface.
This module provides with a full interface to create handle modify
and process graphs in Graphviz's dot language.
References:
pydot Homepage: http://code.google.com/p/pydot/
Graphviz: http://www.graphviz.org/
DOT Language: http://www.graphviz.org/doc/info/lang.html
Programmed and tested with Graphviz 2.26.3 and Python 2.6 on OSX 10.6.4
Copyright (c) 2005-2011 Ero Carrera <ero.carrera@gmail.com>
Distributed under MIT license [http://opensource.org/licenses/mit-license.html].
"""
from __future__ import print_function, division, absolute_import
__revision__ = "$LastChangedRevision: 27 $"
__author__ = 'Ero Carrera'
__version__ = '1.0.%d' % int( __revision__[21:-2] )
__license__ = 'MIT'
import os
import re
import subprocess
import tempfile
import copy
try:
import dot_parser
except Exception as e:
# print "Couldn't import dot_parser, loading of dot files will not be possible."
pass
GRAPH_ATTRIBUTES = set( ['Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank' ] )
EDGE_ATTRIBUTES = set( ['URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank' ] )
NODE_ATTRIBUTES = set( ['URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes dot2tex
'texlbl', 'texmode' ] )
CLUSTER_ATTRIBUTES = set( ['K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'] )
#
# Extented version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError("A frozendict cannot be modified.")
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.iteritems():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append( frozendict(elm) )
else:
v_.append( elm )
arg[k] = tuple(v_)
args_.append( arg )
else:
args_.append( arg )
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.iteritems())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
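# Example (added for exposition): a frozendict behaves like a read-only dict and
# is hashable, so it can be used as a dictionary key or set member.
#
#     fd = frozendict({'color': 'red'})
#     fd['color']                  # -> 'red'
#     hash(fd)                     # works; plain dicts are unhashable
#     fd['color'] = 'blue'         # raises AttributeError("A frozendict cannot be modified.")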
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_,]*$', re.UNICODE)
id_re_alpha_nums_with_ports = re.compile('^[_a-zA-Z][a-zA-Z0-9_,:\"]*[a-zA-Z0-9_,\"]+$', re.UNICODE)
id_re_num = re.compile('^[0-9,]+$', re.UNICODE)
id_re_with_port = re.compile('^([^:]*):([^:]*)$', re.UNICODE)
id_re_dbl_quoted = re.compile('^\".*\"$', re.S|re.UNICODE)
id_re_html = re.compile('^<.*>$', re.S|re.UNICODE)
def needs_quotes( s ):
"""Checks whether a string is a dot language ID.
    It will check whether the string is solely composed
    of the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
return True
for test_re in [id_re_alpha_nums, id_re_num, id_re_dbl_quoted, id_re_html, id_re_alpha_nums_with_ports]:
if test_re.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
def quote_if_necessary(s):
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance( s, basestring ):
return s
if not s:
return s
if needs_quotes(s):
replace = {'"' : r'\"',
"\n" : r'\n',
"\r" : r'\r'}
for (a,b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
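# Examples of the quoting rules above (added for exposition):
#
#     quote_if_necessary('node1')      # -> 'node1'       (valid dot ID, left as-is)
#     quote_if_necessary('node 1')     # -> '"node 1"'    (the space forces quoting)
#     quote_if_necessary('graph')      # -> 'graph'       (reserved keyword: caller
#                                      #    must add quotes manually if needed)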
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return dot_parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = file(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix=None, directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
"""
if node_prefix is None:
node_prefix = unicode('')
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge( src, dst )
graph.add_edge(e)
return graph
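# Example (added for exposition): build an undirected graph from an edge list and
# emit its dot source.
#
#     g = graph_from_edges([('a', 'b'), ('b', 'c')])
#     print(g.to_string())     # graph G { a -- b; b -- c; }   (modulo whitespace)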
def graph_from_adjacency_matrix(matrix, node_prefix= '', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip+1
for e in r:
if e:
graph.add_edge(
                    Edge( node_prefix + str(node_orig),
                        node_prefix + str(node_dest)) )
node_dest += 1
node_orig += 1
return graph
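# Example (added for exposition, assuming the str() coercion above): a 2x2
# adjacency matrix describing a single undirected edge between nodes 1 and 2.
#
#     g = graph_from_adjacency_matrix([[0, 1],
#                                      [1, 0]], directed=False)
#     g.to_string()            # contains the single edge "1 -- 2;"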
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c*node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
                Edge( node_prefix + str(abs(nodes[0])),
                    node_prefix + str(nodes[1]) ))
if not directed:
graph.set_simplify(True)
return graph
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '', 'sfdp': ''}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if os.path.isdir(path) :
for prg in progs.iterkeys():
if progs[prg]:
continue
if os.path.exists( os.path.join(path, prg) ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg) + '"'
else:
progs[prg] = os.path.join(path, prg)
success = True
elif os.path.exists( os.path.join(path, prg + '.exe') ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"'
else:
progs[prg] = os.path.join(path, prg + '.exe')
success = True
if success:
return progs
else:
return None
# The multi-platform version of this 'find_graphviz' function was
# contributed by Peter Cock
#
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires Mark Hammond's pywin32 is installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
"""
# Method 1 (Windows only)
#
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api, win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
#
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(key, subkey, opt, sam, ctypes.byref(result))
return result.value
def RegQueryValueEx( hkey, valuename ):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer( 1024 )
res = ctypes.windll.advapi32.RegQueryValueExA(hkey, valuename, 0,
ctypes.byref(data_type), data, ctypes.byref(data_len))
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
#
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
#
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz",
]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx( HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE )
if hkey is not None:
path = RegQueryValueEx( hkey, "InstallPath" )
RegCloseKey( hkey )
                        # The registry variable might exist, left by old installations
# but with no value, in those cases we keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
#
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None :
#print "Used Windows registry"
return progs
except Exception as excp:
#raise excp
pass
else:
break
# Method 2 (Linux, Windows etc)
#
if 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Method 3 (Windows only)
#
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
#
if 'PROGRAMFILES' in os.environ:
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(os.environ['PROGRAMFILES'], 'ATT', 'GraphViz', 'bin')
else:
#Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None :
#print "Used default install location"
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/' ):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Failed to find GraphViz
#
return None
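# Example (added for exposition): find_graphviz() returns a program-name -> path
# mapping when Graphviz is installed, or None otherwise.
#
#     progs = find_graphviz()
#     if progs is not None:
#         print(progs['dot'])  # absolute path to the 'dot' executable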
class Common(object):
"""Common information to several classes.
Should not be directly used, several classes are derived from
this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict['attributes'].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict['type']
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ('subgraph', 'digraph', 'cluster'):
default_node_name = 'graph'
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node( default_node_name )
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict['attributes'].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get('parent_graph', None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict['attributes'][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict['attributes'].get(name, None)
    def get_attributes(self):
        """Get this object's attribute dictionary."""
return self.obj_dict['attributes']
def set_sequence(self, seq):
self.obj_dict['sequence'] = seq
def get_sequence(self):
return self.obj_dict['sequence']
def create_attribute_methods(self, obj_attributes):
#for attr in self.obj_dict['attributes']:
for attr in obj_attributes:
# Generate all the Setter methods.
#
self.__setattr__( 'set_'+attr, lambda x, a=attr : self.obj_dict['attributes'].__setitem__(a, x) )
# Generate all the Getter methods.
#
self.__setattr__('get_'+attr, lambda a=attr : self.__get_attribute__(a))
class Error(Exception):
"""General error handling class.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvocationException(Exception):
    """To indicate that a problem occurred while running any of the GraphViz executables.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class Node(Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name = '', obj_dict = None, **attrs):
#
# Nodes will take attributes of all other types because the defaults
# for any GraphViz object are dealt with as if they were Node definitions
#
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'node'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_node_list' ] = None
self.obj_dict[ 'sequence' ] = None
# Remove the compass point
#
port = None
if isinstance(name, basestring) and not name.startswith('"'):
idx = name.find(':')
if idx > 0 and idx+1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, (long, int)):
name = str(name)
self.obj_dict['name'] = quote_if_necessary( name )
self.obj_dict['port'] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict['name'] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict['name']
def get_port(self):
"""Get the node's port."""
return self.obj_dict['port']
def add_style(self, style):
styles = self.obj_dict['attributes'].get('style', None)
if not styles and style:
styles = [ style ]
else:
styles = styles.split(',')
styles.append( style )
self.obj_dict['attributes']['style'] = ','.join( styles )
def to_string(self):
"""Returns a string representation of the node in dot language.
"""
# RMF: special case defaults for node, edge and graph properties.
#
node = quote_if_necessary(self.obj_dict['name'])
node_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
node_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
node_attr.append( attr )
        # No point in having nodes setting any defaults if they don't set
# any attributes...
#
if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
return ''
node_attr = ', '.join(node_attr)
if node_attr:
node += ' [' + node_attr + ']'
return node + ';'
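# Example (added for exposition): nodes expose one dynamically generated
# set_<attr>/get_<attr> pair per entry in NODE_ATTRIBUTES.
#
#     n = Node('a', label='my node')
#     n.set_shape('box')               # generated by create_attribute_methods()
#     n.to_string()                    # 'a [label="my node", shape=box];'
#                                      # (attribute order follows dict iteration)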
class Edge(Common):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node's name
dst: destination node's name
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src='', dst='', obj_dict=None, **attrs):
if isinstance(src, (list, tuple)) and dst == '':
src, dst = src
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'edge'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_edge_list' ] = None
self.obj_dict[ 'sequence' ] = None
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
points = ( quote_if_necessary( src) , quote_if_necessary( dst) )
self.obj_dict['points'] = points
self.create_attribute_methods(EDGE_ATTRIBUTES)
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict['points'][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict['points'][1]
def __hash__(self):
return hash( hash(self.get_source()) + hash(self.get_destination()) )
def __eq__(self, edge):
"""Compare two edges.
If the parent graph is directed, arcs linking
node A to B are considered equal and A->B != B->A
If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A
"""
if not isinstance(edge, Edge):
            raise Error("Can't compare an edge to a non-edge object.")
if self.get_parent_graph().get_top_graph_type() == 'graph':
# If the graph is undirected, the edge has neither
# source nor destination.
#
if ( ( self.get_source() == edge.get_source() and self.get_destination() == edge.get_destination() ) or
( edge.get_source() == self.get_destination() and edge.get_destination() == self.get_source() ) ):
return True
else:
if self.get_source()==edge.get_source() and self.get_destination()==edge.get_destination() :
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(':')
if node_port_idx>0 and node_str[0]=='"' and node_str[node_port_idx-1]=='"':
return node_str
if node_port_idx>0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx+1:]
node = quote_if_necessary(a)
node += ':'+quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref( self.get_source() )
dst = self.parse_node_ref( self.get_destination() )
if isinstance(src, frozendict):
edge = [ Subgraph(obj_dict=src).to_string() ]
elif isinstance(src, (int, long)):
edge = [ str(src) ]
else:
edge = [ src ]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph' ):
edge.append( '->' )
else:
edge.append( '--' )
if isinstance(dst, frozendict):
edge.append( Subgraph(obj_dict=dst).to_string() )
elif isinstance(dst, (int, long)):
edge.append( str(dst) )
else:
edge.append( dst )
edge_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
edge_attr.append( attr )
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append( ' [' + edge_attr + ']' )
return ' '.join(edge) + ';'
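# Example (added for exposition): edges accept plain names or Node instances and
# render with '->' or '--' depending on the parent graph's type.
#
#     g = Dot(graph_type='digraph')    # Dot is defined further below
#     e = Edge('a', 'b', label='link')
#     g.add_edge(e)
#     e.to_string()                    # 'a -> b [label=link];'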
class Graph(Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph( graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
suppress_disconnected:
defaults to False, which will remove from the
graph any disconnected nodes.
simplify:
if True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='G', obj_dict=None, graph_type='digraph', strict=False,
suppress_disconnected=False, simplify=False, **attrs):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict['attributes'] = dict(attrs)
if graph_type not in ['graph', 'digraph']:
raise Error('Invalid type "%s". Accepted graph types are: graph, digraph, subgraph' % graph_type)
self.obj_dict['name'] = quote_if_necessary(graph_name)
self.obj_dict['type'] = graph_type
self.obj_dict['strict'] = strict
self.obj_dict['suppress_disconnected'] = suppress_disconnected
self.obj_dict['simplify'] = simplify
self.obj_dict['current_child_sequence'] = 1
self.obj_dict['nodes'] = dict()
self.obj_dict['edges'] = dict()
self.obj_dict['subgraphs'] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def get_graph_type(self):
return self.obj_dict['type']
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict['type']
def set_graph_defaults(self, **attrs):
self.add_node( Node('graph', **attrs) )
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node('graph')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
self.add_node( Node('node', **attrs) )
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node('node')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node( Node('edge', **attrs) )
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node('edge')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
duplicated ones.
"""
self.obj_dict['simplify'] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict['simplify']
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict['type'] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict['type']
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict['name'] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict['name']
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict['strict'] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict['strict']
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in the graph with no incoming or outgoing
edges. This option works also for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict['suppress_disconnected'] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict['suppress_disconnected']
def get_next_sequence_number(self):
seq = self.obj_dict['current_child_sequence']
self.obj_dict['current_child_sequence'] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError('add_node() received a non node class object: ' + str(graph_node))
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict['nodes'][graph_node.get_name()] = [ graph_node.obj_dict ]
#self.node_dict[graph_node.get_name()] = graph_node.attributes
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict['nodes'][graph_node.get_name()].append( graph_node.obj_dict )
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if name in self.obj_dict['nodes']:
if index is not None and index < len(self.obj_dict['nodes'][name]):
del self.obj_dict['nodes'][name][index]
return True
else:
del self.obj_dict['nodes'][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['nodes']:
match.extend( [ Node( obj_dict = obj_dict ) for obj_dict in self.obj_dict['nodes'][name] ])
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node, obj_dict_list in self.obj_dict['nodes'].iteritems():
node_objs.extend( [ Node( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
        It takes an edge object as its only argument and returns
None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError('add_edge() received a non edge class object: ' + str(graph_edge))
edge_points = ( graph_edge.get_source(), graph_edge.get_destination() )
if edge_points in self.obj_dict['edges']:
edge_list = self.obj_dict['edges'][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ]
graph_edge.set_sequence( self.get_next_sequence_number() )
graph_edge.set_parent_graph( self.get_parent_graph() )
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
matching edges(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance( src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, (int, long)):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if (src, dst) in self.obj_dict['edges']:
if index is not None and index < len(self.obj_dict['edges'][(src, dst)]):
del self.obj_dict['edges'][(src, dst)][index]
return True
else:
del self.obj_dict['edges'][(src, dst)]
return True
return False
    def get_edge(self, src_or_list, dst=None):
        """Retrieve an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance( src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if edge_points in self.obj_dict['edges'] or (
self.get_top_graph_type() == 'graph' and edge_points_reverse in self.obj_dict['edges']):
edges_obj_dict = self.obj_dict['edges'].get(
edge_points,
self.obj_dict['edges'].get( edge_points_reverse, None ))
for edge_obj_dict in edges_obj_dict:
match.append( Edge( edge_points[0], edge_points[1], obj_dict = edge_obj_dict ) )
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge, obj_dict_list in self.obj_dict['edges'].iteritems():
edge_objs.extend( [ Edge( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return edge_objs
    def add_subgraph(self, sgraph):
        """Adds a subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if not isinstance(sgraph, Subgraph) and not isinstance(sgraph, Cluster):
raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph))
if sgraph.get_name() in self.obj_dict['subgraphs']:
sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ]
sgraph_list.append( sgraph.obj_dict )
else:
self.obj_dict['subgraphs'][ sgraph.get_name() ] = [ sgraph.obj_dict ]
sgraph.set_sequence( self.get_next_sequence_number() )
sgraph.set_parent_graph( self.get_parent_graph() )
    def get_subgraph(self, name):
        """Retrieve a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['subgraphs']:
sgraphs_obj_dict = self.obj_dict['subgraphs'].get( name )
for obj_dict_list in sgraphs_obj_dict:
#match.extend( Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list )
match.append( Subgraph( obj_dict = obj_dict_list ) )
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph, obj_dict_list in self.obj_dict['subgraphs'].iteritems():
sgraph_objs.extend( [ Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
for obj_list in self.obj_dict['nodes'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['edges'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['subgraphs'].itervalues():
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Returns a string representation of the graph in dot language.
        It will return the graph and all its subelements in string form.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self==self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']:
graph.append( 'subgraph {\n' )
else:
graph.append( '{\n' )
else:
graph.append( '%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']) )
for attr in self.obj_dict['attributes'].iterkeys():
if self.obj_dict['attributes'].get(attr, None) is not None:
val = self.obj_dict['attributes'].get(attr)
if val is not None:
graph.append( '%s=%s' % (attr, quote_if_necessary(val)) )
else:
graph.append( attr )
graph.append( ';\n' )
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].itervalues():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] )
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].itervalues():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].itervalues():
sgraph_obj_dicts.extend(sg)
obj_list = sorted([ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ])
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append( node.to_string()+'\n' )
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append( edge.to_string() + '\n' )
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append( sgraph.to_string()+'\n' )
graph.append( '}\n' )
return ''.join(graph)
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the subgraph's name
suppress_disconnected:
defaults to false, which will remove from the
subgraph any disconnected nodes.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
"""
# RMF: subgraph should have all the attributes of graph so it can be passed
# as a graph to all methods
#
def __init__(self, graph_name='', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the cluster's name (the string 'cluster' will be always prepended)
suppress_disconnected:
defaults to false, which will remove from the
cluster any disconnected nodes.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='subG', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
self.obj_dict['name'] = 'cluster_'+graph_name
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.progs = None
self.formats = ['canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np', 'ismap',
'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf', 'pic', 'plain',
'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'vml', 'vmlz',
'vrml', 'vtx', 'wbmp', 'xdot', 'xlib' ]
self.prog = 'dot'
# Automatically creates all the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
self.__setattr__(
'create_'+frmt,
lambda f=frmt, prog=self.prog : self.create(format=f, prog=prog))
f = self.__dict__['create_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'create' method for more information.'''
for frmt in self.formats+['raw']:
self.__setattr__(
'write_'+frmt,
lambda path, f=frmt, prog=self.prog : self.write(path, format=f, prog=prog))
f = self.__dict__['write_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'write' method for more information.'''
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
If the graph needs graphic objects to be used as shapes or otherwise
those need to be in the same folder as the graph is going to be rendered
from. Alternatively the absolute path to the files can be specified when
including the graphics in the graph.
The files in the location pointed to by the path(s) specified as arguments
to this method will be copied to the same temporary location where the
graph is going to be rendered.
"""
if isinstance( file_paths, basestring ):
self.shape_files.append( file_paths )
if isinstance( file_paths, (list, tuple) ):
self.shape_files.extend( file_paths )
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
    def set_graphviz_executables(self, paths):
        """This method allows you to manually specify the location of the GraphViz executables.
The argument to this method should be a dictionary where the keys are as follows:
{'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': ''}
and the values are the paths to the corresponding executable, including the name
of the executable itself.
"""
self.progs = paths
def write(self, path, prog=None, format='raw'):
"""Writes a graph to a file.
Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
The output can be processed by any of graphviz tools, defined
in 'prog', which defaults to 'dot'
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
"""
if prog is None:
prog = self.prog
dot_fd = file(path, "w+b")
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except:
pass
try:
data = data.encode('utf-8')
except:
pass
dot_fd.write(data)
else:
dot_fd.write(self.create(prog, format))
dot_fd.close()
return True
def create(self, prog=None, format='ps'):
"""Creates and returns a Postscript representation of the graph.
create will write the graph to a temporary dot file and process
        it with the program given by 'prog' (which defaults to 'dot'),
        reading the Postscript output and returning it as a string if the
        operation is successful.
On failure None is returned.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats.
[create_ps(), create_gif(), create_dia(), ...]
        If 'prog' is a list instead of a string, the first item is expected
to be the program name, followed by any optional command-line
arguments for it:
[ 'twopi', '-Tdot', '-s10' ]
"""
if prog is None:
prog = self.prog
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
if self.progs is None:
self.progs = find_graphviz()
if self.progs is None:
raise InvocationException(
'GraphViz\'s executables not found' )
if prog not in self.progs:
raise InvocationException(
'GraphViz\'s executable "%s" not found' % prog )
if not os.path.exists( self.progs[prog] ) or not os.path.isfile( self.progs[prog] ):
raise InvocationException(
'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self.progs[prog] )
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name)
tmp_dir = os.path.dirname(tmp_name )
# For each of the image files...
#
for img in self.shape_files:
# Get its data
#
f = file(img, 'rb')
f_data = f.read()
f.close()
# And copy it under a file with the same name in the temporary directory
#
f = file( os.path.join( tmp_dir, os.path.basename(img) ), 'wb' )
f.write(f_data)
f.close()
cmdline = [self.progs[prog], '-T'+format, tmp_name] + args
p = subprocess.Popen(
cmdline,
cwd=tmp_dir,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stderr = p.stderr
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
stdout_output = ''.join(stdout_output)
if not stderr.closed:
stderr_output = list()
while True:
data = stderr.read()
if not data:
break
stderr_output.append(data)
stderr.close()
if stderr_output:
stderr_output = ''.join(stderr_output)
#pid, status = os.waitpid(p.pid, 0)
status = p.wait()
if status != 0 :
raise InvocationException(
'Program terminated with status: %d. stderr follows: %s' % (
status, stderr_output) )
elif stderr_output:
print(stderr_output)
# For each of the image files...
#
for img in self.shape_files:
# remove it
#
os.unlink( os.path.join( tmp_dir, os.path.basename(img) ) )
os.unlink(tmp_name)
return stdout_output
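# End-to-end example (added for exposition): build a small digraph and render it.
# The write_png() call requires a local Graphviz installation discoverable by
# find_graphviz().
#
#     dot = Dot(graph_name='example', graph_type='digraph')
#     dot.add_node(Node('a', shape='box'))
#     dot.add_node(Node('b'))
#     dot.add_edge(Edge('a', 'b', label='a to b'))
#     dot.write('example.dot')         # raw dot source
#     dot.write_png('example.png')     # via the generated write_<format>() methods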
########NEW FILE########
__FILENAME__ = specializers
# -*- coding: utf-8 -*-
"""
Specializers for various sorts of data layouts and memory alignments.
These specializers operate on a copy of the simplified array expression
representation (i.e., one with an NDIterate node). This node is replaced
with one or several ForNode nodes in a specialized order.
For auto-tuning code for tile size and OpenMP size, see
https://github.com/markflorisson88/cython/blob/_array_expressions/Cython/Utility/Vector.pyx
"""
from __future__ import print_function, division, absolute_import
import sys
import copy
from functools import reduce
try:
from functools import wraps
except ImportError:
def wraps(wrapped):
def decorator(wrapper):
return wrapper
return decorator
from . import minivisitor
from . import miniutils
from . import minitypes
from . import minierror
from . import codegen
strength_reduction = True
def debug(*args):
sys.stderr.write(" ".join(str(arg) for arg in args) + '\n')
def specialize_ast(ast):
return copy.deepcopy(ast)
class ASTMapper(minivisitor.VisitorTransform):
"""
Base class to map foreign ASTs onto a minivect AST, or vice-versa.
This sets the current node's position in the astbuilder for each
node that is being visited, to make it easy to build new AST nodes
without passing in source position information everywhere.
"""
def __init__(self, context):
super(ASTMapper, self).__init__(context)
self.astbuilder = context.astbuilder
def getpos(self, opaque_node):
return self.context.getpos(opaque_node)
def map_type(self, opaque_node, **kwds):
"Return a mapped type for the foreign node."
return self.context.typemapper.map_type(
self.context.gettype(opaque_node), **kwds)
def visit(self, node, *args):
prev = self.astbuilder.pos
self.astbuilder.pos = node.pos
result = super(ASTMapper, self).visit(node)
self.astbuilder.pos = prev
return result
class BaseSpecializer(ASTMapper):
"""
Base class for specialization. Does not perform any specialization itself.
"""
def getpos(self, node):
return node.pos
def get_type(self, type):
"Resolve the type to the dtype of the array if an array type"
if type.is_array:
return type.dtype
return type
def visit(self, node, *args):
result = super(BaseSpecializer, self).visit(node)
if result is not None:
result.is_specialized = True
return result
def visit_Node(self, node):
# node = copy.copy(node)
self.visitchildren(node)
return node
def init_pending_stats(self, node):
"""
Allow modifications while visiting some descendant of this node
This happens especially while variables are resolved, which
calls compute_inner_dim_pointer()
"""
b = self.astbuilder
if not node.is_function:
node.prepending = b.stats()
node.appending = b.stats()
def handle_pending_stats(self, node):
"""
Handle any pending statements that need to be inserted further
up in the AST.
"""
b = self.astbuilder
# self.visitchildren(node.prepending)
# self.visitchildren(node.appending)
if node.is_function:
# prepending is a StatListNode already part of the function body
# assert node.prepending in list(self.treepath(node, '//StatListNode'))
node.body = b.stats(node.body, node.appending)
else:
node.body = b.stats(node.prepending, node.body, node.appending)
if not self.context.use_llvm:
node.body = self.fuse_omp_stats(node.body)
def get_loop(self, loop_level):
if loop_level:
return self.function.for_loops[self.loop_level - 1]
return self.function
def fuse_omp_stats(self, node):
"""
Fuse consecutive OpenMPConditionalNodes.
"""
from . import miniast
if not node.stats:
return node
b = self.astbuilder
stats = [node.stats[0]]
for next_stat in node.stats[1:]:
stat = stats[-1]
c1 = isinstance(stat, miniast.OpenMPConditionalNode)
c2 = isinstance(next_stat, miniast.OpenMPConditionalNode)
if c1 and c2:
if_body = None
else_body = None
if stat.if_body or next_stat.if_body:
if_body = b.stats(stat.if_body, next_stat.if_body)
if stat.else_body or next_stat.else_body:
else_body = b.stats(stat.else_body, next_stat.else_body)
stats[-1] = b.omp_if(if_body, else_body)
else:
stats.append(next_stat)
node.stats[:] = stats
return node
#
### Stubs for cooperative multiple inheritance
#
def visit_NDIterate(self, node):
# Do not visit children
return node
visit_AssignmentExpr = visit_Node
visit_ErrorHandler = visit_Node
visit_BinopNode = visit_Node
visit_UnopNode = visit_Node
visit_IfNode = visit_Node
class Specializer(BaseSpecializer):
"""
Base class for most specializers, provides some basic functionality
for subclasses. Implement visit_* methods to specialize nodes
to some pattern.
Implements handling of errors and cleanups, adds a return
statement to the function and can insert debug print statements if
context.debug is set to a true value.
"""
is_contig_specializer = False
is_tiled_specializer = False
is_vectorizing_specializer = False
is_inner_contig_specializer = False
is_strided_specializer = False
vectorized_equivalents = None
def __init__(self, context, specialization_name=None):
super(Specializer, self).__init__(context)
if specialization_name is not None:
self.specialization_name = specialization_name
self.variables = {}
def _index_list(self, pointer, ndim):
"Return a list of indexed pointers"
return [self.astbuilder.index(pointer, self.astbuilder.constant(i))
for i in range(ndim)]
def _debug_function_call(self, b, node):
"""
Generate debug print statements when the specialized function is
called.
"""
stats = [
b.print_(b.constant(
"Calling function %s (%s specializer)" % (
node.mangled_name, self.specialization_name)))
]
if self.is_vectorizing_specializer:
stats.append(
b.print_(b.constant("Vectorized version size=%d" %
self.vector_size)))
stats.append(
b.print_(b.constant("shape:"), *self._index_list(node.shape,
node.ndim)))
if self.is_tiled_specializer:
stats.append(b.print_(b.constant("blocksize:"), self.get_blocksize()))
if not self.is_contig_specializer:
for idx, arg in enumerate(node.arguments):
if arg.is_array_funcarg:
stats.append(b.print_(b.constant("strides operand%d:" % idx),
*self._index_list(arg.strides_pointer,
arg.type.ndim)))
stats.append(b.print_(b.constant("data pointer %d:" % idx),
arg.data_pointer))
node.prepending.stats.append(b.stats(*stats))
def visit_FunctionNode(self, node):
"""
Handle a FunctionNode. Sets node.total_shape to the product of the
shape, wraps the function's body in a
:py:class:`minivect.miniast.ErrorHandler` if needed and adds a
return statement.
"""
b = self.astbuilder
self.compute_total_shape(node)
node.mangled_name = self.context.mangle_function_name(node.name)
# set this so bad people can specialize during code generation time
node.specializer = self
node.specialization_name = self.specialization_name
self.function = node
if self.context.debug:
self._debug_function_call(b, node)
if node.body.may_error(self.context):
node.body = b.error_handler(node.body)
node.body = b.stats(node.body, b.return_(node.success_value))
self.visitchildren(node)
# if not self.is_contig_specializer:
# self.compute_temp_strides(b, node)
return node
def visit_ForNode(self, node):
if node.body.may_error(self.context):
node.body = self.astbuilder.error_handler(node.body)
self.visitchildren(node)
return node
def visit_Variable(self, node):
if node.name not in self.variables:
self.variables[node.name] = node
return self.visit_Node(node)
def get_data_pointer(self, variable, loop_level):
return self.function.args[variable.name].data_pointer
def omp_for(self, node):
"""
Insert an OpenMP for loop with an 'if' clause that checks to see
whether the total data size exceeds the given OpenMP auto-tuned size.
The caller needs to adjust the size, set in the FunctionNode's
'omp_size' attribute, depending on the number of computations.
"""
if_clause = self.astbuilder.binop(minitypes.bool_, '>',
self.function.total_shape,
self.function.omp_size)
return self.astbuilder.omp_for(node, if_clause)
class FinalSpecializer(BaseSpecializer):
"""
Perform any final specialization and optimizations. The initial specializer
is concerned with specializing for the given data layouts, whereas this
specializer is concerned with any rewriting of the AST to support
fundamental operations.
"""
vectorized_equivalents = None
in_lhs_expr = False
should_vectorize = False
def __init__(self, context, previous_specializer):
super(FinalSpecializer, self).__init__(context)
self.previous_specializer = previous_specializer
self.sp = previous_specializer
self.error_handlers = []
self.loop_level = 0
self.variables = {}
self.strides = {}
self.outer_pointers = {}
self.vector_temps = {}
def run_optimizations(self, node):
"""
Run any optimizations on the AST. Currently only loop-invariant code
motion is implemented when broadcasting information is present.
"""
from . import optimize
# TODO: support vectorized specializations
if (self.context.optimize_broadcasting and not
self.sp.is_contig_specializer or
self.sp.is_vectorizing_specializer):
optimizer = optimize.HoistBroadcastingExpressions(self.context)
node = optimizer.visit(node)
return node
def visit_Variable(self, node):
"""
Process variables, which includes arrays and scalars. For arrays,
this means retrieving the element from the array. Performs strength
reduction for index calculation of array variables.
"""
if node.type.is_array:
tiled = self.sp.is_tiled_specializer
last_loop_level = (self.loop_level == self.function.ndim or
(self.sp.is_vectorizing_specializer and not
self.should_vectorize))
inner_contig = (
self.sp.is_inner_contig_specializer and
(last_loop_level or node.hoisted) and
(not self.sp.is_strided_specializer or
self.sp.matching_contiguity(node.type)))
contig = self.sp.is_contig_specializer
# Get the array data pointer
arg_data_pointer = self.function.args[node.name].data_pointer
if self.sp.is_contig_specializer:
# Contiguous, no strength reduction needed
data_pointer = arg_data_pointer
else:
# Compute strength reduction pointers for all dimensions leading
# up to the dimension this variable occurs in.
self.compute_temp_strides(node, inner_contig, tiled=tiled)
data_pointer = self.compute_data_pointer(
node, arg_data_pointer, inner_contig, tiled)
# Get the loop level corresponding to the occurrence of the variable
for_node = self.function.for_loops[self.loop_level - 1]
if self.should_vectorize:
return self.handle_vector_variable(node, data_pointer, for_node,
inner_contig, contig)
else:
element = self.element_location(data_pointer, for_node,
inner_contig, contig,
tiled=tiled, variable=node)
return self.astbuilder.resolved_variable(
node.name, node.type, element)
else:
return node
def visit_VectorVariable(self, vector_variable):
# use visit_Variable, since it does the strength reduction and such
return self.visit_Variable(vector_variable.variable)
def element_location(self, data_pointer, for_node,
inner_contig, is_contig, tiled, variable):
"Return the element in the array for the current index set"
b = self.astbuilder
def debug(item):
if self.context.debug_elements:
string = b.constant("Referenced element from %s:" %
variable.name)
print_ = self.visit(b.print_(string, item))
for_node = self.function.for_loops[self.loop_level - 1]
for_node.prepending.stats.append(print_)
if not is_contig:
stats = []
for i, stride in enumerate(self.strides[variable]):
if stride is not None:
string = b.constant("%s step[%d]:" % (variable.name, i))
stats.append(b.print_(string, stride))
print_steps = b.stats(*stats)
self.function.prepending.stats.append(self.visit(print_steps))
return item
if inner_contig or is_contig:
# contiguous access, index the data pointer in the inner dimension
return debug(b.index(data_pointer, for_node.index))
else:
# strided access, this dimension is performing strength reduction,
# so we just need to dereference the data pointer
return debug(b.dereference(data_pointer))
def handle_vector_variable(self, variable, data_pointer, for_node,
inner_contig, is_contig):
"Same as `element_location`, except for Vector variables"
b = self.astbuilder
# For array operands, load reads into registers, and store
# writes back into the data pointer. For assignment to a register
# we use a vector type, for assignment to a data pointer, the
# data pointer type
if inner_contig or is_contig:
data_pointer = b.add(data_pointer, for_node.index)
if self.in_lhs_expr:
return data_pointer
else:
variable = b.vector_variable(variable, self.sp.vector_size)
if variable in self.vector_temps:
return self.vector_temps[variable]
rhs = b.vector_load(data_pointer, self.sp.vector_size)
temp = b.temp(variable.type, 'xmm')
self.vector_temps[variable] = temp
for_node.prepending.stats.append(b.assign(temp, rhs))
return self.visit(temp)
def compute_temp_strides(self, variable, handle_inner_dim, tiled=False):
"""
Compute the temporary strides needed for the strength reduction. These
should be small constants, so division should be fast. We could use
char * instead of element_type *, but it's nicer to avoid the casts.
"""
b = self.astbuilder
if variable in self.strides:
return self.strides[variable]
start = 0
stop = variable.type.ndim
if handle_inner_dim:
if self.sp.order == "F":
start = 1
else:
stop = stop - 1
self.strides[variable] = strides = [None] * len(self.function.for_loops)
for dim in range(start, stop):
stride = b.stride(variable, dim)
temp_stride = b.temp(stride.type.unqualify("const"),
name="%s_stride%d" % (variable.name, dim))
stat = b.assign(temp_stride,
b.div(stride, b.sizeof(variable.type.dtype)))
self.function.prepending.stats.append(stat)
strides[dim] = temp_stride
return strides
def compute_data_pointer(self, variable, argument_data_pointer,
handle_inner_dim, tiled):
"""
Compute the data pointer for the dimension the variable is located in
(the loop level). This involves generating a strength reduction in
each outer dimension.
Variables referring to the same array may be found on different
loop levels.
"""
b = self.astbuilder
assert variable.type.is_array
pointer_type = argument_data_pointer.type.unqualify("const")
loop_level = self.loop_level
offset = self.function.ndim - variable.type.ndim
stop = loop_level - handle_inner_dim
if self.outer_pointers.get(variable):
start = len(self.outer_pointers[variable])
if stop <= start:
return self.outer_pointers[variable][stop - 1]
else:
self.outer_pointers[variable] = []
start = max(offset - 1, 0)
outer_pointers = self.outer_pointers[variable]
temp = argument_data_pointer
for_loops = self.function.for_loops[start:stop]
def generate_temp():
# Generate: temp_data_pointer = outer_data_pointer
temp = b.temp(pointer_type)
assmt = b.assign(temp, outer_pointer)
outer_node.prepending.stats.append(assmt)
return temp
# Loop over all outer loop levels
for i, for_node in zip(range(start, stop), for_loops):
# Allocate a temp_data_pointer on each outer loop level
if not outer_pointers:
outer_pointer = self.function.args[variable.name].data_pointer
else:
outer_pointer = outer_pointers[-1]
if i == 0:
outer_node = self.function
else:
outer_node = self.function.for_loops[i - 1]
temp = generate_temp()
if for_node.dim < offset:
# No stride addition needed
continue
dim = for_node.dim - offset
stride = original_stride = self.strides[variable][dim]
assert stride is not None, ('strides', self.strides[variable],
'dim', dim, 'start', start,
'stop', stop, 'offset', offset,
'specializer', self.sp)
if for_node.is_controlling_loop:
# controlling loop for tiled specializations, multiply by the
# tiling blocksize for this dimension
stride = b.mul(stride, for_node.blocksize)
# Generate: temp_data_pointer += stride
stat = b.assign(temp, b.add(temp, stride))
if not outer_pointers:
# Outermost loop level, generate some additional OpenMP
# parallel-loop-compatible code
# Generate: temp_data_pointer = data_pointer + i * stride0
omp_body = b.assign(temp, b.add(outer_pointer,
b.mul(original_stride, for_node.index)))
for_node.prepending.stats.append(b.omp_if(omp_body))
for_node.appending.stats.append(b.omp_if(None, stat))
omp_for = self.treepath_first(self.function, '//OpenMPLoopNode')
if omp_for is not None:
omp_for.privates.append(temp)
else:
for_node.appending.stats.append(stat)
self.outer_pointers[variable].append(temp)
return temp
def visit_FunctionNode(self, node):
self.function = node
self.indices = self.sp.indices
node = self.run_optimizations(node)
self.init_pending_stats(node)
self.visitchildren(node)
self.handle_pending_stats(node)
return node
def _visit_set_vectorizing_flag(self, node):
was_vectorizing = self.should_vectorize
self.should_vectorize = node.should_vectorize
self.visitchildren(node)
self.should_vectorize = was_vectorizing
return node
def visit_ForNode(self, node):
is_nd_fornode = node in self.function.for_loops or node.is_fixup
self.loop_level += is_nd_fornode
self.init_pending_stats(node)
self._visit_set_vectorizing_flag(node)
self.handle_pending_stats(node)
self.loop_level -= is_nd_fornode
return node
def visit_IfNode(self, node):
self.loop_level += node.is_fixup
result = self._visit_set_vectorizing_flag(node)
self.loop_level -= node.is_fixup
return result
def visit_AssignmentExpr(self, node):
# assignment expressions should not be nested
self.in_lhs_expr = True
node.lhs = self.visit(node.lhs)
self.in_lhs_expr = False
node.rhs = self.visit(node.rhs)
if node.lhs.type.is_pointer and node.rhs.type.is_vector:
# This expression must be a statement
return self.astbuilder.vector_store(node.lhs, node.rhs)
return node
def visit_TempNode(self, node):
self.visitchildren(node)
return node
def visit_BinopNode(self, node):
type = self.get_type(node.type)
if node.operator == '%' and type.is_float and not self.context.use_llvm:
# rewrite modulo for floats to fmod()
b = self.astbuilder
functype = minitypes.FunctionType(return_type=type,
args=[type, type])
if type.itemsize == 4:
modifier = "f"
elif type.itemsize == 8:
modifier = ""
else:
modifier = "l"
fmod = b.variable(functype, "fmod%s" % modifier)
return self.visit(b.funccall(fmod, [node.lhs, node.rhs]))
self.visitchildren(node)
return node
def visit_UnopNode(self, node):
if node.type.is_vector and node.operator == '-':
# rewrite unary subtract
type = node.operand.type
if type.is_float:
constant = 0.0
else:
constant = 0
lhs = self.astbuilder.vector_const(type, constant)
node = self.astbuilder.binop(type, '-', lhs, node.operand)
return self.visit(node)
self.visitchildren(node)
return node
def visit_DereferenceNode(self, node):
node.operand = self.visit(node.operand)
if self.context.llvm:
node = self.astbuilder.index(node, self.astbuilder.constant(0))
return node
def visit_IfElseExprNode(self, node):
self.visitchildren(node)
if self.context.use_llvm:
# Rewrite 'cond ? x : y' expressions to if/else statements
b = self.astbuilder
temp = b.temp(node.lhs.type, name='if_temp')
stat = b.if_else(node.cond, b.assign(temp, node.lhs),
b.assign(temp, node.rhs))
for_node = self.get_loop(self.loop_level)
for_node.prepending.stats.append(stat)
node = temp
return node
def visit_PrintNode(self, node):
b = self.astbuilder
printf_type = minitypes.FunctionType(
return_type=minitypes.int_,
args=[minitypes.CStringType()],
is_vararg=True)
printf = b.funcname(printf_type, 'printf')
args = []
specifiers = []
for i, arg in enumerate(node.args):
specifier, arg = codegen.format_specifier(arg, b)
args.append(arg)
specifiers.append(specifier)
args.insert(0, b.constant(" ".join(specifiers) + "\n"))
return b.expr_stat(b.funccall(printf, args))
def visit_PositionInfoNode(self, node):
"""
Replace with the setting of positional source information in case
of an error.
"""
b = self.astbuilder
posinfo = self.function.posinfo
if posinfo:
pos = node.posinfo
return b.stats(
b.assign(b.deref(posinfo.filename), b.constant(pos.filename)),
b.assign(b.deref(posinfo.lineno), b.constant(pos.lineno)),
b.assign(b.deref(posinfo.column), b.constant(pos.column)))
def visit_RaiseNode(self, node):
"""
Generate a call to PyErr_Format() to set an exception.
"""
from .minitypes import FunctionType, object_
b = self.astbuilder
args = [object_] * (2 + len(node.fmt_args))
functype = FunctionType(return_type=object_, args=args)
return b.expr_stat(
b.funccall(b.funcname(functype, "PyErr_Format"),
[node.exc_var, node.msg_val] + node.fmt_args))
def visit_ErrorHandler(self, node):
"""
See miniast.ErrorHandler for an explanation of what this needs to do.
"""
b = self.astbuilder
node.error_variable = b.temp(minitypes.bool_)
node.error_var_init = b.assign(node.error_variable, 0)
node.cleanup_jump = b.jump(node.cleanup_label)
node.error_target_label = b.jump_target(node.error_label)
node.cleanup_target_label = b.jump_target(node.cleanup_label)
node.error_set = b.assign(node.error_variable, 1)
if self.error_handlers:
cascade_code = b.jump(self.error_handlers[-1].error_label)
else:
cascade_code = b.return_(self.function.error_value)
node.cascade = b.if_(node.error_variable, cascade_code)
self.error_handlers.append(node)
self.visitchildren(node)
self.error_handlers.pop()
return node
def visit_PragmaForLoopNode(self, node):
if self.previous_specializer.is_vectorizing_specializer:
return self.visit(node.for_node)
else:
self.visitchildren(node)
return node
def visit_StatListNode(self, node):
self.visitchildren(node)
return self.fuse_omp_stats(node)
class OrderedSpecializer(Specializer):
"""
Specializer that understands C and Fortran data layout orders.
"""
vectorized_equivalents = None
def compute_total_shape(self, node):
"""
Compute the product of the shape (entire length of array output).
Sets the total shape as attribute of the function (total_shape).
"""
b = self.astbuilder
# compute the product of the shape and insert it into the function body
extents = [b.index(node.shape, b.constant(i))
for i in range(node.ndim)]
node.total_shape = b.temp(node.shape.type.base_type)
init_shape = b.assign(node.total_shape, reduce(b.mul, extents),
may_reorder=True)
node.body = b.stats(init_shape, node.body)
return node.total_shape
def loop_order(self, order, ndim=None):
"""
Returns arguments to (x)range() to process something in C or Fortran
order.
"""
if ndim is None:
ndim = self.function.ndim
if order == "C":
return self.c_loop_order(ndim)
else:
return self.f_loop_order(ndim)
def c_loop_order(self, ndim):
return ndim - 1, -1, -1
def f_loop_order(self, ndim):
return 0, ndim, 1
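# Example: with ndim == 3, c_loop_order(3) returns (2, -1, -1), so
# range(2, -1, -1) visits dims 2, 1, 0 and the innermost (last) dimension
# is wrapped in a loop first; f_loop_order(3) returns (0, 3, 1), wrapping
# dimension 0 first instead.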
def order_indices(self, indices):
"""
Put the indices of the for loops in the right iteration order. The
loops were build backwards (Fortran order), so for C we need to
reverse them.
Note: the indices are always ordered on the dimension they index
"""
if self.order == "C":
indices.reverse()
def ordered_loop(self, node, result_indices, lower=None, upper=None,
step=None, loop_order=None):
"""
Return a ForNode ordered in C or Fortran order.
"""
b = self.astbuilder
if lower is None:
lower = lambda i: None
if upper is None:
upper = lambda i: b.shape_index(i, self.function)
if loop_order is None:
loop_order = self.loop_order(self.order)
indices = []
for_loops = []
for i in range(*loop_order):
node = b.for_range_upwards(node, lower=lower(i), upper=upper(i),
step=step)
node.dim = i
for_loops.append(node)
indices.append(node.target)
self.order_indices(indices)
result_indices.extend(indices)
return for_loops[::-1], node
def _index_pointer(self, pointer, indices, strides):
"""
Return an element for an N-dimensional index into a strided array.
"""
b = self.astbuilder
return b.index_multiple(
b.cast(pointer, minitypes.char.pointer()),
[b.mul(index, stride) for index, stride in zip(indices, strides)],
dest_pointer_type=pointer.type)
def _strided_element_location(self, node, indices=None, strides_index_offset=0,
ndim=None, pointer=None):
"""
Like _index_pointer, but given only an array operand (and optionally indices). It first
needs to get the data pointer and stride nodes.
"""
indices = indices or self.indices
b = self.astbuilder
if ndim is None:
ndim = node.type.ndim
if pointer is None:
pointer = b.data_pointer(node)
indices = [index for index in indices[len(indices) - ndim:]]
strides = [b.stride(node, i + strides_index_offset)
for i, idx in enumerate(indices)]
node = self._index_pointer(pointer, indices, strides)
self.visitchildren(node)
return node
def get_any_array_argument(arguments):
for arg in arguments:
if arg.type is not None and arg.type.is_array:
return arg
class CanVectorizeVisitor(minivisitor.TreeVisitor):
"""
Determines whether we can vectorize a given expression. Currently only
support arithmetic on floats and doubles.
"""
can_vectorize = True
def _valid_type(self, type):
if type.is_array:
type = type.dtype
return type.is_float and type.itemsize in (4, 8)
def visit_FunctionNode(self, node):
array_dtypes = [
arg.type.dtype for arg in node.arguments[1:]
if arg.type is not None and arg.type.is_array]
all_the_same = miniutils.all(
dtype == array_dtypes[0] for dtype in array_dtypes)
self.can_vectorize = all_the_same and self._valid_type(array_dtypes[0])
if self.can_vectorize:
self.visitchildren(node)
def visit_BinopNode(self, node):
if node.lhs.type != node.rhs.type or not self._valid_type(node.lhs.type):
self.can_vectorize = False
else:
self.visitchildren(node)
def visit_UnopNode(self, node):
if self._valid_type(node.type):
self.visitchildren(node)
else:
self.can_vectorize = False
def visit_FuncCallNode(self, node):
self.can_vectorize = False
def visit_NodeWrapper(self, node):
# TODO: dispatch to self.context.can_vectorize
self.can_vectorize = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_if_should_vectorize(func):
"""
Visits the given method if we are vectorizing, otherwise visits the
superclass' method of :py:class:`VectorizingSpecializer`
"""
@wraps(func)
def wrapper(self, node):
if self.should_vectorize:
return func(self, node)
else:
method = getattr(super(VectorizingSpecializer, self), func.__name__)
return method(node)
return wrapper
class VectorizingSpecializer(Specializer):
"""
Generate explicitly vectorized code if supported.
:param vector_size: number of 32-bit operands in the vector
"""
is_vectorizing_specializer = True
can_vectorize_visitor = CanVectorizeVisitor
vectorized_equivalents = None
# set in subclasses
vector_size = None
def __init__(self, context, specialization_name=None):
super(VectorizingSpecializer, self).__init__(context,
specialization_name)
# temporary registers
self.temps = {}
# Flag to vectorize expressions in a vectorized loop
self.should_vectorize = True
@classmethod
def can_vectorize(cls, context, ast):
visitor = cls.can_vectorize_visitor(context)
visitor.visit(ast)
# print visitor.can_vectorize, ast.pos
return visitor.can_vectorize
@visit_if_should_vectorize
def visit_FunctionNode(self, node):
self.dtype = get_any_array_argument(node.arguments).type.dtype
return super(VectorizingSpecializer, self).visit_FunctionNode(node)
@visit_if_should_vectorize
def visit_Variable(self, variable):
if variable.type.is_array:
variable = self.astbuilder.vector_variable(variable, self.vector_size)
return variable
@visit_if_should_vectorize
def visit_BinopNode(self, node):
self.visitchildren(node)
if node.lhs.type.is_vector:
# TODO: promotion
node = self.astbuilder.vector_binop(node.operator,
node.lhs, node.rhs)
return node
@visit_if_should_vectorize
def visit_UnopNode(self, node):
self.visitchildren(node)
if node.operand.type.is_vector:
if node.operator == '+':
node = node.operand
else:
assert node.operator == '~'
raise NotImplementedError
node = self.astbuilder.vector_unop(node.type, node.operator,
self.visit(node.operand))
return node
@visit_if_should_vectorize
def visit_ForNode(self, node):
node.should_vectorize = True
self.visitchildren(node)
return node
@visit_if_should_vectorize
def visit_IfNode(self, node):
node.should_vectorize = True
self.visitchildren(node)
return node
def _modify_inner_loop(self, b, elements_per_vector, node, step):
"""
Turn 'for (i = 0; i < N; i++)' into 'for (i = 0; i < N - 3; i += 4)'
for a vector size of 4. In case the data size is not a multiple of
4, we can only SIMDize that part, and need a fixup loop for any
remaining elements. Returns the upper limit and the counter (N and i).
"""
i = node.step.lhs
N = node.condition.rhs
# Adjust step
step = b.mul(step, b.constant(elements_per_vector))
node.step = b.assign_expr(i, b.add(i, step))
# Adjust condition
vsize_minus_one = b.constant(elements_per_vector - 1)
node.condition.rhs = b.sub(N, vsize_minus_one)
return N, i
def fixup_loop(self, i, N, body, elements_per_vector):
"""
Generate a loop to fix up any remaining elements that didn't fit into
our SIMD vectors.
"""
b = self.astbuilder
cond = b.binop(minitypes.bool_, '<', i, N)
if elements_per_vector - 1 == 1:
fixup_loop = b.if_(cond, body)
else:
# fixup_loop = b.for_range_upwards(body, lower=i, upper=N)
init = b.noop_expr()
step = b.assign_expr(i, b.add(i, b.constant(1)))
fixup_loop = b.for_(body, init, cond, step, index=i)
fixup_loop.is_fixup = True
self.should_vectorize = False
fixup_loop = self.visit(fixup_loop)
self.should_vectorize = True
return fixup_loop
def process_inner_forloop(self, node, original_expression, step=None):
"""
Process an inner loop, adjusting the step accordingly and injecting
any temporary assignments where necessary. Returns the fixup loop,
needed when the data size is not a multiple of the vector size.
:param original_expression: original, unmodified, array expression (
the body of the NDIterate node)
"""
b = self.astbuilder
if step is None:
step = b.constant(1)
elements_per_vector = self.vector_size * 4 // self.dtype.itemsize
N, i = self._modify_inner_loop(b, elements_per_vector, node, step)
return self.fixup_loop(i, N, original_expression, elements_per_vector)
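# Plain-Python illustration (independent of the minivect AST) of the loop shape
# produced above: a stepped main loop over whole vectors plus a scalar fixup
# loop for any leftover tail elements.
def _vector_fixup_demo(data, elements_per_vector):
    n = len(data)
    total = 0.0
    i = 0
    # Main loop: for (i = 0; i < N - (vsize - 1); i += vsize)
    while i < n - (elements_per_vector - 1):
        total += sum(data[i:i + elements_per_vector])
        i += elements_per_vector
    # Fixup loop: handle the remaining elements one at a time.
    while i < n:
        total += data[i]
        i += 1
    return total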
class StridedCInnerContigSpecializer(OrderedSpecializer):
"""
Specialize on the first or last dimension being contiguous (depending
on the 'order' attribute).
"""
specialization_name = "inner_contig"
order = "C"
is_inner_contig_specializer = True
vectorized_equivalents = None
def __init__(self, context, specialization_name=None):
super(StridedCInnerContigSpecializer, self).__init__(
context, specialization_name)
self.indices = []
def _generate_inner_loop(self, b, node):
"""
Generate innermost loop, injecting the pointer assignments in the
right place
"""
loop = node
if len(self.indices) > 1:
for index in self.indices[:-2]:
loop = node.body
self.inner_loop = loop.body
loop.body = b.pragma_for(self.inner_loop)
node = self.omp_for(node)
else:
self.inner_loop = loop
node = self.omp_for(b.pragma_for(self.inner_loop))
return loop, node
def _vectorize_inner_loop(self, b, loop, node, original_expr):
"Vectorize the inner loop and insert the fixup loop"
if self.is_vectorizing_specializer:
fixup_loop = self.process_inner_forloop(self.inner_loop,
original_expr)
if len(self.indices) > 1:
loop.body = b.stats(loop.body, fixup_loop)
else:
node = b.stats(node, fixup_loop)
return node
def visit_NDIterate(self, node):
"""
Replace this node with ordered loops and a direct index into a
temporary data pointer in the contiguous dimension.
"""
b = self.astbuilder
assert not list(self.treepath(node, '//NDIterate'))
original_expr = specialize_ast(node.body)
# start by generating a C or Fortran ordered loop
self.function.for_loops, node = self.ordered_loop(node.body,
self.indices)
loop, node = self._generate_inner_loop(b, node)
result = self.visit(node)
node = self._vectorize_inner_loop(b, loop, node, original_expr)
return result
def index(self, loop_level):
if self.order == 'C':
return self.indices[loop_level]
else:
return self.indices[-loop_level]
def strided_indices(self):
"Return the list of strided indices for this order"
return self.indices[:-1]
def contig_index(self):
"The contiguous index"
return self.indices[-1]
def get_data_pointer(self, variable, loop_level):
return self.compute_inner_dim_pointer(variable, loop_level)
class StridedFortranInnerContigSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on the first dimension being contiguous.
"""
order = "F"
specialization_name = "inner_contig_fortran"
vectorized_equivalents = None
def strided_indices(self):
return self.indices[1:]
def contig_index(self):
return self.indices[0]
class StrengthReducingStridedSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on strided operands. If some operands are contiguous in the
dimension compatible with the order we are specializing for (the first
if Fortran, the last if C), then perform a direct index into a temporary
data pointer. For strided operands, perform strength reduction in the
inner dimension by adding the stride to the data pointer in each iteration.
"""
specialization_name = "strided"
order = "C"
is_strided_specializer = True
vectorized_equivalents = None
def matching_contiguity(self, type):
"""
Check whether the array operand for the given type can be directly
indexed.
"""
return ((type.is_c_contig and self.order == "C") or
(type.is_f_contig and self.order == "F"))
def visit_NDIterate(self, node):
b = self.astbuilder
outer_loop = super(StridedSpecializer, self).visit_NDIterate(node)
# outer_loop = self.strength_reduce_inner_dimension(outer_loop,
# self.inner_loop)
return outer_loop
def strength_reduce_inner_dimension(self, outer_loop, inner_loop):
"""
Reduce the strength of strided array operands in the inner dimension,
by adding the stride to the temporary pointer.
"""
b = self.astbuilder
outer_stats = []
stats = []
for arg in self.function.arguments:
type = arg.variable.type
if type is None:
continue
contig = self.matching_contiguity(type)
if arg.variable in self.pointers and not contig:
p = self.pointers[arg.variable]
if self.order == "C":
inner_dim = type.ndim - 1
else:
inner_dim = 0
# Implement: temp_stride = strides[inner_dim] / sizeof(dtype)
stride = b.stride(arg.variable, inner_dim)
temp_stride = b.temp(stride.type.qualify("const"),
name="temp_stride")
outer_stats.append(
b.assign(temp_stride, b.div(stride, b.sizeof(type.dtype))))
# Implement: temp_pointer += temp_stride
stats.append(b.assign(p, b.add(p, temp_stride)))
inner_loop.body = b.stats(inner_loop.body, *stats)
outer_stats.append(outer_loop)
return b.stats(*outer_stats)
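# Plain-Python illustration (independent of the minivect AST) of the strength
# reduction described above: rather than recomputing base + i * stride on every
# iteration, keep a running offset and add the stride once per step.
def _strength_reduction_demo(flat_data, count, stride):
    total = 0
    offset = 0
    for _ in range(count):
        total += flat_data[offset]  # dereference the strided "pointer"
        offset += stride            # temp_pointer += temp_stride
    return total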
class StrengthReducingStridedFortranSpecializer(
StridedFortranInnerContigSpecializer, StrengthReducingStridedSpecializer):
"""
Specialize on Fortran order for strided operands and apply strength
reduction in the inner dimension.
"""
specialization_name = "strided_fortran"
order = "F"
vectorized_equivalents = None
class StridedSpecializer(StridedCInnerContigSpecializer):
"""
Specialize on strided operands. If some operands are contiguous in the
dimension compatible with the order we are specializing for (the first
if Fortran, the last if C), then perform a direct index into a temporary
data pointer.
"""
specialization_name = "strided"
order = "C"
vectorized_equivalents = None
is_strided_specializer = True
def matching_contiguity(self, type):
"""
Check whether the array operand for the given type can be directly
indexed.
"""
return ((type.is_c_contig and self.order == "C") or
(type.is_f_contig and self.order == "F"))
def _element_location(self, variable, loop_level):
"""
Generate a strided or directly indexed load of a single element.
"""
#if variable in self.pointers:
if self.matching_contiguity(variable.type):
return super(StridedSpecializer, self)._element_location(variable,
loop_level)
b = self.astbuilder
pointer = self.get_data_pointer(variable, loop_level)
indices = [self.contig_index()]
if self.order == "C":
inner_dim = variable.type.ndim - 1
else:
inner_dim = 0
strides = [b.stride(variable, inner_dim)]
return self._index_pointer(pointer, indices, strides)
class StridedFortranSpecializer(StridedFortranInnerContigSpecializer,
StridedSpecializer):
"""
Specialize on Fortran order for strided operands.
"""
specialization_name = "strided_fortran"
order = "F"
vectorized_equivalents = None
if strength_reduction:
StridedSpecializer = StrengthReducingStridedSpecializer
StridedFortranSpecializer = StrengthReducingStridedFortranSpecializer
class ContigSpecializer(OrderedSpecializer):
"""
Specialize on all operands being contiguous (either all C or all F).
"""
specialization_name = "contig"
is_contig_specializer = True
def visit_FunctionNode(self, node):
node = super(ContigSpecializer, self).visit_FunctionNode(node)
self.astbuilder.create_function_type(node, strides_args=False)
return node
def visit_NDIterate(self, node):
"""
Generate a single ForNode over the total data size.
"""
b = self.astbuilder
original_expr = specialize_ast(node.body)
node = super(ContigSpecializer, self).visit_NDIterate(node)
for_node = b.for_range_upwards(node.body,
upper=self.function.total_shape)
self.function.for_loops = [for_node]
self.indices = [for_node.index]
node = self.omp_for(b.pragma_for(for_node))
self.target = for_node.target
node = self.visit(node)
if self.is_vectorizing_specializer:
fixup_loop = self.process_inner_forloop(for_node, original_expr)
node = b.stats(node, fixup_loop)
return node
def visit_StridePointer(self, node):
return None
def _element_location(self, node, loop_level):
"Directly index the data pointer"
data_pointer = self.astbuilder.data_pointer(node)
return self.astbuilder.index(data_pointer, self.target)
def index(self, loop_level):
return self.target
def contig_index(self):
return self.target
class CTiledStridedSpecializer(StridedSpecializer):
"""
Generate tiled code for the last two (C) or first two (F) dimensions.
The blocksize may be overridden through the get_blocksize method, in
a specializer subclass or mixin (see miniast.Context.specializer_mixin_cls).
"""
specialization_name = "tiled"
order = "C"
is_tiled_specializer = True
vectorized_equivalents = None
def get_blocksize(self):
"""
Get the tile size. Override in subclasses to provide e.g. parametric
tiling.
"""
return self.astbuilder.constant(64)
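# Example override (hypothetical subclass or mixin, hooked in through
# miniast.Context.specializer_mixin_cls as mentioned above):
#
#     def get_blocksize(self):
#         return self.astbuilder.constant(128)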
def tiled_order(self):
"Tile in the last two dimensions"
return self.function.ndim - 1, self.function.ndim - 1 - 2, -1
def untiled_order(self):
return self.function.ndim - 1 - 2, -1, -1
def visit_NDIterate(self, node):
assert self.function.ndim >= 2
return self._tile_in_two_dimensions(node)
def _tile_in_two_dimensions(self, node):
"""
This version generates tiling loops in the first or last two dimensions
(depending on C or Fortran order).
"""
b = self.astbuilder
self.tiled_indices = []
self.indices = []
self.blocksize = self.get_blocksize()
# Generate the two outer tiling loops
tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
controlling_loops, body = self.ordered_loop(
tiled_loop_body, self.tiled_indices, step=self.blocksize,
loop_order=self.tiled_order())
del tiled_loop_body.stats[:]
# Generate some temporaries to store the upper limit of the inner
# tiled loops
upper_limits = {}
stats = []
# sort the indices in forward order, to match up with the ordered
# indices
tiled_order = sorted(range(*self.tiled_order()))
for i, index in zip(tiled_order, self.tiled_indices):
upper_limit = b.temp(index.type)
tiled_loop_body.stats.append(
b.assign(upper_limit, b.min(b.add(index, self.blocksize),
b.shape_index(i, self.function))))
upper_limits[i] = upper_limit
tiled_indices = dict(zip(tiled_order, self.tiled_indices))
def lower(i):
if i in tiled_indices:
return tiled_indices[i]
return None
def upper(i):
if i in upper_limits:
return upper_limits[i]
return b.shape_index(i, self.function)
# Generate the inner tiled loops
outer_for_node = node.body
inner_body = node.body
tiling_loops, inner_loops = self.ordered_loop(
node.body, self.indices,
lower=lower, upper=upper,
loop_order=self.tiled_order())
tiled_loop_body.stats.append(inner_loops)
innermost_loop = inner_loops.body
# Generate the outer loops (in case the array operands have more than
# two dimensions)
indices = []
outer_loops, body = self.ordered_loop(body, indices,
loop_order=self.untiled_order())
body = self.omp_for(body)
# At this point, 'self.indices' are the indices of the tiled loop
# (the indices in the first two dimensions for Fortran,
# the indices in the last two dimensions for C)
# 'indices' are the indices of the outer loops
if self.order == "C":
self.indices = indices + self.indices
else:
self.indices = self.indices + indices
# if strength_reduction:
# body = self.strength_reduce_inner_dimension(body, innermost_loop)
for dim, for_node in enumerate(controlling_loops):
for_node.is_controlling_loop = True
for_node.blocksize = self.blocksize
for dim, for_node in enumerate(tiling_loops):
for_node.is_tiling_loop = True
self.set_dims(controlling_loops)
self.set_dims(tiling_loops)
self.function.controlling_loops = controlling_loops
self.function.tiling_loops = tiling_loops
self.function.outer_loops = outer_loops
self.function.for_loops = outer_loops + controlling_loops + tiling_loops
self.function.lower_tiling_limits = tiled_indices
self.function.upper_tiling_limits = upper_limits
return self.visit(body)
def set_dims(self, tiled_loops):
"Set the 'dim' attributes of the tiling and controlling loops"
# We need to reverse our tiled order, since this order is used to
# build up the for nodes in reverse. We have an ordered list of for
# nodes.
tiled_order = reversed(range(*self.tiled_order()))
for dim, for_node in zip(tiled_order, tiled_loops):
for_node.dim = dim
def _tile_in_all_dimensions(self, node):
"""
This version generates tiling loops in all dimensions.
"""
b = self.astbuilder
self.tiled_indices = []
self.indices = []
self.blocksize = self.get_blocksize()
tiled_loop_body = b.stats(b.constant(0)) # fake empty loop body
controlling_loops, body = self.ordered_loop(tiled_loop_body,
self.tiled_indices,
step=self.blocksize)
body = self.omp_for(body)
del tiled_loop_body.stats[:]
upper_limits = []
stats = []
for i, index in enumerate(self.tiled_indices):
upper_limit = b.temp(index.type)
tiled_loop_body.stats.append(
b.assign(upper_limit, b.min(b.add(index, self.blocksize),
b.shape_index(i, self.function))))
upper_limits.append(upper_limit)
tiling_loops, inner_body = self.ordered_loop(
node.body, self.indices,
lower=lambda i: self.tiled_indices[i],
upper=lambda i: upper_limits[i])
tiled_loop_body.stats.append(inner_body)
self.function.controlling_loops = controlling_loops
self.function.tiling_loops = tiling_loops
self.function.outer_loops = []
self.function.for_loops = tiling_loops
return self.visit(body)
def strided_indices(self):
return self.indices[:-1] + [self.tiled_indices[1]]
def _element_location(self, variable, loop_level):
"""
Return data + i * strides[0] + j * strides[1] when we are not using
strength reduction. Otherwise generate temp_data += strides[1]. For
this to work, temp_data must be set to
data + i * strides[0] + outer_j * strides[1]. This happens through
_compute_inner_dim_pointers with tiled=True.
"""
if strength_reduction:
return super(CTiledStridedSpecializer, self)._element_location(
variable, loop_level)
else:
return self._strided_element_location(variable)
def get_data_pointer(self, variable, loop_level):
return self.compute_inner_dim_pointer(variable, loop_level, tiled=True)
class FTiledStridedSpecializer(StridedFortranSpecializer,
#StrengthReducingStridedFortranSpecializer,
CTiledStridedSpecializer):
"Tile in Fortran order"
specialization_name = "tiled_fortran"
order = "F"
def tiled_order(self):
"Tile in the first two dimensions"
return 0, 2, 1
def untiled_order(self):
return 2, self.function.ndim, 1
def strided_indices(self):
return [self.tiled_indices[0]] + self.indices[1:]
#
### Vectorized specializer equivalents
#
def create_vectorized_specializers(specializer_cls):
"""
Creates Vectorizing specializer classes from the given specializer for
SSE and AVX.
"""
bases = (VectorizingSpecializer, specializer_cls)
d = dict(vectorized_equivalents=None)
name = 'Vectorized%%d%s' % specializer_cls.__name__
cls1 = type(name % 4, bases, dict(d, vector_size=4))
cls2 = type(name % 8, bases, dict(d, vector_size=8))
return cls1, cls2
ContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(ContigSpecializer))
StridedCInnerContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(StridedCInnerContigSpecializer))
StridedFortranInnerContigSpecializer.vectorized_equivalents = (
create_vectorized_specializers(StridedFortranInnerContigSpecializer))
#
### Create dict of all specializers
#
_specializer_list = [
ContigSpecializer,
StridedCInnerContigSpecializer, StridedFortranInnerContigSpecializer,
StridedSpecializer, StridedFortranSpecializer,
CTiledStridedSpecializer, FTiledStridedSpecializer,
]
specializers = {}
for sp in _specializer_list:
specializers[sp.specialization_name] = sp
vectorizers = getattr(sp, 'vectorized_equivalents', None)
if vectorizers:
specializers[sp.specialization_name + '_sse'] = vectorizers[0]
specializers[sp.specialization_name + '_avx'] = vectorizers[1]
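# Example: specializers['contig'] maps to ContigSpecializer, and the loop above
# additionally registers its vectorized equivalents under 'contig_sse'
# (vector size 4) and 'contig_avx' (vector size 8).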
########NEW FILE########
__FILENAME__ = llvm_testutils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from . import testutils
from .testutils import *
llvm_context = context = get_llvm_context()
b = context.astbuilder
#context.debug = True
# context.debug_elements = True
def get_array(shape=(130, 160), dtype=np.float32, order='C'):
return np.arange(np.prod(shape), dtype=dtype).reshape(shape, order=order)
def specialize(specializer_cls, ast, context=None, print_tree=False):
return testutils.specialize(specializer_cls, ast,
context=context or llvm_context,
print_tree=print_tree)
class MiniFunction(miniutils.MiniFunction):
def __init__(self, sp_name, variables, expr, name=None, context=None):
context = context or llvm_context
specializer = sps[sp_name]
super(MiniFunction, self).__init__(context, specializer, variables,
expr, name)
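# Illustrative sketch (not part of the original module; it follows the pattern
# used in the test modules below and is not verified independently): build and
# run a small elementwise kernel through the 'strided' specialization.
def _example_add_kernel():
    out, op1, op2 = variables = build_vars(double[:, :], double[:, :], double[:, :])
    expr = b.assign(out, b.add(op1, op2))
    func = MiniFunction('strided', variables, expr, 'example_add')
    x = get_array((4, 6), dtype=np.float64)
    y = get_array((4, 6), dtype=np.float64)
    return func(x, y)  # expected to equal x + y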
########NEW FILE########
__FILENAME__ = testutils
# -*- coding: utf-8 -*-
"""
Module providing some test utilities.
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import miniast
import specializers
import minitypes
import miniutils
import codegen
import treepath
from minitypes import *
from miniutils import *
from xmldumper import *
from specializers import specializers as sps
from ctypes_conversion import get_data_pointer, convert_to_ctypes
def getcontext():
return miniast.CContext()
def get_llvm_context():
context = miniast.LLVMContext()
context.shape_type = minitypes.npy_intp.pointer()
context.strides_type = context.shape_type
return context
def build_vars(*types):
return [b.variable(type, 'op%d' % i) for i, type in enumerate(types)]
def build_function(variables, body, name=None):
qualify = lambda type: type.qualify("const", "restrict")
func = context.astbuilder.build_function(variables, body, name)
func.shape.type = qualify(func.shape.type)
for arg in func.arguments:
if arg.type.is_array:
arg.data_pointer.type = qualify(arg.data_pointer.type)
arg.strides_pointer.type = qualify(arg.strides_pointer.type)
return func
def specialize(specializer_cls, ast, context=None, print_tree=False):
context = context or getcontext()
return miniutils.specialize(context, specializer_cls, ast,
print_tree=print_tree)
def run(specializers, ast):
context = getcontext()
for result in context.run(ast, specializers):
_, specialized_ast, _, (proto, impl) = result
yield specialized_ast, impl
def toxml(function):
return xmldumper.XMLDumper(context).visit(function)
# Convenience variables
context = getcontext()
b = context.astbuilder
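# Illustrative sketch (assumed usage, mirroring the test modules that import
# this file): build a tiny array expression and specialize it for contiguous
# operands with the C backend.
def _example_specialize():
    out, op = variables = build_vars(double[:, :], double[:, :])
    func = build_function(variables, b.assign(out, b.mul(op, op)))
    result_ast, code_output = specialize(sps['contig'], func)
    return code_output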
########NEW FILE########
__FILENAME__ = test_hoisting
# -*- coding: utf-8 -*-
"""
Test loop-invariant code motion. Write more tests with different associations.
NOTE: most of the tests are part of Cython:
https://github.com/markflorisson88/cython/tree/_array_expressions/tests/array_expressions
"""
from __future__ import print_function, division, absolute_import
from .testutils import *
import pytest
cinner = sps['inner_contig']
@pytest.mark.skipif('not xmldumper.have_lxml')
def test_hoist():
"""
>> test_hoist()
"""
type1 = double[:, :]
type2 = double[:, :]
type1.broadcasting = (False, False)
type2.broadcasting = (False, True)
var1, var2 = vars = build_vars(type1, type2)
expr = b.add(var1, b.add(var2, var2))
expr = b.add(var1, var2)
body = b.assign(var1, expr)
func = build_function(vars, body)
result_ast, code_output = specialize(cinner, func)
e = toxml(result_ast)
assert e.xpath('not(//NDIterate)')
# Check the loop level of the hoisted expression
op1, op2 = e.xpath(
'//FunctionNode//ArrayFunctionArgument/DataPointer/@value')
broadcasting_pointer_temp, = e.xpath(
'//AssignmentExpr[./rhs/DataPointer[@value="%s"]]/lhs/TempNode/@value' % op2)
q = '//ForNode[.//AssignmentExpr/rhs//TempNode[@value="%s"]]/@loop_level'
loop_level, = e.xpath(q % broadcasting_pointer_temp)
assert loop_level == "0", loop_level
def test_hoist_3d():
"""
>>> test_hoist_3d()
"""
type1 = npy_intp[:, :, :]
type2 = npy_intp[:, :, :]
type3 = npy_intp[:, :, :]
type1.broadcasting = (False, True, True)
type2.broadcasting = (True, False, True)
type3.broadcasting = (True, True, False)
out_type = npy_intp[:, :, :]
out, var1, var2, var3 = vars = build_vars(out_type, type1, type2, type3)
#v1 = b.mul(var1, var2)
#v2 = b.mul(var2, var2)
#v3 = b.mul(var3, var3)
expr = b.mul(b.mul(var1, var2), var3)
body = b.assign(out, expr)
func = build_function(vars, body)
result_ast, code_output = specialize(cinner, func)
if __name__ == '__main__':
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = test_operators
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from .llvm_testutils import *
def build_expr(type, op):
out, v1, v2 = vars = build_vars(type, type, type)
expr = b.assign(out, b.binop(type, op, v1, v2))
return vars, expr
def build_kernel(specialization_name, ndim, type, op, **kw):
vars, expr = build_expr(minitypes.ArrayType(type, ndim, **kw), op)
func = MiniFunction(specialization_name, vars, expr, '%s_%s_%s' % (specialization_name, type.name, op))
return func
comparison_operators = ['<', '<=', '>', '>=', '==', '!=']
arithmetic_operators = ['+', '-', '*', '/', '%'] # + ['**'] + # + comparison_operators
#bitwise_operators = ['<<', '>>', '|', '^', '&']
bitwise_operators = ['|', '^', '&']
a = np.random.random_sample((10, 20))
def _impl(type, op, x, y):
func = build_kernel('strided', 2, type, op)
dtype = minitypes.map_minitype_to_dtype(type)
x = x.astype(dtype)
y = y.astype(dtype)
numpy_result = eval('a %s b' % (op,), {'a': x, 'b': y})
our_result = func(x, y)
assert np.all(numpy_result == our_result)
@parametrize(type=[short, int32, int64, float_, double], op=arithmetic_operators)
def test_arithmetic_operators(type, op):
x = a
y = np.arange(1, 10 * 20 + 1).reshape(10, 20)
_impl(type, op, x, y)
@parametrize(type=[short, int32, int64], op=bitwise_operators)
def test_bitwise_operators(type, op):
_impl(type, op, a * 100, a * 10)
########NEW FILE########
__FILENAME__ = test_specializations
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from .llvm_testutils import *
def build_expr(type):
out, v1, v2, v3 = vars = build_vars(type, type, type, type)
expr = b.assign(out, b.add(v1, b.mul(v2, v3)))
return vars, expr
def build_kernel(specialization_name, ndim, **kw):
vars, expr = build_expr(minitypes.ArrayType(float_, ndim, **kw))
func = MiniFunction(specialization_name, vars, expr, '%s_%d' % (specialization_name, ndim))
return func
def build_kernels(specialization_name, min_ndim=1, max_ndim=3, **kw):
return [build_kernel(specialization_name, ndim)
for ndim in range(min_ndim, max_ndim + 1)]
arrays2d = [get_array(), get_array(), get_array()]
arrays1d = [a[0] for a in arrays2d]
arrays3d = [a[:, None, :] for a in arrays2d]
arrays = [(arrays1d, arrays2d, arrays3d)]
"""
Generate tests, but skip vectorized versions (not supported for llvm
code backend yet)
"""
specializations = [s for s in sps.keys()
if not s.endswith(('_sse', '_avx'))]
print(specializations)
@parametrize(arrays=arrays, specialization_name=specializations, ndim=range(1, 4))
def test_specializations(arrays, specialization_name, ndim):
if 'tiled' in specialization_name and ndim < 2:
return
# FIXME: these fail
if specialization_name == 'inner_contig_fortran' and ndim >= 2:
return
if 'fortran' in specialization_name:
arrays = [(x.T, y.T, z.T) for x, y, z in arrays]
func = build_kernel(specialization_name, ndim)
x, y, z = arrays[ndim - 1]
print((x.strides, y.strides, z.strides))
assert np.all(func(x, y, z) == x + y * z)
########NEW FILE########
__FILENAME__ = treepath
# -*- coding: utf-8 -*-
"""
Taken from Cython/Compiler/TreePath.py
A simple XPath-like language for tree traversal.
This works by creating a filter chain of generator functions. Each
function selects a part of the expression, e.g. a child node, a
specific descendant or a node that holds an attribute.
"""
from __future__ import print_function, division, absolute_import
import re
import sys
path_tokenizer = re.compile(
"("
"'[^']*'|\"[^\"]*\"|"
"//?|"
"\(\)|"
"==?|"
"[/.*\[\]\(\)@])|"
"([^/\[\]\(\)@=\s]+)|"
"\s+"
).findall
def iterchildren(node, attr_name):
# returns an iterable of all child nodes of that name
child = getattr(node, attr_name)
if child is not None:
if isinstance(child, list):
return child
else:
return [child]
else:
return ()
def _get_first_or_none(it):
try:
try:
_next = it.__next__
except AttributeError:
return next(it)
else:
return _next()
except StopIteration:
return None
def type_name(node):
return node.__class__.__name__.split('.')[-1]
def parse_func(next, token):
name = token[1]
token = next()
if token[0] != '(':
raise ValueError("Expected '(' after function name '%s'" % name)
predicate = handle_predicate(next, token)
return name, predicate
def handle_func_not(next, token):
"""
not(...)
"""
name, predicate = parse_func(next, token)
def select(result):
for node in result:
if _get_first_or_none(predicate([node])) is None:
yield node
return select
def handle_name(next, token):
"""
/NodeName/
or
func(...)
"""
name = token[1]
if name in functions:
return functions[name](next, token)
def select(result):
for node in result:
for attr_name in node.child_attrs:
for child in iterchildren(node, attr_name):
if type_name(child) == name:
yield child
return select
def handle_star(next, token):
"""
/*/
"""
def select(result):
for node in result:
for name in node.child_attrs:
for child in iterchildren(node, name):
yield child
return select
def handle_dot(next, token):
"""
/./
"""
def select(result):
return result
return select
def handle_descendants(next, token):
"""
//...
"""
token = next()
if token[0] == "*":
def iter_recursive(node):
for name in node.child_attrs:
for child in iterchildren(node, name):
yield child
for c in iter_recursive(child):
yield c
elif not token[0]:
node_name = token[1]
def iter_recursive(node):
for name in node.child_attrs:
for child in iterchildren(node, name):
if type_name(child) == node_name:
yield child
for c in iter_recursive(child):
yield c
else:
raise ValueError("Expected node name after '//'")
def select(result):
for node in result:
for child in iter_recursive(node):
yield child
return select
def handle_attribute(next, token):
token = next()
if token[0]:
raise ValueError("Expected attribute name")
name = token[1]
value = None
try:
token = next()
except StopIteration:
pass
else:
if token[0] == '=':
value = parse_path_value(next)
if sys.version_info >= (2,6) or (sys.version_info >= (2,4) and '.' not in name):
import operator
readattr = operator.attrgetter(name)
else:
name_path = name.split('.')
def readattr(node):
attr_value = node
for attr in name_path:
attr_value = getattr(attr_value, attr)
return attr_value
if value is None:
def select(result):
for node in result:
try:
attr_value = readattr(node)
except AttributeError:
continue
if attr_value is not None:
yield attr_value
else:
def select(result):
for node in result:
try:
attr_value = readattr(node)
except AttributeError:
continue
if attr_value == value:
yield attr_value
return select
def parse_path_value(next):
token = next()
value = token[0]
if value:
if value[:1] == "'" or value[:1] == '"':
return value[1:-1]
try:
return int(value)
except ValueError:
pass
else:
name = token[1].lower()
if name == 'true':
return True
elif name == 'false':
return False
raise ValueError("Invalid attribute predicate: '%s'" % value)
def handle_predicate(next, token):
token = next()
selector = []
while token[0] != ']':
selector.append( operations[token[0]](next, token) )
try:
token = next()
except StopIteration:
break
else:
if token[0] == "/":
token = next()
if not token[0] and token[1] == 'and':
return logical_and(selector, handle_predicate(next, token))
def select(result):
for node in result:
subresult = iter((node,))
for select in selector:
subresult = select(subresult)
predicate_result = _get_first_or_none(subresult)
if predicate_result is not None:
yield node
return select
def logical_and(lhs_selects, rhs_select):
def select(result):
for node in result:
subresult = iter((node,))
for select in lhs_selects:
subresult = select(subresult)
predicate_result = _get_first_or_none(subresult)
subresult = iter((node,))
if predicate_result is not None:
for result_node in rhs_select(subresult):
yield node
return select
operations = {
"@": handle_attribute,
"": handle_name,
"*": handle_star,
".": handle_dot,
"//": handle_descendants,
"[": handle_predicate,
}
functions = {
'not' : handle_func_not
}
def _build_path_iterator(path):
# parse pattern
stream = iter([ (special,text)
for (special,text) in path_tokenizer(path)
if special or text ])
try:
_next = stream.__next__
except AttributeError:
# Python 3
def _next():
return next(stream)
token = _next()
selector = []
while True:
try:
selector.append(operations[token[0]](_next, token))
except StopIteration:
raise ValueError("invalid path")
try:
token = _next()
if token[0] == "/":
token = _next()
except StopIteration:
break
return selector
# main module API
def iterfind(node, path):
selector_chain = _build_path_iterator(path)
result = iter((node,))
for select in selector_chain:
result = select(result)
return result
def find_first(node, path):
return _get_first_or_none(iterfind(node, path))
def find_all(node, path):
return list(iterfind(node, path))
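# Illustrative usage sketch (not part of the original module). Assuming an
# AST whose node classes expose `child_attrs` as the tree walker above
# expects, queries could look like:
#
#     for_loops   = find_all(func_ast, "//ForNode")
#     first_add   = find_first(func_ast, "//BinopNode[@operator = '+']")
#     lazy_result = iterfind(func_ast, "//*[@type]")
#
# `iterfind` is lazy; `find_all` materializes it into a list, and
# `find_first` returns the first match or None.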
########NEW FILE########
__FILENAME__ = type_promoter
# -*- coding: utf-8 -*-
"""
Promote and demote values of differing types in a minivect AST. This is run
before code generation. In LLVM, operand types must be identical for binary
operations.
"""
from __future__ import print_function, division, absolute_import
import sys
import copy
from . import minivisitor
from . import miniutils
from . import minitypes
from . import minierror
comparison_ops = set(['<', '>', '==', '!=', '>=', '<=',])
class TypePromoter(minivisitor.GenericTransform):
"""
Promote and demote values of differing types.
"""
def resolve_type(self, node):
if node.type.is_array:
node.type = node.type.dtype
def promote(self, dst_type, node):
return self.context.astbuilder.promote(dst_type, node)
def visit_UnopNode(self, node):
self.resolve_type(node)
node.operand = self.promote(node.type, self.visit(node.operand))
return node
def visit_BinopNode(self, node):
self.visitchildren(node)
self.resolve_type(node)
if node.operator in comparison_ops:
dst_type = self.context.promote_types(node.lhs.type, node.rhs.type)
else:
dst_type = node.type
if dst_type.is_pointer:
return node
return self.handle_binop(dst_type, node)
def visit_VectorStoreNode(self, node):
self.visitchildren(node)
return node
def handle_binop(self, dst_type, node):
node.lhs = self.promote(dst_type, node.lhs)
node.rhs = self.promote(dst_type, node.rhs)
return node
def visit_AssignmentExpr(self, node):
self.visitchildren(node)
self.resolve_type(node)
return self.handle_binop(node.lhs.type, node)
def visit_ResolvedVariable(self, node):
return self.visit(node.element)
########NEW FILE########
__FILENAME__ = xmldumper
# -*- coding: utf-8 -*-
"""
Convert a miniast to an XML document using ElementTree. This allows us to
write XPath unit tests, or just serialize the AST.
"""
from __future__ import print_function, division, absolute_import
__all__ = ['etree', 'tostring', 'XMLDumper']
from . import miniutils
try:
from lxml import etree
have_lxml = True
except ImportError:
have_lxml = False
try:
# Python 2.5
from xml.etree import cElementTree as etree
except ImportError:
try:
# Python 2.5
from xml.etree import ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
etree = miniutils.UnavailableImport("elementtree")
from . import minivisitor
class XMLDumper(minivisitor.PrintTree):
loop_level = 0
def visit_FunctionNode(self, node):
self.treebuilder = etree.TreeBuilder()
self.visit_Node(node)
return self.treebuilder.close()
def start(self, node, attrs={}):
name = type(node).__name__
format_value = self.format_value(node)
if format_value:
attrs = dict(attrs,
value=format_value,
id=hex(id(node)),
type=node.type)
        attrs = dict((k, str(v)) for k, v in attrs.items())  # items() works on Python 2 and 3
self.treebuilder.start(name, attrs)
return name
def visit_BinaryOperationNode(self, node):
name = self.start(node)
self.treebuilder.start('lhs', {})
self.visit(node.lhs)
self.treebuilder.end('lhs')
self.treebuilder.start('rhs', {})
self.visit(node.rhs)
self.treebuilder.end('rhs')
self.treebuilder.end(name)
def visit_ForNode(self, node):
attrs = dict(loop_level=self.loop_level,
is_fixup=node.is_fixup,
is_controlling_loop=node.is_controlling_loop,
is_tiling_loop=node.is_tiling_loop)
self.loop_level += 1
self.visit_Node(node, attrs)
self.loop_level -= 1
def visit_Node(self, node, attrs={}):
name = self.start(node, attrs)
self.visitchildren(node)
self.treebuilder.end(name)
def tostring(xml_root_element):
et = etree.ElementTree(xml_root_element)
kw = {}
if have_lxml:
kw['pretty_print'] = True
return etree.tostring(et, encoding='UTF-8', **kw)
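# Illustrative usage sketch (not part of the original module); the exact
# constructor arguments of PrintTree are an assumption here:
#
#     dumper = XMLDumper(context)
#     xml_root = dumper.visit(minivect_function_node)
#     print(tostring(xml_root))
#
# With lxml installed, the resulting element tree can then be queried with
# XPath in unit tests, e.g. xml_root.xpath("//ForNode[@is_tiling_loop='True']").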
########NEW FILE########
__FILENAME__ = missing
import ast
class FixMissingLocations(ast.NodeVisitor):
"""
Fix missing source position information.
"""
def __init__(self, lineno, col_offset, override=False):
self.lineno = lineno
self.col_offset = col_offset
self.override = override
def visit(self, node):
super(FixMissingLocations, self).visit(node)
if not hasattr(node, 'lineno') or self.override:
node.lineno = self.lineno
node.col_offset = self.col_offset
else:
self.lineno = node.lineno
self.col_offset = node.col_offset
########NEW FILE########
__FILENAME__ = multiarray_api
# -*- coding: utf-8 -*-
'''multiarray_api
Defines a utility class for generating LLVM code that retrieves values
out of the Numpy array C API PyCObject/capsule.
'''
from __future__ import print_function, division, absolute_import
# ______________________________________________________________________
import ctypes
import llvm.core as lc
import llvm.ee as le
from numpy.core.multiarray import _ARRAY_API
from .llvm_types import _int1, _int8, _int32, _int64, _intp, \
_void_star, _void_star_star, \
_numpy_struct, _numpy_array, _pyobject_head_struct_p, _numpy_array_field_ofs
from .scrape_multiarray_api import get_include, process_source
from numba import PY3
if PY3:
xrange = range
# ______________________________________________________________________
try:
PyCObject_AsVoidPtr = ctypes.pythonapi.PyCObject_AsVoidPtr
except AttributeError:
def PyCObject_AsVoidPtr(o):
raise TypeError("Not available")
else:
PyCObject_AsVoidPtr.restype = ctypes.c_void_p
PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
PyCObject_GetDesc = ctypes.pythonapi.PyCObject_GetDesc
PyCObject_GetDesc.restype = ctypes.c_void_p
PyCObject_GetDesc.argtypes = [ctypes.py_object]
try:
PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid
except AttributeError:
def PyCapsule_IsValid(capsule, name):
raise TypeError("Not available")
def PyCapsule_GetPointer(capsule, name):
raise TypeError("Not available")
else:
PyCapsule_IsValid.restype = ctypes.c_int
PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p]
PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer
PyCapsule_GetPointer.restype = ctypes.c_void_p
PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
PyCapsule_GetContext = ctypes.pythonapi.PyCapsule_GetContext
PyCapsule_GetContext.restype = ctypes.c_void_p
PyCapsule_GetContext.argtypes = [ctypes.py_object]
class MultiarrayAPI (object):
_type_map = {
'char' : _int8,
'int' : _int32, # Based on mixed gcc/clang experiments,
# assuming sizeof(int) == 4 appears to
# hold true, even on 64-bit systems.
        'unsigned char' : _int8, # XXX Loses unsignedness
'unsigned int' : _int32, # XXX
'void' : lc.Type.void(),
'npy_bool' : _int1,
'npy_intp' : _intp,
'npy_uint32' : _int32, # XXX
'PyArrayObject' : _numpy_struct,
'double' : lc.Type.double(),
'size_t' : _intp, # XXX
'npy_int64' : _int64,
'npy_datetime' : _int64, # npy_common.h
'npy_timedelta' : _int64, # npy_common.h
}
@classmethod
def non_fn_ty_to_llvm (cls, c_ty_str):
npointer = c_ty_str.count('*')
if npointer == 0:
base_ty = c_ty_str
else:
base_ty = c_ty_str[:-npointer].strip()
if base_ty == 'void' and npointer > 0:
base_ty = _int8
elif base_ty not in cls._type_map:
if npointer > 0:
base_ty = _int8 # Basically cast into void *
else:
base_ty = _int32 # Or an int.
else:
base_ty = cls._type_map[base_ty]
ret_val = base_ty
for _ in xrange(npointer):
ret_val = lc.Type.pointer(ret_val)
return ret_val
@classmethod
def c_ty_str_to_llvm (cls, c_ty_str):
ty_str_fn_split = [substr.strip() for substr in c_ty_str.split('(*)')]
ret_val = cls.non_fn_ty_to_llvm(ty_str_fn_split[0])
if len(ty_str_fn_split) > 1:
arg_ty_strs = ty_str_fn_split[1][1:-1].split(', ')
if len(arg_ty_strs) == 1 and arg_ty_strs[0].strip() == 'void':
arg_ty_strs = []
arg_tys = [cls.non_fn_ty_to_llvm(arg_ty_str.strip())
for arg_ty_str in arg_ty_strs]
ret_val = lc.Type.pointer(lc.Type.function(ret_val, arg_tys))
return ret_val
def _add_loader (self, symbol_name, symbol_index, symbol_type):
def _load_symbol (module, builder):
api = module.get_global_variable_named('PyArray_API')
load_val = builder.load(
builder.gep(
builder.load(api),
[lc.Constant.int(_int32, symbol_index)]))
return builder.bitcast(load_val, symbol_type, name=symbol_name)
fn_name = "load_" + symbol_name
_load_symbol.__name__ = fn_name
setattr(self, fn_name, _load_symbol)
return _load_symbol
def __init__ (self, include_source_path = None):
if include_source_path is None:
include_source_path = get_include()
self.api_map = process_source(include_source_path)
for symbol_name, (symbol_index, c_ty_str) in self.api_map.items():
symbol_type = self.c_ty_str_to_llvm(c_ty_str)
self._add_loader(symbol_name, symbol_index, symbol_type)
setattr(self, symbol_name + '_ty', symbol_type)
self.api_addr = None
# Fallback for missing API
self._add_missing_PyArray_SetBaseObject()
def _add_missing_PyArray_SetBaseObject(self):
'''Implement PyArray_SetBaseObject for old numpy API
'''
if hasattr(self, 'load_PyArray_SetBaseObject'):
return
def impl(module, builder_unused):
fty = lc.Type.function(_int32, [_numpy_array,
_pyobject_head_struct_p])
fn = module.get_or_insert_function(fty, "PyArray_SetBaseObject")
if fn.is_declaration: # Is a declaration?
# Implement the function body
fn.linkage = lc.LINKAGE_LINKONCE_ODR
builder = lc.Builder.new(fn.append_basic_block(''))
pyarray, pyobj = fn.args
const = lambda x: lc.Constant.int(_int32, x)
offset = _numpy_array_field_ofs['base']
loc = builder.gep(pyarray, [const(0), const(offset)])
val = builder.bitcast(pyobj, _void_star)
builder.store(val, loc)
builder.ret(const(0))
return fn
self.load_PyArray_SetBaseObject = impl
def calculate_api_addr (self):
typ_name = type(_ARRAY_API).__name__
if typ_name == 'PyCObject':
vp = PyCObject_AsVoidPtr(_ARRAY_API)
elif typ_name == 'PyCapsule':
vp = PyCapsule_GetPointer(_ARRAY_API, None)
else:
raise TypeError("Cannot the the api address for %r, %s" % (_ARRAY_API, typ_name))
self.api_addr = vp
return vp
def set_PyArray_API (self, module):
'''Adds PyArray_API as a global variable to the input LLVM module.'''
if self.api_addr is None:
self.calculate_api_addr()
try:
api = module.get_global_variable_named("PyArray_API")
except:
api = module.add_global_variable(_void_star_star, "PyArray_API")
api.initializer = lc.Constant.inttoptr(
lc.Constant.int(_intp, self.api_addr), _void_star_star)
api.linkage = lc.LINKAGE_LINKONCE_ODR
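# Illustrative usage sketch (not part of the original module):
#
#     api = MultiarrayAPI()
#     api.set_PyArray_API(llvm_module)           # add the PyArray_API global
#     fn = api.load_PyArray_Empty(llvm_module, builder)
#     # `fn` is an LLVM value of type `api.PyArray_Empty_ty`, ready to call.
#
# The load_* helpers are generated in __init__ from the scraped API table,
# so the exact set of available names depends on the NumPy headers found.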
# ______________________________________________________________________
# End of multiarray_api.py
########NEW FILE########
__FILENAME__ = naming
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import re
_ptx_invalid_char = re.compile('[^a-zA-Z0-9_]')
def _fix_naming(string):
def repl(m):
return '_%X_' % (ord(m.group(0)))
return _ptx_invalid_char.sub(repl, string)
def type_mangle(*types):
return "_".join(str(t).replace(" ", "_") for t in types)
function_counter = 0
def specialized_mangle(func_name, types):
global function_counter
# pre = "__numba_specialized_%d_%s" % (func_name, type_mangle(*types))
pre = "__numba_specialized_%d_%s" % (function_counter, func_name)
function_counter += 1
return _fix_naming(pre)
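# Example (illustrative, not part of the original module): the first call to
#
#     specialized_mangle("kernel", [double, int32])
#
# produces "__numba_specialized_0_kernel" (the counter, not the types, ends
# up in the current mangling; the commented-out line above shows the older
# type-based scheme). Any character outside [a-zA-Z0-9_] would be rewritten
# by _fix_naming to an "_<hex>_" escape, e.g. "." becomes "_2E_".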
########NEW FILE########
__FILENAME__ = ndarray_helpers
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# For reference:
# typedef struct {
# PyObject_HEAD // indices (skipping the head)
# char *data; // 0
# int nd; // 1
# int *dimensions, *strides; // 2, 3
# PyObject *base; // 4
# PyArray_Descr *descr; // 5
# int flags; // 6
# } PyArrayObject;
import abc
from numba import *
from numba import typedefs
from numba.typesystem import tbaa
from numba.llvm_types import _head_len, _int32, _LLVMCaster, constant_int
import llvm.core as lc
def _const_int(X):
return lc.Constant.int(lc.Type.int(), X)
def ptr_at(builder, ptr, idx):
return builder.gep(ptr, [_const_int(idx)])
def load_at(builder, ptr, idx):
return builder.load(ptr_at(builder, ptr, idx))
def store_at(builder, ptr, idx, val):
builder.store(val, ptr_at(builder, ptr, idx))
def set_metadata(tbaa, instr, type):
if type is not None:
metadata = tbaa.get_metadata(type)
instr.set_metadata("tbaa", metadata)
def make_property(type=None, invariant=True):
"""
type: The type to be used for TBAA annotation
"""
def decorator(access_func):
def load(self):
instr = self.builder.load(access_func(self))
if self.tbaa:
set_metadata(self.tbaa, instr, type)
return instr
def store(self, value):
ptr = access_func(self)
instr = self.builder.store(value, ptr)
if self.tbaa:
set_metadata(self.tbaa, instr, type)
return property(load, store)
return decorator
class PyArrayAccessor(object):
"""
    Convenient access to the native fields of a NumPy array.
builder: llvmpy IRBuilder
pyarray_ptr: pointer to the numpy array
tbaa: metadata.TBAAMetadata instance
"""
def __init__(self, builder, pyarray_ptr, tbaa=None, dtype=None):
self.builder = builder
self.pyarray_ptr = pyarray_ptr
self.tbaa = tbaa # this may be None
self.dtype = dtype
def _get_element(self, idx):
indices = [constant_int(0), constant_int(_head_len + idx)]
ptr = self.builder.gep(self.pyarray_ptr, indices)
return ptr
def get_data(self):
instr = self.builder.load(self._get_element(0))
if self.tbaa:
set_metadata(self.tbaa, instr, self.dtype.pointer())
return instr
def set_data(self, value):
instr = self.builder.store(value, self._get_element(0))
if self.tbaa:
set_metadata(self.tbaa, instr, self.dtype.pointer())
data = property(get_data, set_data, "The array.data attribute")
def typed_data(self, context):
data = self.data
ltype = self.dtype.pointer().to_llvm(context)
return self.builder.bitcast(data, ltype)
@make_property(tbaa.numpy_ndim)
def ndim(self):
return self._get_element(1)
@make_property(tbaa.numpy_shape.pointer().qualify("const"))
def dimensions(self):
return self._get_element(2)
shape = dimensions
@make_property(tbaa.numpy_strides.pointer().qualify("const"))
def strides(self):
return self._get_element(3)
@make_property(tbaa.numpy_base)
def base(self):
return self._get_element(4)
@make_property(tbaa.numpy_dtype)
def descr(self):
return self._get_element(5)
@make_property(tbaa.numpy_flags)
def flags(self):
return self._get_element(6)
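# Illustrative usage sketch (not part of the original module), assuming
# `builder` is an llvmpy IRBuilder and `pyarray_ptr` points at a PyArrayObject:
#
#     acc = PyArrayAccessor(builder, pyarray_ptr)
#     data_ptr = acc.data          # loads the char* data field
#     ndim     = acc.ndim          # loads the nd field
#     acc.data = new_data_ptr      # stores back through the same GEP
#
# When a tbaa metadata object and dtype are supplied, each generated
# load/store is annotated with the corresponding TBAA node.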
class Array(object):
"""
Interface for foreign arrays, like LLArray
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def from_type(cls, llvm_dtype):
"""
Given an LLVM representation of the dtype, return the LLVM array type
representation
"""
@abc.abstractproperty
def data(self):
"""Return the data pointer of this array (for A.data)"""
@abc.abstractproperty
def shape_ptr(self):
"""Return the shape pointer of this array (for A.shape[0], etc)"""
@abc.abstractproperty
def strides_ptr(self):
"""Return the strides pointer of this array (for A.strides[0], etc)"""
@abc.abstractproperty
def shape(self):
"""Return the extents as a list of loaded LLVM values"""
@abc.abstractproperty
def strides(self):
"""Return the strides as a list of loaded LLVM values"""
@abc.abstractproperty
def ndim(self):
"""Return the dimensionality of this array as an LLVM constant"""
@abc.abstractmethod
def getptr(self, *indices):
"""Compute an element pointer given LLVM indices into the array"""
class NumpyArray(Array):
"""
    LLArray-compatible interface for NumPy's ndarray
"""
_strides_ptr = None
_strides = None
_shape_ptr = None
_shape = None
_data_ptr = None
_freefuncs = []
_freedata = []
def __init__(self, pyarray_ptr, builder, tbaa=None, type=None):
self.type = type
self.nd = type.ndim
self.array_type = pyarray_ptr.type.pointee
# LLVM attributes
self.arr = PyArrayAccessor(builder, pyarray_ptr, tbaa, type.dtype)
self.builder = builder
self._shape = None
self._strides = None
self.caster = _LLVMCaster(builder)
@classmethod
def from_type(cls, llvm_dtype):
return typedefs.PyArray.pointer().to_llvm()
@property
def data(self):
if not self._data_ptr:
self._data_ptr = self.arr.get_data()
return self._data_ptr
@property
def shape_ptr(self):
if self._shape_ptr is None:
self._shape_ptr = self.arr.shape
return self._shape_ptr
@property
def strides_ptr(self):
if self._strides_ptr is None:
self._strides_ptr = self.arr.strides
return self._strides_ptr
@property
def shape(self):
if not self._shape:
self._shape = self.preload(self.shape_ptr, self.nd)
return self._shape
@property
def strides(self):
if not self._strides:
self._strides = self.preload(self.strides_ptr, self.nd)
return self._strides
@property
def ndim(self):
return _const_int(self.nd)
def getptr(self, *indices):
offset = _const_int(0)
for i, (stride, index) in enumerate(zip(self.strides, indices)):
index = self.caster.cast(index, stride.type, unsigned=False)
offset = self.caster.cast(offset, stride.type, unsigned=False)
offset = self.builder.add(offset, self.builder.mul(index, stride))
data_ty = self.type.dtype.to_llvm()
data_ptr_ty = lc.Type.pointer(data_ty)
dptr_plus_offset = self.builder.gep(self.data, [offset])
ptr = self.builder.bitcast(dptr_plus_offset, data_ptr_ty)
return ptr
# Misc, optional methods
@property
def itemsize(self):
raise NotImplementedError
def preload(self, ptr, count=None):
assert count is not None
return [load_at(self.builder, ptr, i) for i in range(count)]
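# Illustrative note (not part of the original module): getptr() implements
# the usual strided-address computation
#
#     element_address = data + sum(index[i] * strides[i] for i in range(ndim))
#
# in bytes, then bitcasts the result to a pointer to the array's dtype, so
# arr.getptr(i, j) yields the equivalent of &A[i, j] for a 2-D array.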
########NEW FILE########
__FILENAME__ = basenodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
import numba.nodes
def is_expr(node):
if not isinstance(node, Node):
return True
return isinstance(node, ExprNode)
class Node(ast.AST):
"""
Superclass for Numba AST nodes
"""
_fields = []
_attributes = ('lineno', 'col_offset')
def __init__(self, **kwargs):
vars(self).update(kwargs)
class ExprNode(Node):
"""
Node that is an expression.
"""
def _variable_get(self):
if not hasattr(self, '_variable'):
self._variable = Variable(self.type)
return self._variable
def _variable_set(self, variable):
self._variable = variable
variable = property(_variable_get, _variable_set)
def coerce(self, dst_type):
return numba.nodes.CoercionNode(self, dst_type)
@property
def cloneable(self):
if isinstance(self, (CloneNode, CloneableNode)):
return self
return CloneableNode(self)
class Name(ast.Name, ExprNode):
cf_maybe_null = False
raise_unbound_node = None
_fields = ast.Name._fields + ('check_unbound',)
def __init__(self, id, ctx, *args, **kwargs):
super(Name, self).__init__(*args, **kwargs)
self.id = self.name = id
self.ctx = ctx
def __repr__(self):
type = getattr(self, 'type', "")
if type:
type = ', %s' % type
name = self.name
if hasattr(self, 'variable') and self.variable.renamed_name:
name = self.variable.unmangled_name
return "name(%s%s)" % (name, type)
def __deepcopy__(self, memo):
result = Name(self.id, self.ctx)
result.cf_maybe_null = self.cf_maybe_null
result.raise_unbound_node = self.raise_unbound_node
return result
class WithPythonNode(Node):
"with python: ..."
_fields = ['body']
class WithNoPythonNode(WithPythonNode):
"with nopython: ..."
class CloneableNode(ExprNode):
"""
Create a node that can be cloned. This allows sub-expressions to be
re-used without re-evaluating them.
"""
_fields = ['node']
def __init__(self, node, **kwargs):
super(CloneableNode, self).__init__(**kwargs)
self.node = node
self.clone_nodes = []
self.type = getattr(node, 'type', None) or node.variable.type
@property
def clone(self):
return CloneNode(self)
def __repr__(self):
return "cloneable(%s)" % self.node
class CloneNode(ExprNode):
"""
Clone a CloneableNode. This allows the node's sub-expressions to be
re-used without re-evaluating them.
The CloneableNode must be evaluated before the CloneNode is evaluated!
"""
_fields = ['node']
def __init__(self, node, **kwargs):
super(CloneNode, self).__init__(**kwargs)
assert isinstance(node, CloneableNode)
self.node = node
self.type = node.type
node.clone_nodes.append(self)
self.llvm_value = None
@property
def clone(self):
        return self
def __repr__(self):
return "clone(%s)" % self.node
class ExpressionNode(ExprNode):
"""
Node that allows an expression to execute a bunch of statements first.
"""
_fields = ['stmts', 'expr']
def __init__(self, stmts, expr, **kwargs):
super(ExpressionNode, self).__init__(**kwargs)
self.stmts = stmts
self.expr = expr
self.type = expr.variable.type
def __repr__(self):
return "exprstat(..., %s)" % self.expr
class FunctionWrapperNode(Node):
"""
    This node represents a wrapper function callable from Python using NumbaFunction
(see numba/numbafunction.c):
PyObject *(*)(PyObject *self, PyObject *args)
It unpacks the tuple to native types, calls the wrapped function, and
coerces the return type back to an object.
"""
_fields = ['body', 'return_result']
def __init__(self, wrapped_function, signature, orig_py_func, fake_pyfunc,
orig_py_func_name):
self.wrapped_function = wrapped_function
self.signature = signature
self.orig_py_func = orig_py_func
self.fake_pyfunc = fake_pyfunc
self.name = orig_py_func_name
########NEW FILE########
__FILENAME__ = bitwise
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba.nodes import *
from numba import typesystem
def is_bitwise(op):
return isinstance(op, (ast.BitAnd, ast.BitOr, ast.BitXor,
ast.LShift, ast.RShift, ast.Invert))
########NEW FILE########
__FILENAME__ = callnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
class FunctionCallNode(ExprNode):
_attributes = ['signature', 'type', 'name']
def __init__(self, signature, args, name=''):
self.signature = signature
self.type = signature.return_type
self.name = name
self.original_args = args
class NativeCallNode(FunctionCallNode):
_attributes = FunctionCallNode._attributes + ['llvm_func_name']
_fields = ['args']
def __init__(self, signature, args, llvm_func, py_func=None,
badval=None, goodval=None,
exc_type=None, exc_msg=None, exc_args=None,
skip_self=False, **kw):
super(NativeCallNode, self).__init__(signature, args, **kw)
self.llvm_func = llvm_func
self.llvm_func_name = getattr(llvm_func, 'name', None)
self.py_func = py_func
self.skip_self = skip_self
self.type = signature.return_type
self.coerce_args()
self.badval = badval
self.goodval = goodval
self.exc_type = exc_type
self.exc_msg = exc_msg
self.exc_args = exc_args
def coerce_args(self):
self.args = list(self.original_args)
for i, dst_type in enumerate(self.signature.args[self.skip_self:]):
arg = self.args[i]
self.args[i] = CoercionNode(arg, dst_type,
name='func_%s_arg%d' % (self.name, i))
def __repr__(self):
if self.llvm_func:
name = self.llvm_func.name
elif self.name:
name = self.name
else:
name = "<unknown(%s)>" % self.signature
return "%s(%s)" % (name, ", ".join(str(arg) for arg in self.args))
class NativeFunctionCallNode(NativeCallNode):
"""
Call a function which is given as a node
"""
_fields = ['function', 'args']
def __init__(self, signature, function_node, args, **kw):
super(NativeFunctionCallNode, self).__init__(signature, args, None,
None, **kw)
self.function = function_node
class LLMacroNode (NativeCallNode):
'''
Inject a low-level macro in the function at the call site.
Low-level macros are Python functions that take a FunctionCache
instance, a LLVM builder instance, and a set of arguments,
construct LLVM code, and return some kind of LLVM value result.
The passed signature should reflect the Numba types of the
expected input parameters, and the type of the resulting value
(this does not restrict polymorphism at the LLVM type level in the
macro expansion function).
'''
_fields = ['macro', 'args']
def __init__(self, signature, macro, *args, **kw):
super(LLMacroNode, self).__init__(signature, args, None, None, **kw)
self.macro = macro
class LLVMExternalFunctionNode(ExprNode):
'''For calling an external llvm function where you only have the
signature and the function name.
'''
def __init__(self, signature, fname):
super(LLVMExternalFunctionNode, self).__init__(signature=signature,
fname=fname)
class LLVMIntrinsicNode(NativeCallNode):
"Call an llvm intrinsic function"
def __init__(self, signature, args, func_name, **kw):
super(LLVMIntrinsicNode, self).__init__(signature, args, None, **kw)
self.func_name = func_name
class MathCallNode(NativeCallNode):
"Low level call a libc math function"
class PointerCallNode(NativeCallNode):
"Call a ctypes function"
_fields = NativeCallNode._fields + ['function']
def __init__(self, signature, args, pointer, py_func=None, **kw):
super(PointerCallNode, self).__init__(signature, args, None,
py_func, **kw)
self.pointer = pointer
self.function = ConstNode(self.pointer, signature.pointer())
class ObjectCallNode(FunctionCallNode):
_fields = ['function', 'args_tuple', 'kwargs_dict']
def __init__(self, signature, func, args, keywords=None, py_func=None, **kw):
if py_func and not kw.get('name', None):
kw['name'] = py_func.__name__
if signature is None:
signature = numba.function(object_, [object_] * len(args))
if keywords:
signature.args.extend([object_] * len(keywords))
super(ObjectCallNode, self).__init__(signature, args)
assert func is not None
self.function = func
self.py_func = py_func
self.args_tuple = ast.Tuple(elts=list(args), ctx=ast.Load())
self.args_tuple.variable = Variable(
typesystem.tuple_(object_, size=len(args)))
if keywords:
keywords = [(ConstNode(k.arg), k.value) for k in keywords]
keys, values = zip(*keywords)
self.kwargs_dict = ast.Dict(list(keys), list(values))
self.kwargs_dict.variable = Variable(object_)
else:
self.kwargs_dict = NULL_obj
self.type = signature.return_type
def __repr__(self):
return 'objcall(%s, %s)' % (self.function, self.original_args)
class ComplexConjugateNode(ExprNode):
"mycomplex.conjugate()"
_fields = ['complex_node']
def __init__(self, complex_node, **kwargs):
super(ComplexConjugateNode, self).__init__(**kwargs)
self.complex_node = complex_node
########NEW FILE########
__FILENAME__ = cfnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
basic_block_fields = ['cond_block', 'if_block', 'else_block', 'exit_block']
def delete_control_blocks(flow_node, flow):
"""
Remove all control flow basic blocks from the CFG given a FlowNode
and the CFG. Also removes Name references from cf_references.
"""
parent = flow_node.cond_block.idom
flow_node.exit_block.reparent(parent)
flow.blocks.remove(flow_node.exit_block)
flow_node.exit_block = None
#flow_node.cond_block.delete(flow)
#flow_node.if_block.delete(flow)
#if flow_node.orelse:
# flow_node.else_block.delete(flow)
from numba import control_flow
control_flow.DeleteStatement(flow).visit(flow_node)
class FlowNode(Node):
"""
Node that has control flow basic blocks.
"""
cond_block = None
if_block = None
else_block = None
exit_block = None
def __init__(self, **kwargs):
super(FlowNode, self).__init__(**kwargs)
from numba import control_flow
for field_name in basic_block_fields:
if not getattr(self, field_name):
block = control_flow.ControlBlock(-1, is_fabricated=True)
setattr(self, field_name, block)
class If(ast.If, FlowNode):
"An if statement node. Has the basic block attributes from FlowNode"
class While(ast.While, FlowNode):
"A while loop node. Has the basic block attributes from FlowNode"
# Place to jump to when we see a 'continue'. The default is
# 'the condition block'. For 'for' loops we set this to
# 'the counter increment block'
continue_block = None
class For(ast.For, FlowNode):
"A for loop node. Has the basic block attributes from FlowNode"
def merge_cfg_in_ast(basic_block_fields, bodies, node):
"""
Merge CFG blocks into the AST. E.g.
While(test=x, body=y)
becomes
While(test=ControlBlock(0, body=[x]), body=ControlBlock(1, body=[y]))
"""
for bb_name, body_name in zip(basic_block_fields, bodies):
body = getattr(node, body_name)
bb = getattr(node, bb_name)
if not body:
continue
# Merge AST child in body list of CFG block
if isinstance(body, list):
bb.body = body
bb = [bb]
else:
bb.body = [body]
# Set basic block as an AST child of the node
setattr(node, body_name, bb)
def merge_cfg_in_while(node):
bodies = ['test', 'body', 'orelse']
merge_cfg_in_ast(basic_block_fields, bodies, node)
def build_if(cls=If, **kwargs):
node = cls(**kwargs)
merge_cfg_in_while(node)
return node
def build_while(**kwargs):
return build_if(cls=While, **kwargs)
def build_for(**kwargs):
result = For(**kwargs)
merge_cfg_in_ast(basic_block_fields, ['iter', 'body', 'orelse'], result)
merge_cfg_in_ast(['target_block'], ['target'], result)
return result
def if_else(op, cond_left, cond_right, lhs, rhs):
"Implements 'lhs if cond_left <op> cond_right else rhs'"
test = ast.Compare(left=cond_left, ops=[op],
comparators=[cond_right])
test.right = cond_right
test = typednode(test, bool_)
return build_if(test=test, body=[lhs], orelse=[rhs] if rhs else [])
class LowLevelBasicBlockNode(Node):
"""
Evaluate a statement or expression in a new LLVM basic block.
"""
_fields = ['body']
def __init__(self, body, label='unnamed', **kwargs):
super(LowLevelBasicBlockNode, self).__init__(**kwargs)
self.body = body
self.label = label
self.entry_block = None
def create_block(self, translator, label=None):
if self.entry_block is None:
self.entry_block = translator.append_basic_block(label or self.label)
return self.entry_block
class MaybeUnusedNode(Node):
"""
Wraps an ast.Name() to indicate that the result may be unused.
"""
_fields = ["name_node"]
def __init__(self, name_node):
self.name_node = name_node
########NEW FILE########
__FILENAME__ = closurenodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
# not really an expression, but used in an assignment
class ClosureNode(ExprNode):
"""
Inner functions or closures.
When coerced to an object, a wrapper PyMethodDef gets created, and at
call time a function is dynamically created with the closure scope.
func_def:
AST FunctionDef of the function
closure_type:
numba.typesystem.ClosureType
outer_py_func:
Outer Python function (or None!)
"""
_fields = []
def __init__(self, env, func_def, closure_type, outer_py_func, **kwargs):
super(ClosureNode, self).__init__(**kwargs)
self.func_def = func_def
self.type = closure_type
self.outer_py_func = outer_py_func
self.name = self.func_def.name
func_env = env.translation.get_env(func_def)
self.need_numba_func = not func_env or func_env.need_closure_wrapper
self.lfunc = None
self.wrapper_func = None
self.wrapper_lfunc = None
self.lfunc_pointer = None
# FunctionEnvironment after type inference
self.func_env = None
# self.type_inferred_ast = None
# self.symtab = None
from numba import pipeline
self.locals = pipeline.get_locals(func_def, None)
# The Python extension type that must be instantiated to hold cellvars
# self.scope_type = None
self.ext_type = None
self.need_closure_scope = False
def make_pyfunc(self):
d = self.outer_py_func.__globals__
# argnames = tuple(arg.id for arg in self.func_def.args.args)
# dummy_func_string = """
#def __numba_closure_func(%s):
# pass
# """ % ", ".join(argnames)
# exec dummy_func_string in d, d
        # Something may have set a pure, original, unmodified AST; use that
        # instead and reset it after the compile. This is a HACK.
func_body = self.func_def.body
if hasattr(self.func_def, 'pure_ast_body'):
self.func_def.body = self.func_def.pure_ast_body
name = self.func_def.name
self.func_def.name = '__numba_closure_func'
ast_mod = ast.Module(body=[self.func_def])
numba.functions.fix_ast_lineno(ast_mod)
c = compile(ast_mod, '<string>', 'exec')
exec(c, d, d)
self.func_def.name = name
self.py_func = d['__numba_closure_func']
self.py_func.live_objects = []
self.py_func.__module__ = self.outer_py_func.__module__
self.py_func.__name__ = name
if hasattr(self.func_def, 'pure_ast_body'):
self.func_def.body = func_body
class InstantiateClosureScope(ExprNode):
_fields = ['outer_scope']
def __init__(self, func_def, scope_ext_type, scope_type, outer_scope, **kwargs):
super(InstantiateClosureScope, self).__init__(**kwargs)
self.func_def = func_def
self.scope_type = scope_type
self.ext_type = scope_ext_type
self.outer_scope = outer_scope
self.type = scope_type
class ClosureScopeLoadNode(ExprNode):
"Load the closure scope for the function or NULL"
type = void.pointer()
class ClosureCallNode(NativeCallNode):
"""
Call to closure or inner function.
"""
_fields = ['func', 'args']
def __init__(self, closure_type, call_node, **kwargs):
self.call_node = call_node
self.func = call_node.func
self.closure_type = closure_type
self.argnames = [name.id for name in self.func_def.args.args[self.need_closure_scope:]]
self.expected_nargs = len(self.argnames)
args, keywords = call_node.args, call_node.keywords
args = args + self._resolve_keywords(args, keywords)
super(ClosureCallNode, self).__init__(
closure_type.signature, args, llvm_func=None,
skip_self=self.need_closure_scope, **kwargs)
@property
def need_closure_scope(self):
return self.closure_type.closure.need_closure_scope
@property
def func_def(self):
return self.closure_type.closure.func_def
def _resolve_keywords(self, args, keywords):
"Map keyword arguments to positional arguments"
expected = self.expected_nargs - len(args)
if len(keywords) != expected:
raise error.NumbaError(
self.call_node,
"Expected %d arguments, got %d" % (self.expected_nargs,
len(args) + len(keywords)))
argpositions = dict(zip(self.argnames, range(self.expected_nargs)))
positional = [None] * (self.expected_nargs - len(args))
for keyword in keywords:
argname = keyword.arg
pos = argpositions.get(argname, None)
if pos is None:
raise error.NumbaError(
keyword, "Not a valid keyword argument name: %s" % argname)
elif pos < len(args):
raise error.NumbaError(
keyword, "Got multiple values for positional "
"argument %r" % argname)
else:
                positional[pos - len(args)] = keyword.value  # offset past the positional args already given
return positional
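# Worked example (illustrative, not part of the original module): for an
# inner function `def f(a, b, c)` called as `f(1, c=3, b=2)`:
#
#     argnames   = ['a', 'b', 'c'],  expected_nargs = 3
#     args       = [1]               (one positional argument)
#     keywords   = [b=2, c=3]
#     positional = [2, 3]            (filled at index pos - len(args))
#
# so the final argument list passed on to NativeCallNode is [1, 2, 3].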
########NEW FILE########
__FILENAME__ = coercionnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
import numba.nodes
class CoercionNode(ExprNode):
"""
Coerce a node to a different type
"""
_fields = ['node']
_attributes = ['type', 'name']
def __new__(cls, node, dst_type, name=''):
if isinstance(node, CoercionNode) and node.type == dst_type:
return node
return super(CoercionNode, cls).__new__(cls, node, dst_type, name=name)
def __init__(self, node, dst_type, name=''):
if node is self:
# We are trying to coerce a CoercionNode which already has the
# right type, so __new__ returns a CoercionNode, which then results
# in __init__ being called
return
type = getattr(node, 'type', None) or node.variable.type
if dst_type.is_pointer and type.is_int:
assert type == Py_uintptr_t, type
self.type = dst_type
self.variable = Variable(dst_type)
self.name = name
self.node = self.verify_conversion(dst_type, node)
if (dst_type.is_object and not node.variable.type.is_object and
isinstance(node, numba.nodes.ArrayAttributeNode)):
self.node = self.coerce_numpy_attribute(node)
def coerce_numpy_attribute(self, node):
"""
Numpy array attributes, such as 'data', get rewritten to direct
accesses. Since they are being coerced back to objects, use a generic
attribute access instead.
"""
node = ast.Attribute(value=node.array, attr=node.attr_name,
ctx=ast.Load())
node.variable = Variable(object_)
node.type = object_
return node
@property
def dst_type(self):
"""
dst_type is always the same as type, and 'type' is kept consistent
with Variable.type
"""
return self.type
@classmethod
def coerce(cls, node_or_nodes, dst_type):
if isinstance(node_or_nodes, list) and isinstance(dst_type, list):
return [cls(node, dst) for node, dst in zip(node_or_nodes, dst_type)]
elif isinstance(node_or_nodes, list):
return [cls(node, dst_type) for node in node_or_nodes]
return cls(node_or_nodes, dst_type)
def verify_conversion(self, dst_type, node):
if ((node.variable.type.is_complex or dst_type.is_complex) and
(node.variable.type.is_object or dst_type.is_object)):
if dst_type.is_complex:
complex_type = dst_type
else:
complex_type = node.variable.type
if not complex_type == complex128:
node = CoercionNode(node, complex128)
elif ((node.variable.type.is_datetime or dst_type.is_datetime) and
(node.variable.type.is_object or dst_type.is_object)):
if dst_type.is_datetime:
datetime_type = dst_type
else:
datetime_type = node.variable.type
if not datetime_type.is_datetime and \
not datetime_type.is_numpy_datetime:
node = CoercionNode(node, datetime)
elif ((node.variable.type.is_timedelta or dst_type.is_timedelta) and
(node.variable.type.is_object or dst_type.is_object)):
if dst_type.is_timedelta:
timedelta_type = dst_type
else:
timedelta_type = node.variable.type
            if not timedelta_type.is_timedelta:
                node = CoercionNode(node, timedelta)
return node
def __repr__(self):
return "Coerce(%s, %s)" % (self.type, self.node)
class CastNode(ExprNode):
"""
Explicit cast by user, e.g. double(value)
"""
_fields = ["arg"]
def __init__(self, node, type):
self.arg = node
self.type = type
class PromotionNode(ExprNode):
"""
Coerces a variable of some type to another type for a phi node in a
successor block.
"""
_fields = ['node']
def __init__(self, **kwargs):
super(PromotionNode, self).__init__(**kwargs)
self.variable = self.node.variable
class CoerceToObject(CoercionNode):
"Coerce native values to objects"
class CoerceToNative(CoercionNode):
"Coerce objects to native values"
class DeferredCoercionNode(ExprNode):
"""
Coerce to the type of the given variable. The type of the variable may
change in the meantime (e.g. may be promoted or demoted).
"""
_fields = ['node']
def __init__(self, node, variable):
self.node = node
self.variable = variable
class UntypedCoercion(ExprNode):
"""
Coerce a node to the destination type. The node need not yet have a
type or variable.
"""
_fields = ['node']
def __init__(self, node, type):
self.node = node
self.type = type
########NEW FILE########
__FILENAME__ = constnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
import numba.nodes
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def get_pointer_address(value, type):
if type.is_known_pointer:
return type.address
else:
return value
def is_null_constant(constant):
return constant is _NULL
#------------------------------------------------------------------------
# Constant Nodes
#------------------------------------------------------------------------
class ConstNode(ExprNode):
"""
Wrap a constant.
"""
_attributes = ['type', 'pyval']
def __init__(self, pyval, type=None):
if type is None:
type = numba.typeof(pyval)
# if pyval is not _NULL:
# assert not type.is_object
self.variable = Variable(type, is_constant=True, constant_value=pyval)
self.type = type
self.pyval = pyval
def __repr__(self):
return "const(%s, %s)" % (self.pyval, self.type)
#------------------------------------------------------------------------
# NULL Constants
#------------------------------------------------------------------------
_NULL = object()
NULL_obj = ConstNode(_NULL, object_)
NULL = ConstNode(_NULL, void.pointer())
########NEW FILE########
__FILENAME__ = excnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
import numba.nodes
class CheckErrorNode(ExprNode):
"""
Check for an exception.
badval: if this value is returned, propagate an error
goodval: if this value is not returned, propagate an error
If exc_type, exc_msg and optionally exc_args are given, an error is
raised instead of propagating it.
See RaiseNode for the exc_* arguments.
"""
_fields = ['return_value', 'badval', 'raise_node']
def __init__(self, return_value, badval=None, goodval=None,
exc_type=None, exc_msg=None, exc_args=None,
**kwargs):
super(CheckErrorNode, self).__init__(**kwargs)
self.return_value = return_value
if badval is not None and not isinstance(badval, ast.AST):
badval = ConstNode(badval, return_value.type)
if goodval is not None and not isinstance(goodval, ast.AST):
goodval = ConstNode(goodval, return_value.type)
self.badval = badval
self.goodval = goodval
self.raise_node = RaiseNode(exc_type, exc_msg, exc_args)
class RaiseNode(ExprNode):
"""
Raise an exception.
exception_type: The Python exception type
exc_type: The Python exception as an AST node
May be passed in as a Python exception type
exc_msg: The message to print as an AST node
May be passed in as a string
exc_args: If given, must be an list of AST nodes representing the
arguments to PyErr_Format (matching the format specifiers
at runtime in exc_msg)
"""
_fields = ['exc_type', 'exc_msg', 'exc_args']
def __init__(self, exc_type, exc_msg, exc_args=None, print_on_trap=True,
**kwargs):
super(RaiseNode, self).__init__(**kwargs)
self.exception_type = None
if isinstance(exc_type, type) and issubclass(exc_type, BaseException):
self.exception_type = exc_type
exc_type = const(exc_type, object_)
if isinstance(exc_msg, (str, unicode)):
exc_msg = const(exc_msg, char.pointer())
self.exc_type = exc_type
self.exc_msg = exc_msg
self.exc_args = exc_args
self.print_on_trap = print_on_trap
class PropagateNode(ExprNode):
"""
Propagate an exception (jump to the error label). This is resolved
at code generation time and can be generated at any moment.
"""
class PyErr_OccurredNode(ExprNode):
"""
Check for a set Python exception using PyErr_Occurred().
Can be set any time after type inference. This node is resolved during
late specialization.
"""
# TODO: support checking for (value == badval && PyErr_Occurred()) for
# efficiency
_fields = ['node']
def __init__(self, node):
self.node = node
self.variable = node.variable
self.type = node.type
########NEW FILE########
__FILENAME__ = extnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
from numba.exttypes.types import methods
class ExtTypeAttribute(ExprNode):
_fields = ['value']
def __init__(self, value, attr, variable, ctx, ext_type, **kwargs):
super(ExtTypeAttribute, self).__init__(**kwargs)
self.value = value
self.attr = attr
self.type = variable.type
self.variable = variable
self.ctx = ctx
self.ext_type = ext_type
def __repr__(self):
return "%s.%s" % (self.value, self.attr)
@classmethod
def from_known_attribute(cls, value, attr, ctx, ext_type):
"""
Create an extension type attribute node if the attribute is known
to exist (and isn't being inferred)
"""
assert attr in ext_type.attributedict
import numba.symtab
variable = numba.symtab.Variable(ext_type.attributedict[attr])
return cls(value, attr, variable, ctx, ext_type)
class NewExtObjectNode(ExprNode):
"""
Instantiate an extension type. Currently unused.
"""
_fields = ['args']
def __init__(self, ext_type, args, **kwargs):
super(NewExtObjectNode, self).__init__(**kwargs)
self.ext_type = ext_type
self.args = args
class ExtensionMethod(ExprNode):
_fields = ['value']
call_node = None
def __init__(self, obj, attr, method, **kwargs):
super(ExtensionMethod, self).__init__(**kwargs)
ext_type = obj.variable.type
assert ext_type.is_extension
self.value = obj
self.attr = attr
self.ext_type = ext_type
self.initialize_type(method)
def initialize_type(self, method):
self.type = method.signature
def __repr__(self):
return "%s.%s" % (self.value, self.attr)
class AutojitExtensionMethod(ExtensionMethod):
def initialize_type(self, method):
self.type = methods.AutojitMethodType()
#class ExtensionMethodCall(Node):
# """
# Low level call that has resolved the virtual method.
# """
#
# _fields = ['vmethod', 'args']
#
# def __init__(self, vmethod, self_obj, args, signature, **kwargs):
# super(ExtensionMethodCall, self).__init__(**kwargs)
# self.vmethod = vmethod
# self.args = args
# self.signature = signature
# self.type = signature
########NEW FILE########
__FILENAME__ = llvmnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
from numba import functions
class LLVMValueRefNode(ExprNode):
"""
Wrap an LLVM value.
"""
_fields = []
def __init__(self, type, llvm_value):
self.type = type
self.llvm_value = llvm_value
class BadValue(LLVMValueRefNode):
def __init__(self, type):
super(BadValue, self).__init__(type, None)
def __repr__(self):
return "bad(%s)" % self.type
class LLVMCBuilderNode(UserNode):
"""
    Instantiate and link in an LLVM cbuilder CDefinition. The CDefinition is
    passed the list of dependency nodes and the list of LLVM value dependencies.
"""
_fields = ["dependencies"]
def __init__(self, env, cbuilder_cdefinition, signature, dependencies=None):
self.env = env
self.llvm_context = env.llvm_context
self.cbuilder_cdefinition = cbuilder_cdefinition
self.type = signature
self.dependencies = dependencies or []
def infer_types(self, type_inferer):
type_inferer.visitchildren(self)
return self
def codegen(self, codegen):
func_env = self.env.translation.crnt
dependencies = codegen.visitlist(self.dependencies)
cdef = self.cbuilder_cdefinition(self.dependencies, dependencies)
self.lfunc = cdef.define(func_env.llvm_module) #, optimize=False)
functions.keep_alive(func_env.func, self.lfunc)
return self.lfunc
@property
def pointer(self):
return self.llvm_context.get_pointer_to_function(self.lfunc)
########NEW FILE########
__FILENAME__ = metadata
# -*- coding: utf-8 -*-
"""
Allow annotating AST nodes with some metadata, and querying for that metadata.
"""
from __future__ import print_function, division, absolute_import
import weakref
def create_metadata_env():
return weakref.WeakKeyDictionary()
def annotate(env, node, **flags):
func_env = env.translation.crnt
assert func_env is not None
if node not in func_env.ast_metadata:
metadata = {}
func_env.ast_metadata[node] = metadata
else:
metadata = func_env.ast_metadata[node]
metadata.update(flags)
def query(env, node, key, default=None):
func_env = env.translation.crnt
assert func_env is not None
node_metadata = func_env.ast_metadata.get(node, {})
return node_metadata.get(key, default)
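# Illustrative usage sketch (not part of the original module); the flag name
# is hypothetical:
#
#     annotate(env, call_node, no_exception_check=True)
#     ...
#     if query(env, call_node, "no_exception_check", default=False):
#         pass  # e.g. skip generating an error check for this node
#
# Metadata is stored per FunctionEnvironment in `ast_metadata`, keyed by the
# AST node itself.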
########NEW FILE########
__FILENAME__ = numpynodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import collections
import llvm.core
from numba import typesystem
from numba.typesystem import tbaa
from numba.nodes import *
from numba.ndarray_helpers import PyArrayAccessor, NumpyArray
#----------------------------------------------------------------------------
# External Utilities
#----------------------------------------------------------------------------
def is_constant_index(node):
return (isinstance(node, ast.Index) and
isinstance(node.value, ConstNode))
def is_newaxis(node):
v = node.variable
return (is_constant_index(node) and
node.value.pyval is None) or v.type.is_newaxis or v.type.is_none
def is_ellipsis(node):
return is_constant_index(node) and node.value.pyval is Ellipsis
#----------------------------------------------------------------------------
# Internal Utilities
#----------------------------------------------------------------------------
def _const_int(X):
return llvm.core.Constant.int(llvm.core.Type.int(), X)
def get_shape(builder, tbaa_metadata, shape_pointer, ndim):
"Load the shape values from an ndarray"
shape_metadata = tbaa_metadata.get_metadata(tbaa_metadata.numpy_shape)
for i in range(ndim):
shape_ptr = builder.gep(shape_pointer, [_const_int(i)])
extent = builder.load(shape_ptr)
extent.set_metadata("tbaa", shape_metadata)
yield extent
def get_strides(builder, tbaa_metadata, strides_pointer, ndim):
"Load the stride values from an ndarray"
stride_metadata = tbaa_metadata.get_metadata(tbaa.numpy_strides)
for i in range(ndim):
stride_ptr = builder.gep(strides_pointer, [_const_int(i)])
stride = builder.load(stride_ptr)
stride.set_metadata("tbaa", stride_metadata)
yield stride
#----------------------------------------------------------------------------
# NumPy Array Attributes
#----------------------------------------------------------------------------
class DataPointerNode(ExprNode):
_fields = ['node', 'slice']
def __init__(self, node, slice, ctx):
self.node = node
self.slice = slice
self.type = node.type.dtype
self.variable = Variable(self.type)
self.ctx = ctx
def __repr__(self):
return "%s.data" % self.node
class ArrayAttributeNode(ExprNode):
is_read_only = True
_fields = ['array']
def __init__(self, attribute_name, array):
self.array = array
self.attr_name = attribute_name
self.array_type = array.variable.type
if attribute_name == 'ndim':
type = int_
elif attribute_name in ('shape', 'strides'):
type = typesystem.sized_pointer(typesystem.npy_intp,
size=self.array_type.ndim)
elif attribute_name == 'data':
type = self.array_type.dtype.pointer()
else:
raise error._UnknownAttribute(attribute_name)
self.type = type
def __repr__(self):
return "%s.%s" % (self.array, self.attr_name)
class ShapeAttributeNode(ArrayAttributeNode):
# NOTE: better do this at code generation time, and not depend on
# variable.lvalue
_fields = ['array']
def __init__(self, array):
super(ShapeAttributeNode, self).__init__('shape', array)
self.array = array
self.element_type = typesystem.npy_intp
self.type = typesystem.carray(self.element_type,
array.variable.type.ndim)
#----------------------------------------------------------------------------
# NumPy Array Creation
#----------------------------------------------------------------------------
class ArrayNewNode(ExprNode):
"""
Allocate a new array given the attributes.
"""
_fields = ['data', 'shape', 'strides', 'base']
def __init__(self, type, data, shape, strides, base=None, **kwargs):
super(ArrayNewNode, self).__init__(**kwargs)
self.type = type
self.data = data
self.shape = shape
self.strides = strides
self.base = base
class ArrayNewEmptyNode(ExprNode):
"""
    Allocate a new array given only its shape; no existing data pointer is passed in.
"""
_fields = ['shape']
def __init__(self, type, shape, is_fortran=False, **kwargs):
super(ArrayNewEmptyNode, self).__init__(**kwargs)
self.type = type
self.shape = shape
self.is_fortran = is_fortran
#----------------------------------------------------------------------------
# Nodes for NumPy calls
#----------------------------------------------------------------------------
shape_type = npy_intp.pointer()
void_p = void.pointer()
class MultiArrayAPINode(NativeCallNode):
def __init__(self, name, signature, args):
super(MultiArrayAPINode, self).__init__(signature, args,
llvm_func=None)
self.func_name = name
def PyArray_NewFromDescr(args):
"""
Low-level specialized equivalent of ArrayNewNode
"""
signature = object_(
object_, # subtype
object_, # descr
int_, # ndim
shape_type, # shape
shape_type, # strides
void_p, # data
int_, # flags
object_, # obj
)
return MultiArrayAPINode('PyArray_NewFromDescr', signature, args)
def PyArray_SetBaseObject(args):
signature = int_(object_, object_)
return MultiArrayAPINode('PyArray_SetBaseObject', signature, args)
def PyArray_UpdateFlags(args):
return MultiArrayAPINode('PyArray_UpdateFlags', void(object_, int_), args)
def PyArray_Empty(args, name='PyArray_Empty'):
nd, shape, dtype, fortran = args
return_type = typesystem.array(dtype, nd)
signature = return_type(
int_, # nd
npy_intp.pointer(), # shape
object_, # dtype
int_) # fortran
return MultiArrayAPINode(name, signature, args)
def PyArray_Zeros(args):
return PyArray_Empty(args, name='PyArray_Zeros')
########NEW FILE########
__FILENAME__ = objectnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import typesystem
from numba.nodes import *
class ObjectInjectNode(ExprNode):
"""
Refer to a Python object in the llvm code.
"""
_attributes = ['object', 'type']
def __init__(self, object, type=None, **kwargs):
super(ObjectInjectNode, self).__init__(**kwargs)
self.object = object
self.type = type or object_
self.variable = Variable(self.type, is_constant=True,
constant_value=object)
def __repr__(self):
return "<inject(%s)>" % self.object
NoneNode = ObjectInjectNode(None, object_)
class ObjectTempNode(ExprNode):
"""
Coerce a node to a temporary which is reference counted.
"""
_fields = ['node']
def __init__(self, node, incref=False):
assert not isinstance(node, ObjectTempNode)
self.node = node
self.llvm_temp = None
self.type = getattr(node, 'type', node.variable.type)
self.incref = incref
def __repr__(self):
return "objtemp(%s)" % self.node
class NoneNode(ExprNode):
"""
Return None.
"""
type = typesystem.none
variable = Variable(type)
class ObjectTempRefNode(ExprNode):
"""
Reference an ObjectTempNode, without evaluating its subexpressions.
The ObjectTempNode must already have been evaluated.
"""
_fields = []
def __init__(self, obj_temp_node, **kwargs):
super(ObjectTempRefNode, self).__init__(**kwargs)
self.obj_temp_node = obj_temp_node
class IncrefNode(ExprNode):
_fields = ['value']
def __init__(self, value, **kwargs):
super(IncrefNode, self).__init__(**kwargs)
self.value = value
class DecrefNode(IncrefNode):
pass
########NEW FILE########
__FILENAME__ = pointernodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
import numba.nodes
def pointer_add(pointer, offset):
assert pointer.type == char.pointer()
left = numba.nodes.ptrtoint(pointer)
result = ast.BinOp(left, ast.Add(), offset)
result.type = left.type
result.variable = Variable(result.type)
return CoercionNode(result, char.pointer())
def ptrtoint(node):
return CoercionNode(node, Py_uintptr_t)
def ptrfromint(intval, dst_ptr_type):
return CoercionNode(ConstNode(intval, Py_uintptr_t), dst_ptr_type)
def ptrfromobj(obj):
return PointerFromObject(obj)
def value_at_offset(obj_node, offset, dst_type):
"""
Perform (dst_type) (((char *) my_object) + offset)
"""
offset = ConstNode(offset, Py_ssize_t)
pointer = PointerFromObject(obj_node)
pointer = CoercionNode(pointer, char.pointer())
pointer = pointer_add(pointer, offset)
value_at_offset = CoercionNode(pointer, dst_type)
return value_at_offset
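# Illustrative example (not part of the original module); the offset and
# destination type are hypothetical:
#
#     # (double *) (((char *) obj) + 16)
#     field_ptr = value_at_offset(obj_node, 16, double.pointer())
#
# i.e. reinterpret the memory 16 bytes past the start of the object as a
# pointer to a double.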
class DereferenceNode(ExprNode):
"""
Dereference a pointer
"""
_fields = ['pointer']
def __init__(self, pointer, **kwargs):
super(DereferenceNode, self).__init__(**kwargs)
self.pointer = pointer
self.type = pointer.type.base_type
def __repr__(self):
return "*%s" % (self.pointer,)
class PointerFromObject(ExprNode):
"""
Bitcast objects to void *
"""
_fields = ['node']
type = void.pointer()
variable = Variable(type)
def __init__(self, node, **kwargs):
super(PointerFromObject, self).__init__(**kwargs)
self.node = node
def __repr__(self):
return "((void *) %s)" % (self.node,)
########NEW FILE########
__FILENAME__ = structnodes
# -*- coding: utf-8 -*-
"""
Struct and complex nodes.
Structs are allocated on the stack, and not mutated as values. This is
because mutations are attribute or index assignments, which are not
recognized as variable assignments. Hence mutation cannot propagate new
values. So we mutate what we have on the stack.
"""
from __future__ import print_function, division, absolute_import
from numba.nodes import *
def struct_type(type):
if type.is_reference:
type = type.referenced_type
return type
class StructAttribute(ExprNode):
# expr : = StructAttribute(expr, string, expr_context, Type, metadata)
# metadata := StructAttribute | ComplexAttribute
_fields = ['value']
def __init__(self, value, attr, ctx, type, **kwargs):
super(StructAttribute, self).__init__(**kwargs)
self.value = value
self.attr = attr
self.ctx = ctx
self.struct_type = type
type = struct_type(type)
self.attr_type = type.fielddict[attr]
self.type = self.attr_type
self.variable = Variable(self.type, promotable_type=False)
@property
def field_idx(self):
fields = struct_type(self.struct_type).fields
return fields.index((self.attr, self.attr_type))
class StructVariable(ExprNode):
"""
Tells the type inferencer that the node is actually a valid struct that
we can mutate. For instance
func().a = 2
is wrong if func() returns a struct by value. So we only allow references
like struct.a = 2 and array[i].a = 2.
"""
_fields = ['node']
def __init__(self, node, **kwargs):
super(StructVariable, self).__init__(**kwargs)
self.node = node
self.type = node.type
class ComplexNode(ExprNode):
_fields = ['real', 'imag']
type = complex128
variable = Variable(type)
def __init__(self, real, imag):
self.real = real
self.imag = imag
class ComplexAttributeNode(ExprNode):
_fields = ["value"]
def __init__(self, value, attr):
self.value = value
self.attr = attr
self.type = value.type.base_type
self.variable = Variable(self.type)
class DateTimeNode(ExprNode):
_fields = ['timestamp', 'units']
type = datetime()
variable = Variable(type)
def __init__(self, timestamp, units):
self.timestamp = timestamp
self.units = units
class DateTimeAttributeNode(ExprNode):
_fields = ['value']
def __init__(self, value, attr):
self.value = value
self.attr = attr
self.type = value.type
self.variable = Variable(self.type)
class NumpyDateTimeNode(ExprNode):
_fields = ['datetime_string']
type = datetime()
variable = Variable(type)
def __init__(self, datetime_string):
self.datetime_string = datetime_string
class TimeDeltaNode(ExprNode):
_fields = ['diff', 'units']
type = timedelta()
variable = Variable(type)
def __init__(self, diff, units):
self.diff = diff
self.units = units
class NumpyTimeDeltaNode(ExprNode):
_fields = ['diff', 'units_str']
type = timedelta()
variable = Variable(type)
def __init__(self, diff, units_str):
self.diff = diff
self.units_str = units_str
class TimeDeltaAttributeNode(ExprNode):
_fields = ['value']
def __init__(self, value, attr):
self.value = value
self.attr = attr
self.type = value.type
self.variable = Variable(self.type)
########NEW FILE########
__FILENAME__ = tempnodes
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
class TempNode(ExprNode): #, ast.Name):
"""
Create a temporary to store values in. Does not perform reference counting.
"""
temp_counter = 0
def __init__(self, type, name=None, dst_variable=None):
self.type = type
self.name = name
self.variable = Variable(type, name='___numba_%d' % self.temp_counter,
is_local=True)
TempNode.temp_counter += 1
self.llvm_temp = None
self.dst_variable = dst_variable
self._tbaa_node = None
def get_tbaa_node(self, tbaa):
"""
TBAA metadata node unique to this temporary. This is valid
since one cannot take the address of a temporary.
"""
if self._tbaa_node is None:
root = tbaa.get_metadata(char.pointer())
self._tbaa_node = tbaa.make_unique_metadata(root)
return self._tbaa_node
def load(self, invariant=False):
return TempLoadNode(temp=self, invariant=invariant)
def store(self):
return TempStoreNode(temp=self)
def __repr__(self):
if self.name:
name = ", %s" % self.name
else:
name = ""
return "temp(%s%s)" % (self.type, name)
class TempLoadNode(ExprNode):
_fields = ['temp']
def __init__(self, temp, invariant=False):
self.temp = temp
self.type = temp.type
self.variable = temp.variable
self.invariant = invariant
def __repr__(self):
return "load(%s)" % self.temp
class TempStoreNode(TempLoadNode):
_fields = ['temp']
def __repr__(self):
return "store(%s)" % self.temp
########NEW FILE########
__FILENAME__ = usernode
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.nodes import *
#----------------------------------------------------------------------------
# User-extensible nodes
#----------------------------------------------------------------------------
class UserNodeMeta(type):
def __init__(cls, what, bases=None, dict=None):
super(UserNodeMeta, cls).__init__(what, bases, dict)
cls.actual_name = cls.__name__
cls.__name__ = "UserNode"
def __repr__(cls):
return "<class %s>" % cls.actual_name
class UserNode(ExprNode):
"""
    Node that users can subclass and insert in the AST to provide
    user-specified functionality, without having to rely on mixins.
"""
__metaclass__ = UserNodeMeta
_fields = []
def infer_types(self, type_inferer):
"""
Infer the type of this node and set it self.type.
The return value will replace this node in the AST.
"""
raise NotImplementedError
def specialize(self, specializer):
"""
Just before code generation. Useful to rewrite this node in terms
of other existing fundamental operations.
Implementing this method is optional.
"""
specializer.visitchildren(self)
return self
def codegen(self, codegen):
"""
Generate code for this node.
Must return an LLVM Value.
"""
raise NotImplementedError
def __repr__(self):
return "<%s object>" % self.actual_name
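# A minimal sketch (not part of the original sources; the node name and its
# field are made up) of how UserNode is intended to be subclassed: implement
# infer_types/specialize/codegen and the node can then be spliced into the
# AST like any built-in node.
#
#     class MyUserNode(UserNode):
#         _fields = ["arg"]
#
#         def __init__(self, arg):
#             self.arg = arg
#
#         def infer_types(self, type_inferer):
#             # Infer the child, take over its type, and keep this node in place.
#             self.arg = type_inferer.visit(self.arg)
#             self.type = self.arg.variable.type
#             return self
#
#         def codegen(self, codegen):
#             # Generate and return an LLVM value, e.g. by visiting the child.
#             return codegen.visit(self.arg)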
class dont_infer(UserNode):
"""
Support delayed type inference of the body. E.g. if you want a portion
<blob> to be inferred elsewhere:
print x
<blob>
print y
If we want to infer <blob> after the last print, but evaluate it before,
we can replace these statements with:
[print x, dont_infer(<blob>), print y, infer_now(<blob>)]
"""
_fields = ["arg"]
def __init__(self, arg):
self.arg = arg
def infer_types(self, type_inferer):
return self
def specialize(self, specializer):
return specializer.visit(self.arg)
class infer_now(UserNode):
"See dont_infer above"
_fields = []
def __init__(self, arg, dont_infer_node):
self.arg = arg
self.dont_infer_node = dont_infer_node
def infer_types(self, type_inferer):
self.dont_infer_node.arg = type_inferer.visit(self.arg)
return None
########NEW FILE########
__FILENAME__ = normalize
# -*- coding: utf-8 -*-
"""
Initial AST validation and normalization.
"""
from __future__ import print_function, division, absolute_import
import ast
import copy
import types
from numba import error
from numba import nodes
from numba import visitors
from numba import typesystem
class NormalizeAST(visitors.NumbaTransformer):
"Normalize AST"
function_level = 0
# TODO: Actually use numba.ir.normalized
ir = types.ModuleType('numba.ir.normalized')
vars(ir).update(vars(ast))
vars(ir).update(vars(nodes))
#------------------------------------------------------------------------
# Normalization
#------------------------------------------------------------------------
def visit_FunctionDef(self, node):
if self.function_level:
return self.handle_inner_function(node)
self.function_level += 1
self.visitchildren(node)
self.function_level -= 1
return node
def handle_inner_function(self, node):
"Create assignment code for inner functions and mark the assignment"
lhs = ast.Name(node.name, ast.Store())
ast.copy_location(lhs, node)
rhs = FuncDefExprNode(func_def=node)
ast.copy_location(rhs, node)
fields = rhs._fields
rhs._fields = []
assmnt = ast.Assign(targets=[lhs], value=rhs)
result = self.visit(assmnt)
rhs._fields = fields
return result
def visit_FunctionDef(self, node):
#for arg in node.args:
# if arg.default:
# self.visitchildren(arg)
if self.function_level:
return self.handle_inner_function(node)
self.visitchildren(node)
return node
def visit_ListComp(self, node):
"""
Rewrite list comprehensions to the equivalent for loops.
AST syntax:
ListComp(expr elt, comprehension* generators)
comprehension = (expr target, expr iter, expr* ifs)
'ifs' represent a chain of ANDs
"""
assert len(node.generators) > 0
# Create innermost body, i.e. list.append(expr)
# TODO: size hint for PyList_New
list_create = ast.List(elts=[], ctx=ast.Load())
list_create.type = typesystem.object_ # typesystem.list_()
list_create = nodes.CloneableNode(list_create)
list_value = nodes.CloneNode(list_create)
list_append = ast.Attribute(list_value, "append", ast.Load())
append_call = ast.Call(func=list_append, args=[node.elt],
keywords=[], starargs=None, kwargs=None)
# Build up the loops from inwards to outwards
body = append_call
for comprehension in reversed(node.generators):
            # Handle the 'if' clause
ifs = comprehension.ifs
if len(ifs) > 1:
make_boolop = lambda op1_op2: ast.BoolOp(op=ast.And(),
values=op1_op2)
if_test = reduce(make_boolop, ifs)
elif len(ifs) == 1:
if_test, = ifs
else:
if_test = None
if if_test is not None:
body = ast.If(test=if_test, body=[body], orelse=[])
# Wrap list.append() call or inner loops
body = ast.For(target=comprehension.target,
iter=comprehension.iter, body=[body], orelse=[])
expr = nodes.ExpressionNode(stmts=[list_create, body], expr=list_value)
return self.visit(expr)
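    # Illustrative sketch (not generated source) of the rewrite performed above:
    #
    #     result = [f(x) for x in xs if x > 0]
    #
    # becomes, roughly,
    #
    #     temp = []                  # list_create / list_value
    #     for x in xs:
    #         if x > 0:
    #             temp.append(f(x))  # append_call
    #     result = temp              # the ExpressionNode's value
    #
    # with nested generators producing nested for loops, built innermost-first.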
def visit_AugAssign(self, node):
"""
Inplace assignment.
Resolve a += b to a = a + b. Set 'inplace_op' attribute of the
Assign node so later stages may recognize inplace assignment.
Do this now, so that we can correctly mark the RHS reference.
"""
target = node.target
rhs_target = copy.deepcopy(target)
rhs_target.ctx = ast.Load()
ast.fix_missing_locations(rhs_target)
bin_op = self.ir.BinOp(rhs_target, node.op, node.value)
assignment = self.ir.Assign([target], bin_op)
assignment.inplace_op = node.op
return self.visit(assignment)
def DISABLED_visit_Compare(self, node):
"Reduce cascaded comparisons into single comparisons"
# Process children
self.generic_visit(node)
# TODO: We can't generate temporaries from subexpressions since
# this may invalidate execution order. For now, set the type so
# we can clone
for c in node.comparators:
c.type = None
compare_nodes = []
comparators = [nodes.CloneableNode(c) for c in node.comparators]
# Build comparison nodes
left = node.left
for op, right in zip(node.ops, comparators):
node = self.ir.Compare(left=left, ops=[op], comparators=[right])
# We shouldn't need to type this...
node = nodes.typednode(node, typesystem.bool_)
left = right.clone
compare_nodes.append(node)
# AND the comparisons together
boolop = lambda left, right: self.ir.BoolOp(ast.And(), [left, right])
node = reduce(boolop, reversed(compare_nodes))
return node
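    # Illustrative sketch of the intended reduction: a cascaded comparison
    #
    #     a < b < c
    #
    # would be rewritten as the AND of single comparisons, cloning the shared
    # operand so it is evaluated only once:
    #
    #     (a < b) and (b < c)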
#------------------------------------------------------------------------
# Nodes
#------------------------------------------------------------------------
class FuncDefExprNode(nodes.Node):
"""
Wraps an inner function node until the closure code kicks in.
"""
_fields = ['func_def']
########NEW FILE########
__FILENAME__ = odict
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
try:
from UserDict import DictMixin
except ImportError:
from collections import MutableMapping as DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
try:
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
except AttributeError:
pass
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
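# Hedged usage sketch: keys come back in insertion order, and re-assigning an
# existing key keeps its original position.
#
#     d = OrderedDict()
#     d['b'] = 1
#     d['a'] = 2
#     d['b'] = 3              # value updated, position unchanged
#     assert d.keys() == ['b', 'a']
#     assert d.popitem() == ('a', 2)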
########NEW FILE########
__FILENAME__ = oset
# -*- coding: utf-8 -*-
"""
Ordered Set. See http://code.activestate.com/recipes/576694/
"""
from __future__ import print_function, division, absolute_import
import collections
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def update(self, it):
for x in it:
self.add(x)
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
if __name__ == '__main__':
s = OrderedSet('abracadaba')
t = OrderedSet('simsalabim')
print((s | t))
print((s & t))
########NEW FILE########
__FILENAME__ = parallel
# -*- coding: utf-8 -*-
"""
Support for
for i in numba.prange(...):
...
The implementation isn't particularly good, and should be greatly simplified
at some point.
"""
from __future__ import print_function, division, absolute_import
import ast
import copy
import warnings
import multiprocessing
try:
NUM_THREADS = multiprocessing.cpu_count()
except NotImplementedError:
warnings.warn("Unable to determine cpu count, assuming 2")
NUM_THREADS = 2
import llvm.core
from llvm_cbuilder import *
from llvm_cbuilder import shortnames as C
from llvm_cbuilder import builder
import numba.decorators
from numba import *
from numba import error, visitors, nodes, templating
from numba.minivect import minitypes
from numba import typesystem, pipeline
from numba.type_inference import infer
from numba.specialize.loops import unpack_range_args
from numba import threads
opmap = {
# Unary
ast.Invert: '~',
ast.Not: None, # not supported
ast.UAdd: '+',
ast.USub: '-',
# Binary
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: '%',
ast.Pow: '**',
ast.LShift: '<<',
ast.RShift: '>>',
ast.BitOr: '|',
ast.BitXor: '^',
ast.BitAnd: '&',
ast.FloorDiv: '//',
# Comparison
ast.Eq: '==',
ast.NotEq: '!=',
ast.Lt: '<',
ast.LtE: '<=',
ast.Gt: '>',
ast.GtE: '>=',
ast.Is: None,
ast.IsNot: None,
ast.In: None,
ast.NotIn: None,
}
import logging
logger = logging.getLogger(__name__)
def get_reduction_op(op):
# TODO: recognize invalid operators
op = type(op)
reduction_op = opmap[op]
reduction_ops = {'-': '+', '/': '*'}
if reduction_op in reduction_ops:
reduction_op = reduction_ops[reduction_op]
return reduction_op
def get_reduction_default(op):
defaults = {
'+': 0, '-': 0, '*': 1, '/': 1,
}
return defaults[op]
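# For example (illustrative): an in-place ``acc -= x`` in a prange body maps to
# a '+' reduction (get_reduction_op turns '-' into '+'), and each thread's
# private ``acc`` starts from get_reduction_default('+') == 0; likewise
# ``acc /= x`` maps to a '*' reduction with default 1.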
class VariableFindingVisitor(visitors.VariableFindingVisitor):
"Find referenced and assigned ast.Name nodes"
def __init__(self):
super(VariableFindingVisitor, self).__init__()
self.reductions = {}
def register_assignment(self, node, target, op):
if isinstance(target, ast.Name):
self.assigned.add(target.id)
if op is None:
redop = op
else:
redop = get_reduction_op(op)
if target.id in self.reductions:
previous_op = self.reductions[target.id]
if ((previous_op is None and op is not None) or
(previous_op is not None and op is None)):
raise error.NumbaError(
node, "Reduction variable %r may not be "
"assigned to" % target.id)
else:
if redop != previous_op:
raise error.NumbaError(
node, "Incompatible reduction operators: "
"(%s and %s) for variable %r" % (
op, previous_op, target.id))
elif op:
self.reductions[target.id] = redop
def visit_Assign(self, node):
if isinstance(node.targets[0], ast.Name):
self.register_assignment(node, node.targets[0],
getattr(node, 'inplace_op', None))
def create_prange_closure(env, prange_node, body, target):
# Find referenced and assigned variables
v = VariableFindingVisitor()
v.visitlist(body)
# Determine privates and reductions. Shared variables will be handled by
# the closure support.
privates = set(v.assigned) - set(v.reductions)
reductions = v.reductions
if isinstance(target, ast.Name) and target.id in reductions:
# Remove target variable from reductions if present
reductions.pop(target.id)
privates.add(target.id)
privates_struct_type = numba.struct([])
privates_struct = ast.Name('__numba_privates', ast.Param())
args = [privates_struct]
func_def = ast.FunctionDef(name=templating.temp_name("prange_body"),
args=ast.arguments(args=args, vararg=None,
kwarg=None, defaults=[]),
body=copy.deepcopy(body),
decorator_list=[])
# Update outlined prange body closure
func_signature = void(privates_struct_type.ref())
# func_signature.struct_by_reference = True
need_closure_wrapper = False
locals_dict = { '__numba_privates': privates_struct_type.ref() }
func_env = env.translation.make_partial_env(
func_def,
func_signature=func_signature,
need_closure_wrapper=need_closure_wrapper,
locals=locals_dict,
)
# Update prange node
prange_node.func_env = func_env
prange_node.privates_struct_type = privates_struct_type
prange_node.privates = privates
prange_node.reductions = reductions
prange_node.func_def = func_def
prange_template = """
{{func_def}}
%s # function name; avoid warning about unused variable
$pack_struct
$nsteps = ({{stop}} - {{start}}) / ({{step}} * {{num_threads}})
for $i in range({{num_threads}}):
$temp_struct.__numba_closure_scope = {{closure_scope}}
$temp_struct.__numba_start = {{start}} + $i * {{step}} * $nsteps
$temp_struct.__numba_stop = $temp_struct.__numba_start + {{step}} * $nsteps
$temp_struct.__numba_step = {{step}}
$contexts[$i] = $temp_struct
# print "temp struct", $temp_struct.__numba_start, \
# $temp_struct.__numba_stop, {{step}}, $nsteps
# Undo any truncation, don't use $i here, range() doesn't
# have py semantics yet
$contexts[{{num_threads}} - 1].__numba_stop = {{stop}}
# print "invoking..."
{{invoke_and_join_threads}}
$lastprivates = $contexts[{{num_threads}} - 1]
"""
def kill_attribute_assignments(env, prange_node, temporaries):
"""
Remove attribute assignments from the list of statements that need to
be resolved before type inference.
"""
func_env = env.translation.crnt
kill_set = func_env.kill_attribute_assignments
kill_set.update(temporaries)
kill_set.update(prange_node.privates)
kill_set.update(prange_node.reductions)
def rewrite_prange(env, prange_node, target, locals_dict, closures_dict):
func_def = prange_node.func_def
struct_type = prange_node.privates_struct_type
templ = templating.TemplateContext(env.context,
prange_template % func_def.name)
# Allocate context for each thread
num_threads = NUM_THREADS
contexts_array_type = minitypes.CArrayType(struct_type,
num_threads)
# Create variables for template substitution
nsteps = templ.temp_var('nsteps')
temp_i = templ.temp_var('i', int32)
contexts = templ.temp_var('contexts', contexts_array_type)
temp_struct = templ.temp_var('temp_struct', struct_type)
lastprivates = templ.temp_var("lastprivates")
pack_struct = templ.code_var('pack_struct')
if isinstance(target, ast.Name):
target_name = target.id
struct_type.add_field(target_name, Py_ssize_t)
else:
raise error.NumbaError(
prange_node, "Only name target for prange is currently supported")
# Create code for reductions and (last)privates
for name, reduction_op in prange_node.reductions.iteritems():
default = get_reduction_default(reduction_op)
pack_struct.codes.append("%s.%s = %s" % (temp_struct, name,
default))
# Update struct type with closure scope, index variable, start,
# stop and step
struct_type.add_field('__numba_closure_scope', void.pointer())
struct_type.add_field('__numba_start', npy_intp)
struct_type.add_field('__numba_stop', npy_intp)
struct_type.add_field('__numba_step', npy_intp)
# Interpolate code and variables and run type inference
# TODO: UNDO monkeypatching
func_def.type = prange_node.func_env.func_signature
func_def.is_prange_body = True
func_def.prange_node = prange_node
num_threads_node = nodes.const(num_threads, Py_ssize_t)
invoke = InvokeAndJoinThreads(env, contexts=contexts.node,
func_def_name=func_def.name,
struct_type=struct_type,
target_name=target_name,
num_threads=num_threads_node,
closures=closures_dict)
closure_scope = nodes.ClosureScopeLoadNode()
subs = dict(
func_def=func_def,
func_def_name=func_def.name,
closure_scope=closure_scope,
invoke_and_join_threads=invoke,
num_threads=num_threads_node,
start=nodes.UntypedCoercion(prange_node.start, Py_ssize_t),
stop=nodes.UntypedCoercion(prange_node.stop, Py_ssize_t),
step=nodes.UntypedCoercion(prange_node.step, Py_ssize_t),
)
tree = templ.template(subs)
temporaries = {}
templ.update_locals(temporaries)
locals_dict.update(temporaries)
kill_attribute_assignments(env, prange_node, temporaries)
# TODO: Make this an SSA variable
locals_dict[target_name] = Py_ssize_t
prange_node.target = target
prange_node.num_threads_node = num_threads_node #.clone
prange_node.template_vars = {
'contexts': contexts,
'i': temp_i,
'lastprivates': lastprivates,
'nsteps': nsteps,
}
# print(templ.substituted_template)
return tree
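# Worked example of the iteration-space splitting done by prange_template above
# (illustrative numbers): with start=0, stop=100, step=1 and num_threads=4,
# $nsteps = (100 - 0) / (1 * 4) = 25, so thread i is handed
# __numba_start = 25 * i and __numba_stop = 25 * (i + 1). The template's final
# assignment resets the last thread's __numba_stop to the true stop (100 here),
# undoing any truncation introduced by the integer division.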
def typeof(name, expr):
return "__numba_typeof(%s, %s)" % (name, expr)
def assign(name, expr):
return "%s = %s" % (name, typeof(name, expr))
post_prange_template = """
#for $i in range({{num_threads}}):
# {{invoke_thread}}
#for $i in range({{num_threads}}):
# {{join_thread}}
# print "performing reductions"
for $i in range({{num_threads}}):
$reductions
# print "unpacking lastprivates"
$unpack_struct
"""
def perform_reductions(context, prange_node):
templ = templating.TemplateContext(context, post_prange_template)
unpack_struct, reductions = templ.code_vars('unpack_struct', 'reductions')
reductions.sep = "; "
getvar = prange_node.template_vars.get
templ.add_variable(getvar("i"))
# Create code for reductions and (last)privates
for name, reduction_op in prange_node.reductions.iteritems():
# Generate: x += contexts[i].x
expr = "%s %s %s[%s].%s" % (name, reduction_op,
getvar("contexts"), getvar("i"),
name)
reductions.codes.append(assign(name, expr))
target_name = ""
if isinstance(prange_node.target, ast.Name):
target_name = prange_node.target.id
for name in prange_node.privates:
# Generate: x += contexts[num_threads - 1].x
expr = "%s.%s" % (getvar("lastprivates"), name)
assmnt = assign(name, expr)
if name == target_name:
assmnt = "if %s > 0: %s" % (getvar("nsteps"), assmnt)
unpack_struct.codes.append(assmnt)
substitutions = { "num_threads": prange_node.num_threads_node }
result = templ.template(substitutions)
# print(templ.substituted_template)
return result
#------------------------------------------------------------------------
# prange nodes and types
#------------------------------------------------------------------------
class PrangeType(typesystem.NumbaType):
is_prange = True
is_range = True
class PrangeNode(nodes.ExprNode):
"""
Prange node. This replaces the For loop iterator in the initial stage.
After type inference and before closure type inference it replaces the
entire loop.
"""
_fields = ['start', 'stop', 'step']
func_def = None # outlined prange closure body
privates_struct_type = None # numba.struct(var_name=var_type)
privates = None # set([var_name])
reductions = None # { var_name: reduction_op }
target = None # Target iteration variable
num_threads_node = None # num_threads CloneNode
template_vars = None # { "template_var_name": $template_var }
def __init__(self, start, stop, step, **kwargs):
super(PrangeNode, self).__init__(**kwargs)
self.start = start
self.stop = stop
self.step = step
self.type = PrangeType()
class InvokeAndJoinThreads(nodes.UserNode):
"""
contexts
contexts array node (array of privates structs)
num_threads
num threads node
func_def_name
name of outlined prange body function
target_name
name of iteration target variable (e.g. 'i')
struct_type
privates struct type
closures
{ closure_name : closure_node }
"""
_fields = ['contexts', 'num_threads']
def __init__(self, env, **kwargs):
super(InvokeAndJoinThreads, self).__init__(**kwargs)
self.env = env
def infer_types(self, type_inferer):
type_inferer.visitchildren(self)
return self
def build_wrapper(self, codegen):
closure_type = self.closures[self.func_def_name].type
lfunc = closure_type.closure.lfunc
lfunc_pointer = closure_type.closure.lfunc_pointer
KernelWrapper, RunThreadPool = get_threadpool_funcs(
codegen.context,
self.struct_type,
self.target_name,
lfunc,
lfunc_pointer,
closure_type.signature,
self.num_threads,
codegen.llvm_module)
kernel_wrapper = nodes.LLVMCBuilderNode(self.env, KernelWrapper, None)
run_threadpool = nodes.LLVMCBuilderNode(self.env, RunThreadPool, None,
dependencies=[kernel_wrapper])
lfunc_run = codegen.visit(run_threadpool)
return lfunc_run
def codegen(self, codegen):
contexts = codegen.visit(self.contexts)
num_threads = codegen.visit(self.num_threads.coerce(int_))
lfunc_run = self.build_wrapper(codegen)
codegen.builder.call(lfunc_run, [contexts, num_threads])
return None
class TypeofNode(nodes.UserNode):
_fields = ["name", "expr"]
def __init__(self, name, expr):
self.name = name
self.expr = expr
def infer_types(self, type_inferer):
if type_inferer.analyse:
return type_inferer.visit(self.expr)
self.name = type_inferer.visit(self.name)
self.type = self.name.variable.type
return self
def make_privates_struct_type(privates_struct_type, names):
"""
Update the struct of privates and reductions once we know the
field types.
"""
fielddict = dict((name.id, name.variable.type) for name in names)
fielddict.update(privates_struct_type.fielddict)
fields = numba.struct(**fielddict).fields
privates_struct_type.fields[:] = fields
# privates_struct_type.fielddict = fielddict
privates_struct_type.update_mutated()
class VariableTypeInferingNode(nodes.UserNode):
_fields = ["names", "pre_prange_code"]
def __init__(self, variable_names, privates_struct_type):
super(VariableTypeInferingNode, self).__init__()
self.privates_struct_type = privates_struct_type
self.names = []
for varname in variable_names:
self.names.append(ast.Name(id=varname, ctx=ast.Load()))
def infer_types(self, type_inferer):
type_inferer.visitchildren(self)
make_privates_struct_type(self.privates_struct_type, self.names)
return None
#------------------------------------------------------------------------
# prange visitors
#------------------------------------------------------------------------
class PrangeExpander(visitors.NumbaTransformer):
"""
Rewrite 'for i in prange(...): ...' before the control flow pass.
"""
prange = 0
def visit_FunctionDef(self, node):
if self.func_level == 0:
node = self.visit_func_children(node)
return node
def match_global(self, node, expected_value):
if isinstance(node, ast.Name) and node.id not in self.local_names:
value = self.func_globals.get(node.id, None)
return value is expected_value
return False
def is_numba_prange(self, node):
return (self.match_global(node, prange) or
(isinstance(node, ast.Attribute) and node.attr == "prange" and
self.match_global(node.value, numba)))
def visit_Call(self, node):
if self.is_numba_prange(node.func):
infer.no_keywords(node)
start, stop, step = self.visitlist(unpack_range_args(node))
node = PrangeNode(start, stop, step)
self.visitchildren(node)
return node
def error_check_prange(self, node):
if self.prange:
raise error.NumbaError(node, "Cannot nest prange")
if node.orelse:
raise error.NumbaError(node.orelse,
"Else clause to prange not yet supported")
def visit_For(self, node):
node.iter = self.visit(node.iter)
if not isinstance(node.iter, PrangeNode):
self.visitchildren(node)
return node
self.error_check_prange(node)
node.target = self.visit(node.target)
self.prange += 1
node.body = self.visitlist(node.body)
self.prange -= 1
# Create prange closure
prange_node = node.iter
create_prange_closure(self.env, prange_node, node.body, node.target)
# setup glue code
pre_loop = rewrite_prange(self.env, prange_node, node.target,
self.locals, self.closures)
post_loop = perform_reductions(self.context, prange_node)
# infer glue code at the right place
pre_loop_dont_infer = nodes.dont_infer(pre_loop)
pre_loop_infer_now = nodes.infer_now(pre_loop, pre_loop_dont_infer)
# infer the type of the struct of privates right after the loop
allprivates = set(prange_node.privates) | set(prange_node.reductions)
type = prange_node.privates_struct_type
infer_privates_struct = VariableTypeInferingNode(allprivates, type)
# Signal that we now have additional local variables
self.invalidate_locals()
return ast.Suite(body=[
pre_loop_dont_infer,
node,
infer_privates_struct,
pre_loop_infer_now,
post_loop])
class PrangeCleanup(visitors.NumbaTransformer):
"""
Clean up outlined prange loops after type inference (removes them entirely).
"""
def visit_For(self, node):
if not node.iter.variable.type.is_prange:
self.visitchildren(node)
return node
nodes.delete_control_blocks(node, self.ast.flow)
return None
class PrangePrivatesReplacer(visitors.NumbaTransformer):
"""
    Rewrite private variables to attribute accesses on the privates struct.
    This runs before closure type inference of the outer function, and before
    type inference (and control flow analysis) of the inner function.
"""
in_prange_closure = 0
def visit_FunctionDef(self, node):
"""
        Analyse immediate prange functions (not ones in closures).
Don't re-analyze prange functions when the prange function closures
themselves are compiled.
"""
if getattr(node, 'is_prange_body', False) and self.func_level == 0:
prange_node = node.prange_node
self.privates_struct_type = prange_node.privates_struct_type
node.body = [nodes.WithNoPythonNode(body=node.body)]
self.in_prange_closure += 1
self.visit_func_children(node)
self.in_prange_closure -= 1
self.invalidate_locals(node)
self.invalidate_locals()
else:
self.visit_func_children(node)
return node
def visit_Name(self, node):
if self.in_prange_closure:
if node.id in self.privates_struct_type.fielddict:
privates_struct = ast.Name('__numba_privates', ast.Load()) #node.ctx)
result = ast.Attribute(value=privates_struct,
attr=node.id,
ctx=node.ctx)
return result
return node
def visit_Call(self, node):
if isinstance(node.func, ast.Name) and node.func.id == "__numba_typeof":
return TypeofNode(node.args[0], node.args[1])
self.visitchildren(node)
return node
#----------------------------------------------------------------------------
# LLVM cbuilder prange utilities
#----------------------------------------------------------------------------
_count = 0
def get_threadpool_funcs(context, context_struct_type, target_name,
lfunc, lfunc_pointer, signature,
num_threads, llvm_module):
"""
Get functions to run the closure in separate threads.
context:
the Numba/Minivect context
context_struct_type:
the struct type holding all private and reduction variables
"""
global _count
_count += 1
context_cbuilder_type = builder.CStruct.from_numba_struct(
context, context_struct_type)
context_p_ltype = context_struct_type.pointer().to_llvm(context)
class KernelWrapper(CDefinition):
"""
Implements a prange kernel wrapper that is invoked in each thread.
Implements:
for i in range(start, stop, step):
worker(closure_scope, privates)
"""
_name_ = "prange_kernel_wrapper_%d" % _count
_argtys_ = [
('context', C.void_p),
]
def __init__(self, dependencies, ldependencies, **kwargs):
super(KernelWrapper, self).__init__(**kwargs)
def dispatch(self, context_struct_p, context_getfield,
lfunc, lfunc_pointer):
"""
Call the closure with the closure scope and context arguments.
We don't directly call the lfunc since there are linkage issues.
"""
if signature.args[0].is_closure_scope:
llvm_object_type = object_.to_llvm(context)
closure_scope = context_getfield('closure_scope')
closure_scope = closure_scope.cast(llvm_object_type)
args = [closure_scope, context_struct_p]
else:
args = [context_struct_p]
# Get the LLVM arguments
llargs = [arg.handle for arg in args]
# Get the LLVM pointer to the function
lfunc_pointer = llvm.core.Constant.int(Py_uintptr_t.to_llvm(context),
lfunc_pointer)
lfunc_pointer = self.builder.inttoptr(lfunc_pointer, lfunc.type)
self.builder.call(lfunc_pointer, llargs)
def body(self, context_p):
context_struct_p = context_p.cast(context_p_ltype)
context_struct = context_struct_p.as_struct(context_cbuilder_type)
def context_getfield(name):
"Get a field named __numba_<name>"
return getattr(context_struct, '__numba_' + name)
start = self.var(C.npy_intp, context_getfield('start'))
stop = self.var(C.npy_intp, context_getfield('stop'))
step = self.var(C.npy_intp, context_getfield('step'))
length = stop - start
nsteps = self.var(C.npy_intp, length / step)
zero = self.constant(C.npy_intp, 0)
with self.ifelse(length % step != zero) as ifelse:
with ifelse.then():
nsteps += self.constant(C.npy_intp, 1)
# self.debug("start", start, "stop", stop, "step", step)
with self.for_range(nsteps) as (loop, i):
getattr(context_struct, target_name).assign(start)
self.dispatch(context_struct_p, context_getfield,
lfunc, lfunc_pointer)
start += step
self.ret()
def specialize(self, *args, **kwargs):
self._name_ = "__numba_kernel_wrapper_%s" % lfunc.name
class RunThreadPool(CDefinition, threads.ParallelMixin):
"""
Function that spawns the thread pool.
"""
_name_ = "invoke_prange_%d" % _count
_argtys_ = [
('contexts', context_p_ltype),
('num_threads', C.int),
]
def __init__(self, dependencies, ldependencies, **kwargs):
self.kernel_wrapper, = dependencies
super(RunThreadPool, self).__init__(**kwargs)
def body(self, contexts, num_threads):
callback = self.kernel_wrapper.pointer
callback = self.constant(Py_uintptr_t.to_llvm(context), callback)
callback = callback.cast(C.void_p)
self._dispatch_worker(callback, contexts, num_threads)
self.ret()
def specialize(self, *args, **kwargs):
self._name_ = "__numba_run_threadpool_%s" % lfunc.name
return KernelWrapper, RunThreadPool
#----------------------------------------------------------------------------
# The actual prange function
#----------------------------------------------------------------------------
def prange(start=0, stop=None, step=1):
if stop is None:
stop = start
start = 0
return range(start, stop, step)
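# Hedged usage sketch (assumes the autojit decorator from this code base; under
# plain Python the fallback above makes prange behave like range, while under
# numba the loop body is outlined and run across NUM_THREADS threads, with
# ``total`` treated as a '+' reduction):
#
#     @numba.autojit
#     def summed(n):
#         total = 0.0
#         for i in numba.prange(n):
#             total += i * 0.5
#         return total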
# numba.prange = prange
########NEW FILE########
__FILENAME__ = pipeline
# -*- coding: utf-8 -*-
"""
This module contains the Pipeline class which provides a pluggable way to
define the transformations and the order in which they run on the AST.
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import ast as ast_module
import logging
import pprint
import random
import types
import copy
import llvm.core as lc
# import numba.closures
from numba import PY3
from numba import error
from numba import functions
from numba import transforms
from numba import control_flow
from numba import closures
from numba import reporting
from numba import normalize
from numba import validate
from numba.array_validation import ArrayValidator
from numba.viz import cfgviz
from numba import typesystem
from numba.codegen import llvmwrapper
from numba import ast_constant_folding as constant_folding
from numba.control_flow import ssa, cfstats
from numba.codegen import translate
from numba import utils
from numba.missing import FixMissingLocations
from numba.type_inference import infer as type_inference
from numba.asdl import schema
from numba.prettyprint import (dump_ast, dump_cfg, dump_annotations,
dump_llvm, dump_optimized)
import numba.visitors
from numba.specialize import comparisons
from numba.specialize import loops
from numba.specialize import exceptions
from numba.specialize import funccalls
from numba.specialize import exttypes
from numba import astsix
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def get_locals(ast, locals_dict):
# TODO: Remove this
if locals_dict is None:
locals_dict = getattr(ast, "locals_dict", {})
elif hasattr(ast, "locals_dict"):
assert ast.locals_dict is locals_dict
ast.locals_dict = locals_dict
return locals_dict
def module_name(func):
if func is None:
name = "NoneFunc"
func_id = random.randrange(1000000)
else:
name = '%s.%s' % (func.__module__, func.__name__)
func_id = id(func)
return 'tmp.module.%s.%x' % (name, func_id)
#------------------------------------------------------------------------
# Entry points
#------------------------------------------------------------------------
def run_pipeline2(env, func, func_ast, func_signature,
pipeline=None, **kwargs):
assert pipeline is None
assert kwargs.get('order', None) is None
logger.debug(pprint.pformat(kwargs))
kwargs['llvm_module'] = lc.Module.new(module_name(func))
with env.TranslationContext(env, func, func_ast, func_signature,
**kwargs) as func_env:
pipeline = env.get_pipeline(kwargs.get('pipeline_name', None))
post_ast = pipeline(func_ast, env)
func_signature = func_env.func_signature
symtab = func_env.symtab
return func_env, (func_signature, symtab, post_ast)
def run_env(env, func_env, **kwargs):
env.translation.push_env(func_env)
pipeline = env.get_pipeline(kwargs.get('pipeline_name', None))
try:
pipeline(func_env.ast, env)
finally:
env.translation.pop()
def _infer_types2(env, func, restype=None, argtypes=None, **kwargs):
ast = functions._get_ast(func)
func_signature = typesystem.function(restype, argtypes)
return run_pipeline2(env, func, ast, func_signature, **kwargs)
def infer_types2(env, func, restype=None, argtypes=None, **kwargs):
"""
Like run_pipeline, but takes restype and argtypes instead of a function
"""
pipeline, (sig, symtab, ast) = _infer_types2(
env, func, restype, argtypes, pipeline_name='type_infer', **kwargs)
return sig, symtab, ast
def compile2(env, func, restype=None, argtypes=None, ctypes=False,
compile_only=False, func_ast=None, **kwds):
"""
Compile a numba annotated function.
- decompile function into a Python ast
- run type inference using the given input types
- compile the function to LLVM
"""
# Let the pipeline create a module for the function it is compiling
# and the user will link that in.
assert 'llvm_module' not in kwds
kwds['llvm_module'] = lc.Module.new(module_name(func))
logger.debug(kwds)
if func_ast is None:
func_ast = functions._get_ast(func)
else:
func_ast = copy.deepcopy(func_ast)
func_signature = typesystem.function(restype, argtypes)
#pipeline, (func_signature, symtab, ast) = _infer_types2(
# env, func, restype, argtypes, codegen=True, **kwds)
with env.TranslationContext(env, func, func_ast, func_signature,
need_lfunc_wrapper=not compile_only,
**kwds) as func_env:
pipeline = env.get_pipeline(kwds.get('pipeline_name', None))
func_ast.pipeline = pipeline
post_ast = pipeline(func_ast, env)
func_signature = func_env.func_signature
symtab = func_env.symtab
t = func_env.translator
return func_env
#------------------------------------------------------------------------
# Pipeline refactored code
#------------------------------------------------------------------------
class PipelineStage(object):
is_composed = False
def check_preconditions(self, ast, env):
return True
def check_postconditions(self, ast, env):
return True
def transform(self, ast, env):
raise NotImplementedError('%r does not implement transform!' %
type(self))
def make_specializer(self, cls, ast, env, **kws):
crnt = env.translation.crnt
kws = kws.copy()
kws.update(func_signature=crnt.func_signature,
nopython=env.translation.nopython,
symtab=crnt.symtab,
func_name=crnt.func_name,
llvm_module=crnt.llvm_module,
func_globals=crnt.function_globals,
locals=crnt.locals,
allow_rebind_args=env.translation.allow_rebind_args,
warn=env.translation.crnt.warn,
is_closure=crnt.is_closure,
closures=crnt.closures,
closure_scope=crnt.closure_scope,
env=env)
return cls(env.context, crnt.func, ast, **kws)
def __call__(self, ast, env):
if env.stage_checks: self.check_preconditions(ast, env)
if self.is_composed:
ast = self.transform(ast, env)
else:
try:
ast = self.transform(ast, env)
except error.NumbaError as e:
func_env = env.translation.crnt
error_env = func_env.error_env
if func_env.is_closure:
flags, parent_func_env = env.translation.stack[-2]
error_env.merge_in(parent_func_env.error_env)
elif not e.has_report:
reporting.report(env, exc=e)
raise
env.translation.crnt.ast = ast
if env.stage_checks: self.check_postconditions(ast, env)
return ast
class SimplePipelineStage(PipelineStage):
transformer = None
def transform(self, ast, env):
transform = self.make_specializer(self.transformer, ast, env)
return transform.visit(ast)
class AST3to2(PipelineStage):
def transform(self, ast, env):
if not PY3:
return ast
return astsix.AST3to2().visit(ast)
def ast3to2(ast, env):
if not PY3:
return ast
return astsix.AST3to2().visit(ast)
def resolve_templates(ast, env):
# TODO: Unify with decorators module
crnt = env.translation.crnt
if crnt.template_signature is not None:
from numba import typesystem
argnames = [name.id for name in ast.args.args]
argtypes = list(crnt.func_signature.args)
template_context, signature = typesystem.resolve_templates(
crnt.locals, crnt.template_signature, argnames, argtypes)
crnt.func_signature = signature
return ast
def validate_signature(tree, env):
arg_types = env.translation.crnt.func_signature.args
if (isinstance(tree, ast_module.FunctionDef) and
len(arg_types) != len(tree.args.args)):
raise error.NumbaError(
"Incorrect number of types specified in @jit() for function %r" %
env.crnt.func_name)
return tree
def validate_arrays(ast, env):
ArrayValidator(env).visit(ast)
return ast
def update_signature(tree, env):
func_env = env.translation.crnt
func_signature = func_env.func_signature
restype = func_signature.return_type
if restype and (restype.is_struct or restype.is_complex or restype.is_datetime or restype.is_timedelta):
# Change signatures returning complex numbers or structs to
# signatures taking a pointer argument to a complex number
# or struct
func_signature = func_signature.return_type(*func_signature.args)
func_env.func_signature = func_signature
return tree
def get_lfunc(env, func_env):
lfunc = func_env.llvm_module.add_function(
func_env.func_signature.to_llvm(env.context),
func_env.mangled_name)
return lfunc
def create_lfunc(tree, env):
"""
Update the FunctionEnvironment with an LLVM function if the signature
is known (try this before type inference to support recursion).
"""
func_env = env.translation.crnt
if (not func_env.lfunc and func_env.func_signature and
func_env.func_signature.return_type):
assert func_env.llvm_module is not None
lfunc = get_lfunc(env, func_env)
func_env.lfunc = lfunc
if func_env.func:
env.specializations.register_specialization(func_env)
return tree
def create_lfunc1(tree, env):
func_env = env.translation.crnt
if not func_env.is_closure:
create_lfunc(tree, env)
return tree
def create_lfunc2(tree, env):
func_env = env.translation.crnt
assert func_env.func_signature and func_env.func_signature.return_type
return create_lfunc1(tree, env)
def create_lfunc3(tree, env):
func_env = env.translation.crnt
create_lfunc(tree, env)
return tree
# ______________________________________________________________________
class ValidateASTStage(PipelineStage):
def transform(self, ast, env):
validate.ValidateAST().visit(ast)
return ast
class NormalizeASTStage(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(normalize.NormalizeAST, ast, env)
return transform.visit(ast)
# ______________________________________________________________________
class ControlFlowAnalysis(PipelineStage):
_pre_condition_schema = None
@property
def pre_condition_schema(self):
if self._pre_condition_schema is None:
self._pre_condition_schema = schema.load('Python.asdl')
return self._pre_condition_schema
def check_preconditions(self, ast, env):
self.pre_condition_schema.verify(ast) # raises exception on error
return True
def transform(self, ast, env):
transform = self.make_specializer(control_flow.ControlFlowAnalysis,
ast, env)
ast = transform.visit(ast)
env.translation.crnt.symtab = transform.symtab
ast.flow = transform.flow
return ast
class ConstFolding(PipelineStage):
def check_preconditions(self, ast, env):
assert not hasattr(env.crnt, 'constvars')
return super(ConstFolding, self).check_preconditions(ast, env)
def check_postconditions(self, ast, env):
assert hasattr(env.crnt, 'constvars')
return super(ConstFolding, self).check_postconditions(ast, env)
def transform(self, ast, env):
const_marker = self.make_specializer(constant_folding.ConstantMarker,
ast, env)
const_marker.visit(ast)
constvars = const_marker.get_constants()
# FIXME: Make constvars a property of the FunctionEnvironment,
# or nix this transformation pass.
env.translation.crnt.constvars = constvars
const_folder = self.make_specializer(constant_folding.ConstantFolder,
ast, env, constvars=constvars)
return const_folder.visit(ast)
class TypeInfer(PipelineStage):
def check_preconditions(self, ast, env):
assert env.translation.crnt.symtab is not None
return super(TypeInfer, self).check_preconditions(ast, env)
def transform(self, ast, env):
crnt = env.translation.crnt
type_inferer = self.make_specializer(type_inference.TypeInferer,
ast, env, **crnt.kwargs)
type_inferer.infer_types()
crnt.func_signature = type_inferer.func_signature
logger.debug("signature for %s: %s", crnt.func_name,
crnt.func_signature)
crnt.symtab = type_inferer.symtab
return ast
class TypeSet(PipelineStage):
def transform(self, ast, env):
visitor = self.make_specializer(type_inference.TypeSettingVisitor, ast,
env)
visitor.visit(ast)
return ast
class ClosureTypeInference(PipelineStage):
def transform(self, ast, env):
type_inferer = self.make_specializer(
numba.closures.ClosureTypeInferer, ast, env)
return type_inferer.visit(ast)
class TransformFor(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(loops.TransformForIterable, ast,
env)
return transform.visit(ast)
class TransformBuiltinLoops(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(loops.TransformBuiltinLoops, ast,
env)
return transform.visit(ast)
#----------------------------------------------------------------------------
# Prange
#----------------------------------------------------------------------------
def run_prange(name):
def wrapper(ast, env):
from numba import parallel
stage = getattr(parallel, name)
return PipelineStage().make_specializer(stage, ast, env).visit(ast)
wrapper.__name__ = name
return wrapper
ExpandPrange = run_prange('PrangeExpander')
RewritePrangePrivates = run_prange('PrangePrivatesReplacer')
CleanupPrange = run_prange('PrangeCleanup')
class UpdateAttributeStatements(PipelineStage):
def transform(self, ast, env):
func_env = env.translation.crnt
for block in func_env.flow.blocks:
stats = []
for cf_stat in block.stats:
if (isinstance(cf_stat, cfstats.AttributeAssignment) and
isinstance(cf_stat.lhs, ast_module.Attribute)):
value = cf_stat.lhs.value
if (isinstance(value, ast_module.Name) and
value.id in func_env.kill_attribute_assignments):
cf_stat = None
if cf_stat:
stats.append(cf_stat)
block.stats = stats
return ast
#----------------------------------------------------------------------------
# Specializing/Lowering Transforms
#----------------------------------------------------------------------------
class Specialize(PipelineStage):
def transform(self, ast, env):
return ast
class RewriteArrayExpressions(PipelineStage):
def transform(self, ast, env):
from numba import array_expressions
transformer = self.make_specializer(
array_expressions.ArrayExpressionRewriteNative, ast, env)
return transformer.visit(ast)
class SpecializeComparisons(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(comparisons.SpecializeComparisons,
ast, env)
return transform.visit(ast)
class SpecializeSSA(PipelineStage):
def transform(self, ast, env):
ssa.specialize_ssa(ast)
return ast
class SpecializeClosures(SimplePipelineStage):
transformer = closures.ClosureSpecializer
class Optimize(PipelineStage):
def transform(self, ast, env):
return ast
class SpecializeLoops(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(loops.SpecializeObjectIteration, ast,
env)
return transform.visit(ast)
class LowerRaise(PipelineStage):
def transform(self, ast, env):
return self.make_specializer(exceptions.LowerRaise, ast, env).visit(ast)
class LateSpecializer(PipelineStage):
def transform(self, ast, env):
specializer = self.make_specializer(transforms.LateSpecializer, ast,
env)
return specializer.visit(ast)
class ExtensionTypeLowerer(PipelineStage):
def transform(self, ast, env):
specializer = self.make_specializer(exttypes.ExtensionTypeLowerer,
ast, env)
return specializer.visit(ast)
class SpecializeFunccalls(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(funccalls.FunctionCallSpecializer,
ast, env)
return transform.visit(ast)
class SpecializeExceptions(PipelineStage):
def transform(self, ast, env):
transform = self.make_specializer(exceptions.ExceptionSpecializer, ast,
env)
return transform.visit(ast)
def cleanup_symtab(ast, env):
"Pop original variables from the symtab"
for var in env.translation.crnt.symtab.values():
if not var.parent_var and var.renameable:
env.translation.crnt.symtab.pop(var.name, None)
return ast
class FixASTLocations(PipelineStage):
def transform(self, ast, env):
lineno = getattr(ast, 'lineno', 1)
col_offset = getattr(ast, 'col_offset', 1)
FixMissingLocations(lineno, col_offset).visit(ast)
return ast
class CodeGen(PipelineStage):
def transform(self, ast, env):
func_env = env.translation.crnt
func_env.translator = self.make_specializer(
translate.LLVMCodeGenerator, ast, env,
**func_env.kwargs)
func_env.translator.translate()
func_env.lfunc = func_env.translator.lfunc
return ast
class PostPass(PipelineStage):
def transform(self, ast, env):
for postpass_name, postpass in env.crnt.postpasses.iteritems():
env.crnt.lfunc = postpass(env,
env.llvm_context.execution_engine,
env.crnt.llvm_module,
env.crnt.lfunc)
return ast
class LinkingStage(PipelineStage):
"""
Link the resulting LLVM function into the global fat module.
"""
def transform(self, ast, env):
func_env = env.translation.crnt
# Link libraries into module
env.context.intrinsic_library.link(func_env.lfunc.module)
# env.context.cbuilder_library.link(func_env.lfunc.module)
env.constants_manager.link(func_env.lfunc.module)
lfunc_pointer = 0
if func_env.link:
# Link function into fat LLVM module
func_env.lfunc = env.llvm_context.link(func_env.lfunc)
func_env.translator.lfunc = func_env.lfunc
lfunc_pointer = func_env.translator.lfunc_pointer
func_env.lfunc_pointer = lfunc_pointer
return ast
class WrapperStage(PipelineStage):
"""
Build a wrapper LLVM function around the compiled numba function to call
it from Python.
"""
def transform(self, ast, env):
func_env = env.translation.crnt
if func_env.is_closure:
wrap = func_env.need_closure_wrapper
else:
wrap = func_env.wrap
if wrap:
numbawrapper, lfuncwrapper, _ = (
llvmwrapper.build_wrapper_function(env))
func_env.numba_wrapper_func = numbawrapper
func_env.llvm_wrapper_func = lfuncwrapper
# Set pointer to function for external code and numba.addressof()
numbawrapper.lfunc_pointer = func_env.lfunc_pointer
return ast
class ErrorReporting(PipelineStage):
"Sort and issue warnings and errors"
def transform(self, ast, env):
reporting.report(env)
return ast
class ComposedPipelineStage(PipelineStage):
is_composed = True
def __init__(self, stages=None):
if stages is None:
stages = []
self.stages = [self.check_stage(stage)[1] for stage in stages]
@staticmethod
def check_stage(stage):
def _check_stage_object(stage_obj):
if (isinstance(stage_obj, type) and
issubclass(stage_obj, PipelineStage)):
stage_obj = stage_obj()
return stage_obj
if isinstance(stage, str):
name = stage
def _stage(ast, env):
stage_obj = getattr(env.pipeline_stages, name)
return _check_stage_object(stage_obj)(ast, env)
_stage.__name__ = name
stage = _stage
else:
name = stage.__name__
stage = _check_stage_object(stage)
return name, stage
def transform(self, ast, env):
logger.debug('Running composed stages: %s', self.stages)
for stage in self.stages:
if env.debug:
stage_tuple = (stage, utils.ast2tree(ast))
logger.debug(pprint.pformat(stage_tuple))
ast = stage(ast, env)
return ast
@classmethod
def compose(cls, stage0, stage1):
if isinstance(stage0, ComposedPipelineStage):
stage0s = stage0.stages
else:
            stage0s = [cls.check_stage(stage0)[1]]
if isinstance(stage1, ComposedPipelineStage):
stage1s = stage1.stages
else:
            stage1s = [cls.check_stage(stage1)[1]]
return cls(stage0s + stage1s)
########NEW FILE########
__FILENAME__ = postpasses
# -*- coding: utf-8 -*-
"""
Postpasses over the LLVM IR.
The signature of each postpass is postpass(env, ee, lmod, lfunc) -> lfunc
"""
from __future__ import print_function, division, absolute_import
import llvmmath
from llvmmath import linking
default_postpasses = {}
def register_default(name):
def dec(f):
default_postpasses[name] = f
return f
return dec
# ______________________________________________________________________
# Postpasses
@register_default('math')
def postpass_link_math(env, ee, lmod, lfunc):
"numba.math.* -> llvmmath.*"
replacements = {}
for lf in lmod.functions:
if lf.name.startswith('numba.math.'):
_, _, name = lf.name.rpartition('.')
replacements[lf.name] = name
del lf # this is dead after linking below
default_math_lib = llvmmath.get_default_math_lib()
linker = linking.get_linker(default_math_lib)
linking.link_llvm_math_intrinsics(ee, lmod, default_math_lib,
linker, replacements)
return lfunc
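# Illustrative example of the renaming above: a module function named
# "numba.math.sin" yields replacements["numba.math.sin"] = "sin", and
# llvmmath's linker then binds the call to its own sin implementation for
# this module and execution engine.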
########NEW FILE########
__FILENAME__ = prettyprint
# -*- coding: utf-8 -*-
"""
Pretty printing of numba IRs.
"""
from __future__ import print_function, division, absolute_import
import os
import sys
from numba.lexing import lex_source
from numba.viz import cfgviz, astviz
from numba.annotate import annotators
from numba.annotate import render_text, render_html
from numba.annotate.annotate import Source, Program, build_linemap
# ______________________________________________________________________
def dumppass(option):
def decorator(f):
def wrapper(ast, env):
if env.cmdopts.get(option):
f(ast, env, env.cmdopts.get("fancy"))
return ast
return wrapper
return decorator
# ______________________________________________________________________
@dumppass("dump-ast")
def dump_ast(ast, env, fancy):
if fancy:
astviz.render_ast(ast, os.path.expanduser("~/ast.dot"))
else:
import ast as ast_module
print(ast_module.dump(ast))
@dumppass("dump-cfg")
def dump_cfg(ast, env, fancy):
cfg = env.crnt.flow
if fancy:
cfgviz.render_cfg(cfg, os.path.expanduser("~/cfg.dot"))
else:
for block in cfg.blocks:
print(block)
print(" ", block.parents)
print(" ", block.children)
@dumppass("annotate")
def dump_annotations(ast, env, fancy):
llvm_intermediate, = [i for i in env.crnt.intermediates if i.name == "llvm"]
annotators.annotate_pyapi(llvm_intermediate, env.crnt.annotations)
p = Program(Source(build_linemap(env.crnt.func), env.crnt.annotations),
env.crnt.intermediates)
if fancy:
render = render_html
fn, ext = os.path.splitext(env.cmdopts["filename"])
out = open(fn + '.html', 'w')
print("Writing", fn + '.html')
else:
render = render_text
out = sys.stdout
# Look back through stack until we find the line of code that called our
# jitted function.
func_call = ''
func_call_filename = env.cmdopts['filename']
func_call_lineno = ''
import traceback
stack = traceback.extract_stack()
for i in range(len(stack)-1, -1, -1):
if stack[i][0] == env.cmdopts['filename'] and stack[i][3].find(env.crnt.func_name) > -1:
func_call = stack[i][3]
func_call_lineno = str(stack[i][1])
break
annotation = {'func_call':func_call,
'func_call_filename':func_call_filename,
'func_call_lineno':func_call_lineno,
'python_source':p.python_source,
'intermediates':p.intermediates}
if fancy:
env.annotation_blocks.append(annotation)
else:
env.annotation_blocks = [annotation]
render(env.annotation_blocks, emit=out.write, intermediate_names=["llvm"])
@dumppass("dump-llvm")
def dump_llvm(ast, env, fancy):
print(lex_source(str(env.crnt.lfunc), "llvm", "console"))
@dumppass("dump-optimized")
def dump_optimized(ast, env, fancy):
print(lex_source(str(env.crnt.lfunc), "llvm", "console"))
########NEW FILE########
__FILENAME__ = random
import ctypes as ct
import numpy.random as nr
import os.path
from numpy.distutils.misc_util import get_shared_lib_extension
mtrand = ct.CDLL(nr.mtrand.__file__)
# Should we parse this from randomkit.h in the numpy directory?
RK_STATE_LEN = len(nr.get_state()[1])
class rk_state(ct.Structure):
_fields_ = [("key", ct.c_ulong * RK_STATE_LEN),
("pos", ct.c_int),
("has_gauss", ct.c_int),
("gauss", ct.c_double),
("has_binomial", ct.c_int),
("psave", ct.c_double),
("nsave", ct.c_long),
("r", ct.c_double),
("q", ct.c_double),
("fm", ct.c_double),
("m", ct.c_long),
("p1", ct.c_double),
("xm", ct.c_double),
("xl", ct.c_double),
("xr", ct.c_double),
("c", ct.c_double),
("laml", ct.c_double),
("lamr", ct.c_double),
("p2", ct.c_double),
("p3", ct.c_double),
("p4", ct.c_double)]
try:
rk_randomseed = mtrand.rk_randomseed
rk_seed = mtrand.rk_seed
rk_interval = mtrand.rk_interval
rk_gamma = mtrand.rk_gamma
rk_normal = mtrand.rk_normal
except AttributeError as e:
raise ImportError(str(e))
rk_randomseed.argtypes = [ct.POINTER(rk_state)]
rk_seed.restype = None
rk_seed.argtypes = [ct.c_long, ct.POINTER(rk_state)]
rk_interval.restype = ct.c_ulong
rk_interval.argtypes = [ct.c_ulong, ct.POINTER(rk_state)]
state = rk_state()
state_p = ct.pointer(state)
state_vp = ct.cast(state_p, ct.c_void_p)
def seed(N):
return rk_seed(N, state_p)
# Returns a random unsigned long between 0 and max inclusive
def interval(max):
return rk_interval(max, state_p)
def init():
if rk_randomseed(state_p) != 0:
raise ValueError("Cannot initialize the random number generator.")
def init2(n=200):
    # rk_seed returns void (its restype is set to None above), so there is no
    # status code to check; just seed the generator directly.
    rk_seed(n, state_p)
rk_address = ct.POINTER(rk_state)
rk_error = ct.c_int
_thisname = os.path.abspath(__file__)
_filename = os.path.dirname(_thisname) + os.path.sep + '_rng_generated.py'
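# _rng_generated.py is produced by the __generate_rng helper script; it defines
# ctypes prototypes plus thin Python wrappers for each rk_* distribution.
# Executing it here pulls those wrapper names (normal, gamma, poisson, ...)
# into this module's namespace.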
with open(_filename) as f:
_code = compile(f.read(), _filename, 'exec')
exec(_code)
del _thisname
del _filename
del f
del _code
init()
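# Hypothetical usage sketch (module import path assumed):
#   from numba import random as nbrng
#   nbrng.seed(42)
#   u = nbrng.double()          # uniform double in [0.0, 1.0)
#   g = nbrng.normal(0.0, 1.0)  # gaussian deviate via rk_normal
#   k = nbrng.interval(10)      # unsigned long in [0, 10]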
########NEW FILE########
__FILENAME__ = _rng_generated
rk_randomseed = mtrand.rk_randomseed
rk_randomseed.restype = rk_error
rk_randomseed.argtypes = [rk_address]
def randomseed():
return rk_randomseed(state_p)
rk_random = mtrand.rk_random
rk_random.restype = ct.c_ulong
rk_random.argtypes = [rk_address]
def random():
return rk_random(state_p)
rk_long = mtrand.rk_long
rk_long.restype = ct.c_long
rk_long.argtypes = [rk_address]
def long():
return rk_long(state_p)
rk_ulong = mtrand.rk_ulong
rk_ulong.restype = ct.c_ulong
rk_ulong.argtypes = [rk_address]
def ulong():
return rk_ulong(state_p)
rk_double = mtrand.rk_double
rk_double.restype = ct.c_double
rk_double.argtypes = [rk_address]
def double():
return rk_double(state_p)
rk_gauss = mtrand.rk_gauss
rk_gauss.restype = ct.c_double
rk_gauss.argtypes = [rk_address]
def gauss():
return rk_gauss(state_p)
rk_normal = mtrand.rk_normal
rk_normal.restype = ct.c_double
rk_normal.argtypes = [rk_address,ct.c_double,ct.c_double]
def normal(loc,scale):
return rk_normal(state_p,loc,scale)
rk_standard_exponential = mtrand.rk_standard_exponential
rk_standard_exponential.restype = ct.c_double
rk_standard_exponential.argtypes = [rk_address]
def standard_exponential():
return rk_standard_exponential(state_p)
rk_exponential = mtrand.rk_exponential
rk_exponential.restype = ct.c_double
rk_exponential.argtypes = [rk_address,ct.c_double]
def exponential(scale):
return rk_exponential(state_p,scale)
rk_uniform = mtrand.rk_uniform
rk_uniform.restype = ct.c_double
rk_uniform.argtypes = [rk_address,ct.c_double,ct.c_double]
def uniform(loc,scale):
return rk_uniform(state_p,loc,scale)
rk_standard_gamma = mtrand.rk_standard_gamma
rk_standard_gamma.restype = ct.c_double
rk_standard_gamma.argtypes = [rk_address,ct.c_double]
def standard_gamma(shape):
return rk_standard_gamma(state_p,shape)
rk_gamma = mtrand.rk_gamma
rk_gamma.restype = ct.c_double
rk_gamma.argtypes = [rk_address,ct.c_double,ct.c_double]
def gamma(shape,scale):
return rk_gamma(state_p,shape,scale)
rk_beta = mtrand.rk_beta
rk_beta.restype = ct.c_double
rk_beta.argtypes = [rk_address,ct.c_double,ct.c_double]
def beta(a,b):
return rk_beta(state_p,a,b)
rk_chisquare = mtrand.rk_chisquare
rk_chisquare.restype = ct.c_double
rk_chisquare.argtypes = [rk_address,ct.c_double]
def chisquare(df):
return rk_chisquare(state_p,df)
rk_noncentral_chisquare = mtrand.rk_noncentral_chisquare
rk_noncentral_chisquare.restype = ct.c_double
rk_noncentral_chisquare.argtypes = [rk_address,ct.c_double,ct.c_double]
def noncentral_chisquare(df,nonc):
return rk_noncentral_chisquare(state_p,df,nonc)
rk_f = mtrand.rk_f
rk_f.restype = ct.c_double
rk_f.argtypes = [rk_address,ct.c_double,ct.c_double]
def f(dfnum,dfden):
return rk_f(state_p,dfnum,dfden)
rk_noncentral_f = mtrand.rk_noncentral_f
rk_noncentral_f.restype = ct.c_double
rk_noncentral_f.argtypes = [rk_address,ct.c_double,ct.c_double,ct.c_double]
def noncentral_f(dfnum,dfden,nonc):
return rk_noncentral_f(state_p,dfnum,dfden,nonc)
rk_binomial = mtrand.rk_binomial
rk_binomial.restype = ct.c_long
rk_binomial.argtypes = [rk_address,ct.c_long,ct.c_double]
def binomial(n,p):
return rk_binomial(state_p,n,p)
rk_binomial_btpe = mtrand.rk_binomial_btpe
rk_binomial_btpe.restype = ct.c_long
rk_binomial_btpe.argtypes = [rk_address,ct.c_long,ct.c_double]
def binomial_btpe(n,p):
return rk_binomial_btpe(state_p,n,p)
rk_binomial_inversion = mtrand.rk_binomial_inversion
rk_binomial_inversion.restype = ct.c_long
rk_binomial_inversion.argtypes = [rk_address,ct.c_long,ct.c_double]
def binomial_inversion(n,p):
return rk_binomial_inversion(state_p,n,p)
rk_negative_binomial = mtrand.rk_negative_binomial
rk_negative_binomial.restype = ct.c_long
rk_negative_binomial.argtypes = [rk_address,ct.c_double,ct.c_double]
def negative_binomial(n,p):
return rk_negative_binomial(state_p,n,p)
rk_poisson = mtrand.rk_poisson
rk_poisson.restype = ct.c_long
rk_poisson.argtypes = [rk_address,ct.c_double]
def poisson(lam):
return rk_poisson(state_p,lam)
rk_poisson_mult = mtrand.rk_poisson_mult
rk_poisson_mult.restype = ct.c_long
rk_poisson_mult.argtypes = [rk_address,ct.c_double]
def poisson_mult(lam):
return rk_poisson_mult(state_p,lam)
rk_poisson_ptrs = mtrand.rk_poisson_ptrs
rk_poisson_ptrs.restype = ct.c_long
rk_poisson_ptrs.argtypes = [rk_address,ct.c_double]
def poisson_ptrs(lam):
return rk_poisson_ptrs(state_p,lam)
rk_standard_cauchy = mtrand.rk_standard_cauchy
rk_standard_cauchy.restype = ct.c_double
rk_standard_cauchy.argtypes = [rk_address]
def standard_cauchy():
return rk_standard_cauchy(state_p)
rk_standard_t = mtrand.rk_standard_t
rk_standard_t.restype = ct.c_double
rk_standard_t.argtypes = [rk_address,ct.c_double]
def standard_t(df):
return rk_standard_t(state_p,df)
rk_vonmises = mtrand.rk_vonmises
rk_vonmises.restype = ct.c_double
rk_vonmises.argtypes = [rk_address,ct.c_double,ct.c_double]
def vonmises(mu,kappa):
return rk_vonmises(state_p,mu,kappa)
rk_pareto = mtrand.rk_pareto
rk_pareto.restype = ct.c_double
rk_pareto.argtypes = [rk_address,ct.c_double]
def pareto(a):
return rk_pareto(state_p,a)
rk_weibull = mtrand.rk_weibull
rk_weibull.restype = ct.c_double
rk_weibull.argtypes = [rk_address,ct.c_double]
def weibull(a):
return rk_weibull(state_p,a)
rk_power = mtrand.rk_power
rk_power.restype = ct.c_double
rk_power.argtypes = [rk_address,ct.c_double]
def power(a):
return rk_power(state_p,a)
rk_laplace = mtrand.rk_laplace
rk_laplace.restype = ct.c_double
rk_laplace.argtypes = [rk_address,ct.c_double,ct.c_double]
def laplace(loc,scale):
return rk_laplace(state_p,loc,scale)
rk_gumbel = mtrand.rk_gumbel
rk_gumbel.restype = ct.c_double
rk_gumbel.argtypes = [rk_address,ct.c_double,ct.c_double]
def gumbel(loc,scale):
return rk_gumbel(state_p,loc,scale)
rk_logistic = mtrand.rk_logistic
rk_logistic.restype = ct.c_double
rk_logistic.argtypes = [rk_address,ct.c_double,ct.c_double]
def logistic(loc,scale):
return rk_logistic(state_p,loc,scale)
rk_lognormal = mtrand.rk_lognormal
rk_lognormal.restype = ct.c_double
rk_lognormal.argtypes = [rk_address,ct.c_double,ct.c_double]
def lognormal(mean,sigma):
return rk_lognormal(state_p,mean,sigma)
rk_rayleigh = mtrand.rk_rayleigh
rk_rayleigh.restype = ct.c_double
rk_rayleigh.argtypes = [rk_address,ct.c_double]
def rayleigh(mode):
return rk_rayleigh(state_p,mode)
rk_wald = mtrand.rk_wald
rk_wald.restype = ct.c_double
rk_wald.argtypes = [rk_address,ct.c_double,ct.c_double]
def wald(mean,scale):
return rk_wald(state_p,mean,scale)
rk_zipf = mtrand.rk_zipf
rk_zipf.restype = ct.c_long
rk_zipf.argtypes = [rk_address,ct.c_double]
def zipf(a):
return rk_zipf(state_p,a)
rk_geometric = mtrand.rk_geometric
rk_geometric.restype = ct.c_long
rk_geometric.argtypes = [rk_address,ct.c_double]
def geometric(p):
return rk_geometric(state_p,p)
rk_geometric_search = mtrand.rk_geometric_search
rk_geometric_search.restype = ct.c_long
rk_geometric_search.argtypes = [rk_address,ct.c_double]
def geometric_search(p):
return rk_geometric_search(state_p,p)
rk_geometric_inversion = mtrand.rk_geometric_inversion
rk_geometric_inversion.restype = ct.c_long
rk_geometric_inversion.argtypes = [rk_address,ct.c_double]
def geometric_inversion(p):
return rk_geometric_inversion(state_p,p)
rk_hypergeometric = mtrand.rk_hypergeometric
rk_hypergeometric.restype = ct.c_long
rk_hypergeometric.argtypes = [rk_address,ct.c_long,ct.c_long,ct.c_long]
def hypergeometric(good,bad,sample):
return rk_hypergeometric(state_p,good,bad,sample)
rk_hypergeometric_hyp = mtrand.rk_hypergeometric_hyp
rk_hypergeometric_hyp.restype = ct.c_long
rk_hypergeometric_hyp.argtypes = [rk_address,ct.c_long,ct.c_long,ct.c_long]
def hypergeometric_hyp(good,bad,sample):
return rk_hypergeometric_hyp(state_p,good,bad,sample)
rk_hypergeometric_hrua = mtrand.rk_hypergeometric_hrua
rk_hypergeometric_hrua.restype = ct.c_long
rk_hypergeometric_hrua.argtypes = [rk_address,ct.c_long,ct.c_long,ct.c_long]
def hypergeometric_hrua(good,bad,sample):
return rk_hypergeometric_hrua(state_p,good,bad,sample)
rk_triangular = mtrand.rk_triangular
rk_triangular.restype = ct.c_double
rk_triangular.argtypes = [rk_address,ct.c_double,ct.c_double,ct.c_double]
def triangular(left,mode,right):
return rk_triangular(state_p,left,mode,right)
rk_logseries = mtrand.rk_logseries
rk_logseries.restype = ct.c_long
rk_logseries.argtypes = [rk_address,ct.c_double]
def logseries(p):
return rk_logseries(state_p,p)
########NEW FILE########
__FILENAME__ = __generate_rng
def _groupn(iter, N):
k = 0
ret = ()
for item in iter:
k = k + 1
ret += (item,)
if k == N:
yield ret
k = 0
ret = ()
_toreplace = [('unsigned long ', 'ct.c_ulong '),
('long ', 'ct.c_long '),
('double ', 'ct.c_double '),
('rk_state *', 'rk_address '),
('void *', 'ct.c_void_p '),
('void', 'None')]
# Returns [name, restype, argtypes, argnames] for each extern declaration.
def parse_header(header):
mystr = '@@@_%d_@@@'
for i, (old, new) in enumerate(_toreplace):
header = header.replace(old, mystr % i)
for i, (old, new) in enumerate(_toreplace):
header = header.replace(mystr % i, new)
funcs = [val.split(';')[0].strip() for indx, val in enumerate(header.split('extern')) if indx > 0]
result = []
for func in funcs:
temp = func.split()
restype = temp[0]
name, arg0type = temp[1].split('(')
argtypes = [arg0type]
argnames = [temp[2].strip(')')]
for atype, aname in _groupn(temp[3:], 2):
argnames.append(aname.strip(')'))
argtypes.append(atype)
result.append([name, restype, argtypes, argnames])
return result
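# For example, parse_header("extern double rk_normal(rk_state *state, double loc, double scale);")
# returns (trailing commas on argument names are stripped later by the caller):
#   [['rk_normal', 'ct.c_double',
#     ['rk_address', 'ct.c_double', 'ct.c_double'],
#     ['state,', 'loc,', 'scale']]]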
header1 = """
/*
* Initialize the RNG state using the given seed.
*/
/*
* Initialize the RNG state using a random seed.
* Uses /dev/random or, when unavailable, the clock (see randomkit.c).
 * Returns RK_NOERR when no error occurs.
* Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because
* there is no such device). In this case, the RNG was initialized using the
* clock.
*/
extern rk_error rk_randomseed(rk_state *state);
/*
* Returns a random unsigned long between 0 and RK_MAX inclusive
*/
extern unsigned long rk_random(rk_state *state);
/*
* Returns a random long between 0 and LONG_MAX inclusive
*/
extern long rk_long(rk_state *state);
/*
* Returns a random unsigned long between 0 and ULONG_MAX inclusive
*/
extern unsigned long rk_ulong(rk_state *state);
/*
* Returns a random double between 0.0 and 1.0, 1.0 excluded.
*/
extern double rk_double(rk_state *state);
/*
* return a random gaussian deviate with variance unity and zero mean.
*/
extern double rk_gauss(rk_state *state);
"""
header2 = """
/* Normal distribution with mean=loc and standard deviation=scale. */
extern double rk_normal(rk_state *state, double loc, double scale);
/* Standard exponential distribution (mean=1) computed by inversion of the
* CDF. */
extern double rk_standard_exponential(rk_state *state);
/* Exponential distribution with mean=scale. */
extern double rk_exponential(rk_state *state, double scale);
/* Uniform distribution on interval [loc, loc+scale). */
extern double rk_uniform(rk_state *state, double loc, double scale);
/* Standard gamma distribution with shape parameter.
* When shape < 1, the algorithm given by (Devroye p. 304) is used.
 * When shape == 1, an Exponential variate is generated.
* When shape > 1, the small and fast method of (Marsaglia and Tsang 2000)
* is used.
*/
extern double rk_standard_gamma(rk_state *state, double shape);
/* Gamma distribution with shape and scale. */
extern double rk_gamma(rk_state *state, double shape, double scale);
/* Beta distribution computed by combining two gamma variates (Devroye p. 432).
*/
extern double rk_beta(rk_state *state, double a, double b);
/* Chi^2 distribution computed by transforming a gamma variate (it being a
* special case Gamma(df/2, 2)). */
extern double rk_chisquare(rk_state *state, double df);
/* Noncentral Chi^2 distribution computed by modifying a Chi^2 variate. */
extern double rk_noncentral_chisquare(rk_state *state, double df, double nonc);
/* F distribution computed by taking the ratio of two Chi^2 variates. */
extern double rk_f(rk_state *state, double dfnum, double dfden);
/* Noncentral F distribution computed by taking the ratio of a noncentral Chi^2
* and a Chi^2 variate. */
extern double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc);
/* Binomial distribution with n Bernoulli trials with success probability p.
* When n*p <= 30, the "Second waiting time method" given by (Devroye p. 525) is
* used. Otherwise, the BTPE algorithm of (Kachitvichyanukul and Schmeiser 1988)
* is used. */
extern long rk_binomial(rk_state *state, long n, double p);
/* Binomial distribution using BTPE. */
extern long rk_binomial_btpe(rk_state *state, long n, double p);
/* Binomial distribution using inversion and chop-down */
extern long rk_binomial_inversion(rk_state *state, long n, double p);
/* Negative binomial distribution computed by generating a Gamma(n, (1-p)/p)
* variate Y and returning a Poisson(Y) variate (Devroye p. 543). */
extern long rk_negative_binomial(rk_state *state, double n, double p);
/* Poisson distribution with mean=lam.
* When lam < 10, a basic algorithm using repeated multiplications of uniform
* variates is used (Devroye p. 504).
* When lam >= 10, algorithm PTRS from (Hoermann 1992) is used.
*/
extern long rk_poisson(rk_state *state, double lam);
/* Poisson distribution computed by repeated multiplication of uniform variates.
*/
extern long rk_poisson_mult(rk_state *state, double lam);
/* Poisson distribution computed by the PTRS algorithm. */
extern long rk_poisson_ptrs(rk_state *state, double lam);
/* Standard Cauchy distribution computed by dividing standard gaussians
* (Devroye p. 451). */
extern double rk_standard_cauchy(rk_state *state);
/* Standard t-distribution with df degrees of freedom (Devroye p. 445 as
* corrected in the Errata). */
extern double rk_standard_t(rk_state *state, double df);
/* von Mises circular distribution with center mu and shape kappa on [-pi,pi]
* (Devroye p. 476 as corrected in the Errata). */
extern double rk_vonmises(rk_state *state, double mu, double kappa);
/* Pareto distribution via inversion (Devroye p. 262) */
extern double rk_pareto(rk_state *state, double a);
/* Weibull distribution via inversion (Devroye p. 262) */
extern double rk_weibull(rk_state *state, double a);
/* Power distribution via inversion (Devroye p. 262) */
extern double rk_power(rk_state *state, double a);
/* Laplace distribution */
extern double rk_laplace(rk_state *state, double loc, double scale);
/* Gumbel distribution */
extern double rk_gumbel(rk_state *state, double loc, double scale);
/* Logistic distribution */
extern double rk_logistic(rk_state *state, double loc, double scale);
/* Log-normal distribution */
extern double rk_lognormal(rk_state *state, double mean, double sigma);
/* Rayleigh distribution */
extern double rk_rayleigh(rk_state *state, double mode);
/* Wald distribution */
extern double rk_wald(rk_state *state, double mean, double scale);
/* Zipf distribution */
extern long rk_zipf(rk_state *state, double a);
/* Geometric distribution */
extern long rk_geometric(rk_state *state, double p);
extern long rk_geometric_search(rk_state *state, double p);
extern long rk_geometric_inversion(rk_state *state, double p);
/* Hypergeometric distribution */
extern long rk_hypergeometric(rk_state *state, long good, long bad, long sample);
extern long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample);
extern long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample);
/* Triangular distribution */
extern double rk_triangular(rk_state *state, double left, double mode, double right);
/* Logarithmic series distribution */
extern long rk_logseries(rk_state *state, double p);
"""
#Create function and name
ctypes_template = """
{func_name} = mtrand.{func_name}
{func_name}.restype = {restype}
{func_name}.argtypes = {argtypes}
"""
func_template = """
def {pyfunc_name}({args}):
return {func_name}({plus_args})
"""
_result1 = parse_header(header1)
_result2 = parse_header(header2)
_result = _result1 + _result2
afile = open('_rng_generated.py','w')
for func in _result:
argstr = "[" + ",".join(func[2]) + "]"
afile.write(ctypes_template.format(func_name = func[0],
restype = func[1], argtypes = argstr))
args = [x.strip(',') for x in func[3][1:]]
paramstr = ",".join(args)
allparams = ",".join(['state_p']+args)
afile.write(func_template.format(func_name = func[0], pyfunc_name = func[0][3:],
args = paramstr, plus_args=allparams))
########NEW FILE########
__FILENAME__ = reporting
# -*- coding: utf-8 -*-
"""
Error reporting. Used by the CFA and by each FunctionEnvironment,
which can collect errors and warnings and issue them after failed or
successful compilation.
"""
from __future__ import print_function, division, absolute_import
import sys
import inspect
from numba import error
def getpos(node):
try:
return node.lineno, node.col_offset
except:
return 0, 0
# ______________________________________________________________________
class SourceDescr(object):
"""
Source code descriptor.
"""
def __init__(self, func, ast):
self.func = func
self.ast = ast
def get_lines(self):
source = None
if self.func:
try:
source = inspect.getsource(self.func)
except EnvironmentError:
pass
if source is None:
try:
from meta import asttools
source = asttools.dump_python_source(self.ast)
except Exception:
source = ""
first_lineno = getattr(self.ast, "lineno", 2)
line_offset = offset(source.splitlines())
newlines = "\n" * (first_lineno - line_offset)
source = newlines + source
return source.splitlines()
def offset(source_lines):
offset = 0
for line in source_lines:
if line.strip().startswith("def"):
break
offset += 1
return offset
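# get_lines() above pads the recovered source with leading blank lines so that
# indexing the returned list by a node's lineno (as format_msg does below)
# lands on the matching source line.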
# ______________________________________________________________________
def sort_message(collected_message):
node, is_error, message = collected_message
lineno, colno = float('inf'), float('inf')
if hasattr(node, 'lineno'):
lineno, colno = map(float, getpos(node))
return not is_error, lineno, colno, message
class MessageCollection(object):
"""Collect error/warnings messages first then sort"""
def __init__(self, ast=None, source_lines=None, file=None):
# (node, is_error, message)
self.buf = []
self.file = file or sys.stdout
self.ast = ast
self.source_lines = source_lines
self.messages = []
self.have_errors = False
def error(self, node, message):
self.have_errors = True
self.messages.append((node, True, message))
def warning(self, node, message):
self.messages.append((node, False, message))
def header(self):
pass
def footer(self):
pass
def report_message(self, message, node, type):
self.buf.append(format_msg_simple(type, node, message))
def report(self, post_mortem=False):
self.messages.sort(key=sort_message)
if self.messages:
self.header()
errors = []
for node, is_error, message in self.messages:
if is_error:
errors.append((node, message))
type = "Error"
else:
type = "Warning"
self.report_message(message, node, type)
if self.messages:
self.buf[-1] = self.buf[-1].rstrip() + '\n'
self.footer()
message = "".join(self.buf)
# clear buffer
del self.messages[:]
del self.buf[:]
if errors and not post_mortem:
if len(message.splitlines()) == 1:
raise error.NumbaError(*errors[0])
raise error.NumbaError("(see below)\n" + message.strip(), has_report=True)
else:
self.file.write(message)
class FancyMessageCollection(MessageCollection):
def header(self):
self.buf.append(
" Numba Encountered Errors or Warnings ".center(80, "-") + '\n')
def footer(self):
self.buf.append("-" * 80 + '\n')
def report_message(self, message, node, type):
self.buf.append(format_msg(type, self.source_lines, node, message))
# ______________________________________________________________________
def format_msg(type, source_lines, node, msg):
ret = ''
if node and hasattr(node, 'lineno') and source_lines:
lineno, colno = getpos(node)
if lineno < len(source_lines):
line = source_lines[lineno]
ret = line + '\n' + "%s^" % ("-" * colno) + '\n'
return ret + format_msg_simple(type, node, msg) + "\n"
def format_msg_simple(type, node, message):
return "%s %s%s\n" % (type, error.format_pos(node), message)
# ______________________________________________________________________
def report(env, exc=None):
"""
    :param env: the environment; env.crnt.error_env is the FunctionErrorEnvironment,
        whose enable_post_mortem flag controls post-mortem debugging of Numba
    :param exc: currently propagating exception
"""
function_error_env = env.crnt.error_env
post_mortem = function_error_env.enable_post_mortem
if exc is not None:
function_error_env.collection.error(exc.node, exc.msg)
try:
function_error_env.collection.report(post_mortem)
except error.NumbaError as e:
exc = e
if exc is not None and not post_mortem:
# Shorten traceback
raise exc
########NEW FILE########
__FILENAME__ = scrape_multiarray_api
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ______________________________________________________________________
'''scrape_multiarray_api
Utilities for reading the __multiarray_api.h file, and scraping three
things: symbolic names of API members, array indices for those
members, and LLVM types for those members.
'''
from __future__ import print_function, division, absolute_import
# ______________________________________________________________________
import sys
import pprint
# ______________________________________________________________________
TEMPLATE_STR = '''"""Automatically generated code.
Automatically generated by .../numba/scrape_multiarray_api.py
Edit at your own risk!
"""
import llvm.core as lc
from .llvm_types import _int1, _int8, _int32, _int64, _intp, _numpy_struct, \\
_numpy_array
API_INDEX_MAP = %(indexmap)s
API_TYPE_MAP = %(tymap)s
# End of automatically generated code.
'''
# ______________________________________________________________________
def joinlines (source_lines):
'''Remove backslashes appearing next to line breaks in a string
and strip it after doing so.'''
return ''.join((ln[:-1] if ln[-1] == '\\' else ln
for ln in source_lines.splitlines())).strip()
# ______________________________________________________________________
TY_MAP = {
'char' : '_int8',
'int' : '_int32', # This seems to hold true, even on 64-bit systems.
'unsigned char' : '_int8', # XXX
'unsigned int' : 'u_int32', # XXX
'void' : 'lc.Type.void()',
'npy_bool' : '_int8',
'npy_intp' : '_intp',
'npy_uint32' : 'u_int32', # XXX/Note: Loses unsigned info in LLVM type.
'PyArrayObject' : '_numpy_struct',
'double' : 'lc.Type.double()',
    'size_t' : 'u_intp', # XXX Loses unsignedness
'npy_int64' : '_int64',
'npy_datetime' : '_int64', # npy_common.h
'npy_timedelta' : '_int64', # npy_common.h
}
# ______________________________________________________________________
def map_type (ty_str):
npointer = ty_str.count('*')
if npointer == 0:
base_ty = ty_str
else:
base_ty = ty_str[:-npointer].strip()
if base_ty == 'void' and npointer > 0:
base_ty = '_int8'
elif base_ty not in TY_MAP:
if npointer > 0:
base_ty = '_int8' # Basically cast into void *
else:
base_ty = '_int32' # Or an int.
else:
base_ty = TY_MAP[base_ty]
if base_ty == '_numpy_struct' and npointer > 0:
base_ty = '_numpy_array'
npointer -= 1
return ''.join((npointer * 'lc.Type.pointer(', base_ty, ')' * npointer))
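# A few traced examples of map_type's behaviour:
#   map_type('double *')        -> 'lc.Type.pointer(lc.Type.double())'
#   map_type('void *')          -> 'lc.Type.pointer(_int8)'   (void * treated as char *)
#   map_type('PyArrayObject *') -> '_numpy_array'  (one pointer level folded in)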
# ______________________________________________________________________
def c_ty_str_to_llvm (c_ty_str):
ty_str_fn_split = [substr.strip() for substr in c_ty_str.split('(*)')]
ret_val = map_type(ty_str_fn_split[0])
if len(ty_str_fn_split) > 1:
arg_ty_strs = ty_str_fn_split[1][1:-1].split(', ')
if len(arg_ty_strs) == 1 and arg_ty_strs[0].strip() == 'void':
arg_ty_strs = []
ret_val = ('lc.Type.function(%s, [%s])' %
(ret_val, ', '.join((map_type(arg_ty_str.strip())
for arg_ty_str in arg_ty_strs))))
return ret_val
# ______________________________________________________________________
def process_type (ty_str):
if ty_str.startswith('(*'):
ty_str = ty_str[3:-1]
else:
assert ty_str[0] == '('
ty_str = ty_str[2:-1]
return ty_str
# ______________________________________________________________________
def process_definition (source_defn):
arr_str = 'PyArray_API['
arr_str_idx = source_defn.index(arr_str)
arr_str_end_idx = source_defn.index('])', arr_str_idx)
return (int(source_defn[arr_str_idx + len(arr_str):arr_str_end_idx]),
process_type(joinlines(source_defn[:arr_str_idx].strip())))
# ______________________________________________________________________
def process_source (source_file_path):
ret_val = None
with open(source_file_path) as source_file:
source_text = source_file.read()
split_by_pp_define0 = source_text.split('#define ')
split_by_pp_define1 = (substr0.strip().split(None, 1)
for substr0 in split_by_pp_define0)
return dict(((sym_name, process_definition(sym_defn))
for sym_name, sym_defn in split_by_pp_define1
if (sym_name != 'PyArray_API' and
sym_defn.find('])') != -1)))
# ______________________________________________________________________
def gen_python (processed_source, template_str = None):
if template_str is None:
template_str = TEMPLATE_STR
index_map = {}
ty_map_strs = ['{']
    for symbol, (index, c_ty_str) in processed_source.items():
index_map[symbol] = index
ty_map_strs.append(' %r : %s,' % (symbol, c_ty_str_to_llvm(c_ty_str)))
ty_map_strs.append('}')
return template_str % {'indexmap' : pprint.pformat(index_map),
'tymap' : '\n'.join(ty_map_strs)}
# ______________________________________________________________________
def get_include ():
import os, numpy
return os.path.join(numpy.get_include(), 'numpy', '__multiarray_api.h')
# ______________________________________________________________________
def main (*args, **kws):
'''Initial prototype for automatically generating multiarray C API
call information for llvm-py. Not actually used to generate any
Numba modules (dynamically handled by multiarray_api).'''
if len(args) == 0:
args = (get_include(),)
for arg in args:
print((gen_python(process_source(arg))))
# ______________________________________________________________________
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
# ______________________________________________________________________
# End of scrape_multiarray_api.py
########NEW FILE########
__FILENAME__ = special
# -*- coding: utf-8 -*-
"""
Special compiler-recognized numba functions and attributes.
"""
from __future__ import print_function, division, absolute_import
__all__ = ['NULL', 'typeof', 'python', 'nopython', 'addressof', 'prange']
import ctypes
from numba import error
#------------------------------------------------------------------------
# Pointers
#------------------------------------------------------------------------
class NumbaDotNULL(object):
"NULL pointer"
NULL = NumbaDotNULL()
def addressof(obj, propagate=True):
"""
Take the address of a compiled jit function.
:param obj: the jit function
:param propagate: whether to always propagate exceptions
:return: ctypes function pointer
"""
from numba import numbawrapper
if not propagate:
raise ValueError("Writing unraisable exceptions is not yet supported")
if not isinstance(obj, (numbawrapper.NumbaCompiledWrapper,
numbawrapper.numbafunction_type)):
raise TypeError("Object is not a jit function")
if obj.lfunc_pointer is None:
assert obj.lfunc is not None, obj
from numba.codegen import llvmcontext
llvm_context = llvmcontext.LLVMContextManager()
obj.lfunc_pointer = llvm_context.get_pointer_to_function(obj.lfunc)
ctypes_sig = obj.signature.to_ctypes()
return ctypes.cast(obj.lfunc_pointer, ctypes_sig)
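# Hypothetical usage sketch (assumes a jit function compiled with an explicit
# signature, e.g. numba.jit(double(double))):
#   @numba.jit(double(double))
#   def twice(x): return x * 2.0
#   fptr = addressof(twice)   # ctypes function pointer
#   fptr(21.0)                # -> 42.0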
#------------------------------------------------------------------------
# Types
#------------------------------------------------------------------------
def typeof(value):
"""
Get the type of a variable or value.
Used outside of Numba code, infers the type for the object.
"""
from numba import typesystem
return typesystem.numba_typesystem.typeof(value)
#------------------------------------------------------------------------
# python/nopython context managers
#------------------------------------------------------------------------
class NoopContext(object):
def __init__(self, name):
self.name = name
def __enter__(self, *args):
return None
def __exit__(self, *args):
return None
def __repr__(self):
return self.name
python = NoopContext("python")
nopython = NoopContext("nopython")
#------------------------------------------------------------------------
# prange
#------------------------------------------------------------------------
def prange(start=0, stop=None, step=1):
if stop is None:
stop = start
start = 0
return range(start, stop, step)
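# Interpreted as plain Python, prange simply behaves like range(); the compiler
# recognizes numba.prange specially (it is exported in __all__ above).
# Hypothetical usage sketch:
#   for i in numba.prange(n):
#       out[i] = f(i)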
########NEW FILE########
__FILENAME__ = comparisons
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from functools import reduce
import numba
from numba import *
from numba import error
from numba import visitors, nodes
from numba import function_util
from numba.symtab import Variable
from numba.typesystem import is_obj
from numba import pyconsts
logger = logging.getLogger(__name__)
opmap = {
ast.Eq : pyconsts.Py_EQ,
ast.NotEq : pyconsts.Py_NE,
ast.Lt : pyconsts.Py_LT,
ast.LtE : pyconsts.Py_LE,
ast.Gt : pyconsts.Py_GT,
ast.GtE : pyconsts.Py_GE,
}
def build_boolop(right, left):
node = ast.BoolOp(ast.And(), [left, right])
return nodes.typednode(node, bool_)
def extract(complex_node):
complex_node = nodes.CloneableNode(complex_node)
real = nodes.ComplexAttributeNode(complex_node, 'real')
imag = nodes.ComplexAttributeNode(complex_node.clone, 'imag')
return real, imag
def compare(lhs, op, rhs):
result = ast.Compare(lhs, [op], [rhs])
return nodes.typednode(result, bool_)
class SpecializeComparisons(visitors.NumbaTransformer):
"""
Rewrite cascaded ast.Compare nodes to a sequence of boolean operations
ANDed together:
a < b < c
becomes
a < b and b < c
"""
def single_compare(self, node):
rhs = node.comparators[0]
if is_obj(node.left.type):
node = self.single_compare_objects(node)
elif node.left.type.is_pointer and rhs.type.is_pointer:
# Coerce pointers to integer values before comparing
node.left = nodes.CoercionNode(node.left, Py_uintptr_t)
node.comparators = [nodes.CoercionNode(rhs, Py_uintptr_t)]
elif node.left.type.is_complex and rhs.type.is_complex:
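            # Complex numbers have no native ordering; only (in)equality is
            # supported, rewritten element-wise below:
            #   a == b  ->  a.real == b.real and a.imag == b.imag
            #   a != b  ->  a.real != b.real or  a.imag != b.imag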
real1, imag1 = extract(node.left)
real2, imag2 = extract(rhs)
op = type(node.ops[0])
if op == ast.Eq:
lhs = compare(real1, ast.Eq(), real2)
rhs = compare(imag1, ast.Eq(), imag2)
result = ast.BoolOp(ast.And(), [lhs, rhs])
elif op == ast.NotEq:
lhs = compare(real1, ast.NotEq(), real2)
rhs = compare(imag1, ast.NotEq(), imag2)
result = ast.BoolOp(ast.Or(), [lhs, rhs])
else:
raise NotImplementedError("ordered comparisons are not "
"implemented for complex numbers")
node = nodes.typednode(result, bool_)
elif node.left.type.is_string and rhs.type.is_string:
node.left = nodes.CoercionNode(node.left, object_)
node.comparators = [nodes.CoercionNode(rhs, object_)]
return self.single_compare(node)
        elif node.left.type.is_datetime and rhs.type.is_datetime:
raise error.NumbaError(
node, "datetime comparisons not yet implemented")
return node
def single_compare_objects(self, node):
op = type(node.ops[0])
if op not in opmap:
raise error.NumbaError(
node, "%s comparisons not yet implemented" % (op,))
# Build arguments for PyObject_RichCompareBool
operator = nodes.const(opmap[op], int_)
args = [node.left, node.comparators[0], operator]
# Call PyObject_RichCompareBool
compare = function_util.external_call(self.context,
self.llvm_module,
'PyObject_RichCompare',
args=args)
# Coerce int result to bool
return nodes.CoercionNode(compare, node.type)
def visit_Compare(self, node):
"Reduce cascaded comparisons into single comparisons"
# Process children
self.generic_visit(node)
compare_nodes = []
comparators = [nodes.CloneableNode(c) for c in node.comparators]
if len(node.comparators) > 1:
if node.type.is_array:
raise error.NumbaError(
node, "Cannot determine truth value of boolean array "
"(use any or all)")
# Build comparison nodes
left = node.left
for op, right in zip(node.ops, comparators):
node = ast.Compare(left=left, ops=[op], comparators=[right])
# Set result type of comparison:
# bool array of array comparison
# bool otherwise
if left.type.is_array or right.type.is_array:
# array < x -> Array(bool_, array.ndim)
result_type = self.env.crnt.typesystem.promote(
left.type, right.type)
else:
result_type = bool_
nodes.typednode(node, result_type)
# Handle comparisons specially based on their types
node = self.single_compare(node)
compare_nodes.append(node)
left = right.clone
# AND the comparisons together
node = reduce(build_boolop, reversed(compare_nodes))
return node
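# Note: for a cascade like `a < b < c`, visit_Compare clones `b` so it is only
# evaluated once, builds `a < b` and `b < c` as separate typed comparisons
# (each then specialized by single_compare), and ANDs them together, nested as
# `a < b and (b < c)`.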
########NEW FILE########
__FILENAME__ = exceptions
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
from numba import *
from numba import visitors, nodes, error, function_util
logger = logging.getLogger(__name__)
from numba.typesystem import is_obj
#------------------------------------------------------------------------
# 'raise'
#------------------------------------------------------------------------
class LowerRaise(visitors.NumbaTransformer):
"""
Resolve the 'raise' statement.
"""
def visit_Raise(self, node):
# Create void * temporaries
args = [] # Type, Value, Traceback, Cause
for arg in [node.type, node.inst, node.tback, None]:
if arg:
arg = nodes.CoercionNode(arg, object_)
arg = nodes.PointerFromObject(arg)
else:
arg = nodes.NULL
args.append(arg)
        # Call numba/external/utilities/cpyutils.c:do_raise()
set_exc = function_util.utility_call(
self.context,
self.llvm_module,
'Raise', args)
result = self.visit(set_exc)
return result
#------------------------------------------------------------------------
# Specialize Error Checking and Raising
#------------------------------------------------------------------------
class ExceptionSpecializer(visitors.NumbaTransformer):
"""
Specialize exception handling. Handle error checking and raising.
"""
def __init__(self, *args, **kwargs):
super(ExceptionSpecializer, self).__init__(*args, **kwargs)
self.visited_callnodes = set()
#------------------------------------------------------------------------
# Error Checking
#------------------------------------------------------------------------
def visit_CheckErrorNode(self, node):
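        # A CheckErrorNode carries either a 'badval' (raise when the result
        # equals it) or a 'goodval' (raise when the result differs from it);
        # either way it lowers to an if/else wrapping the raise node.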
if node.badval is not None:
badval = node.badval
eq = ast.Eq()
else:
assert node.goodval is not None
badval = node.goodval
eq = ast.NotEq()
check = nodes.if_else(eq, node.return_value, badval,
lhs=node.raise_node, rhs=None)
return self.visit(check)
def visit_PyErr_OccurredNode(self, node):
check_err = nodes.CheckErrorNode(
nodes.ptrtoint(function_util.external_call(
self.context,
self.llvm_module,
'PyErr_Occurred')),
goodval=nodes.ptrtoint(nodes.NULL))
result = nodes.CloneableNode(node.node)
result = nodes.ExpressionNode(stmts=[result, check_err],
expr=result.clone)
return self.visit(result)
#------------------------------------------------------------------------
# Error Checking for Function Calls
#------------------------------------------------------------------------
def visit_NativeCallNode(self, node):
badval, goodval = node.badval, node.goodval
if node not in self.visited_callnodes and (badval is not None or
goodval is not None):
self.visited_callnodes.add(node)
result = node.cloneable
body = nodes.CheckErrorNode(
result, badval, goodval,
node.exc_type, node.exc_msg, node.exc_args)
node = nodes.ExpressionNode(stmts=[body],
expr=result.clone)
return self.visit(node)
else:
self.generic_visit(node)
return node
#------------------------------------------------------------------------
# Check for UnboundLocalError
#------------------------------------------------------------------------
def visit_Name(self, node):
if (is_obj(node.type) and isinstance(node.ctx, ast.Load) and
getattr(node, 'cf_maybe_null', False)):
# Check for unbound objects and raise UnboundLocalError if so
value = nodes.LLVMValueRefNode(Py_uintptr_t, None)
node.loaded_name = value
exc_msg = node.variable.name
if hasattr(node, 'lineno'):
exc_msg = '%s%s' % (error.format_pos(node), exc_msg)
check_unbound = nodes.CheckErrorNode(
value, badval=nodes.const(0, Py_uintptr_t),
exc_type=UnboundLocalError,
exc_msg=exc_msg)
node.check_unbound = self.visit(check_unbound)
return node
#------------------------------------------------------------------------
# Exception Raising
#------------------------------------------------------------------------
def _raise_exception(self, node):
if node.exc_type:
assert node.exc_msg
if node.exc_args:
args = [node.exc_type, node.exc_msg] + node.exc_args
raise_node = function_util.external_call(self.context,
self.llvm_module,
'PyErr_Format',
args=args)
else:
args = [node.exc_type, node.exc_msg]
raise_node = function_util.external_call(self.context,
self.llvm_module,
'PyErr_SetString',
args=args)
return [raise_node]
return []
def _trap(self, node):
body = []
if node.exc_msg and node.print_on_trap:
pos = error.format_pos(node)
if node.exception_type:
exc_type = '%s: ' % node.exception_type.__name__
else:
exc_type = ''
msg = '%s%s%%s' % (exc_type, pos)
format = nodes.const(msg, c_string_type)
print_msg = function_util.external_call(self.context,
self.llvm_module,
'printf',
args=[format,
node.exc_msg])
body.append(print_msg)
trap = nodes.LLVMIntrinsicNode(signature=void(), args=[],
func_name='TRAP')
return body + [trap]
def visit_RaiseNode(self, node):
if self.nopython:
result = self._trap(node)
else:
result = self._raise_exception(node)
return ast.Suite(body=result + [nodes.PropagateNode()])
########NEW FILE########
__FILENAME__ = exttypes
import ast
import numba
from numba import *
from numba import error
from numba import typesystem
from numba import visitors
from numba import nodes
from numba import function_util
from numba.exttypes import virtual
from numba.traits import traits, Delegate
class ExtensionTypeLowerer(visitors.NumbaTransformer):
"""
Lower extension type attribute accesses and method calls.
"""
def get_handler(self, ext_type):
if ext_type.is_extension and not ext_type.is_autojit_exttype:
return StaticExtensionHandler()
else:
assert ext_type.is_autojit_exttype, ext_type
return DynamicExtensionHandler()
# ______________________________________________________________________
# Attributes
def visit_ExtTypeAttribute(self, node):
"""
Resolve an extension attribute.
"""
handler = self.get_handler(node.ext_type)
self.visitchildren(node)
return handler.handle_attribute_lookup(self.env, node)
# ______________________________________________________________________
# Methods
def visit_NativeFunctionCallNode(self, node):
if node.signature.is_bound_method:
assert isinstance(node.function, nodes.ExtensionMethod)
self.visitlist(node.args)
node = self.visit_ExtensionMethod(node.function, node)
else:
self.visitchildren(node)
return node
def visit_ExtensionMethod(self, node, call_node=None):
"""
Resolve an extension method. We currently only support immediate
calls of extension methods.
"""
if call_node is None:
raise error.NumbaError(node, "Referenced extension method '%s' "
"must be called" % node.attr)
handler = self.get_handler(node.ext_type)
return handler.handle_method_call(self.env, node, call_node)
#------------------------------------------------------------------------
# Handle Static VTable Attributes and Methods
#------------------------------------------------------------------------
class StaticExtensionHandler(object):
"""
Handle attribute lookup and method calls for static extensions
with C++/Cython-like virtual method tables and static object layouts.
"""
def handle_attribute_lookup(self, env, node):
"""
Resolve an extension attribute for a static object layout.
((attributes_struct *)
(((char *) obj) + attributes_offset))->attribute
:node: ExtTypeAttribute AST node
"""
ext_type = node.value.type
offset = ext_type.attr_offset
type = ext_type.attribute_table.to_struct()
if isinstance(node.ctx, ast.Load):
value_type = type.ref() # Load result
else:
value_type = type.pointer() # Use pointer for storage
struct_pointer = nodes.value_at_offset(node.value, offset,
value_type)
result = nodes.StructAttribute(struct_pointer, node.attr,
node.ctx, type.ref())
return result
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a static (C++/Cython-like) vtable:
typedef {
double (*method1)(double);
...
} vtab_struct;
vtab_struct *vtab = *(vtab_struct **) (((char *) obj) + vtab_offset)
void *method = vtab[index]
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.value.type
offset = ext_type.vtab_offset
vtable_struct = ext_type.vtab_type.to_struct()
vtable_struct_type = vtable_struct.ref()
vtab_struct_pointer_pointer = nodes.value_at_offset(
                node.value, offset, vtable_struct_type.pointer())
vtab_struct_pointer = nodes.DereferenceNode(vtab_struct_pointer_pointer)
vmethod = nodes.StructAttribute(vtab_struct_pointer, node.attr,
ast.Load(), vtable_struct_type)
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
result = nodes.NativeFunctionCallNode(node.type, vmethod, args)
return result
#------------------------------------------------------------------------
# Handle Dynamic VTable Attributes and Methods
#------------------------------------------------------------------------
@traits
class DynamicExtensionHandler(object):
"""
Handle attribute lookup and method calls for autojit extensions
with dynamic perfect-hash-based virtual method tables and dynamic
object layouts.
"""
static_handler = StaticExtensionHandler()
# TODO: Implement hash-based attribute lookup
handle_attribute_lookup = Delegate('static_handler')
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a dynamic hash-based vtable:
PyCustomSlots_Table ***vtab_slot = (((char *) obj) + vtab_offset)
lookup_virtual_method(*vtab_slot)
We may cache (*vtab_slot), but we may not cache (**vtab_slot), since
compilations may regenerate the table.
However, we could *preload* (**vtab_slot), where function calls
invalidate the preload, if we were so inclined.
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.ext_type
func_signature = node.type #typesystem.extmethod_to_function(node.type)
offset = ext_type.vtab_offset
# __________________________________________________________________
# Retrieve vtab
vtab_ppp = nodes.value_at_offset(node.value, offset,
void.pointer().pointer())
vtab_struct_pp = nodes.DereferenceNode(vtab_ppp)
# __________________________________________________________________
# Calculate pre-hash
prehash = virtual.hash_signature(func_signature, func_signature.name)
prehash_node = nodes.ConstNode(prehash, uint64)
# __________________________________________________________________
# Retrieve method pointer
# A method is always present when it was given a static signature,
# e.g. @double(double)
always_present = node.attr in ext_type.vtab_type.methodnames
args = [vtab_struct_pp, prehash_node]
# lookup_impl = NumbaVirtualLookup()
lookup_impl = DebugVirtualLookup()
ptr = lookup_impl.lookup(env, always_present, node, args)
vmethod = ptr.coerce(func_signature.pointer())
vmethod = vmethod.cloneable
# __________________________________________________________________
# Call method pointer
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
method_call = nodes.NativeFunctionCallNode(func_signature, vmethod, args)
# __________________________________________________________________
# Generate fallback
# TODO: Subclassing!
# if not always_present:
# # TODO: Enable this path and generate a phi for the result
# # Generate object call
# obj_args = [nodes.CoercionNode(arg, object_) for arg in args]
# obj_args.append(nodes.NULL)
# object_call = function_util.external_call(
# env.context, env.crnt.llvm_module,
# 'PyObject_CallMethodObjArgs', obj_args)
#
# # if vmethod != NULL: vmethod(obj, ...)
# # else: obj.method(...)
# method_call = nodes.if_else(
# ast.NotEq(),
# vmethod.clone, nodes.NULL,
# lhs=method_call, rhs=object_call)
return method_call
#------------------------------------------------------------------------
# Method lookup
#------------------------------------------------------------------------
def call_jit(jit_func, args):
return nodes.NativeCallNode(jit_func.signature, args, jit_func.lfunc)
class NumbaVirtualLookup(object):
"""
Use a numba function from numba.utility.virtuallookup to look up virtual
methods in a hash table.
"""
def lookup(self, env, always_present, node, args):
"""
:param node: ExtensionMethodNode
:param args: [vtable_node, prehash_node]
:return: The virtual method as a Node
"""
from numba.utility import virtuallookup
if always_present and False:
lookup = virtuallookup.lookup_method
else:
lookup = virtuallookup.lookup_and_assert_method
args.append(nodes.const(node.attr, c_string_type))
vmethod = call_jit(lookup, args)
return vmethod
class DebugVirtualLookup(object):
"""
Use a C utility function from numba/utility/utilities/virtuallookup.c
to look up virtual methods in a hash table.
Use for debugging.
"""
def lookup(self, env, always_present, node, args):
args.append(nodes.const(node.attr, c_string_type))
vmethod = function_util.utility_call(
env.context, env.crnt.llvm_module,
"lookup_method", args)
return vmethod
########NEW FILE########
__FILENAME__ = funccalls
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import *
from numba import visitors, nodes, error, transforms
from numba.typesystem import is_obj
logger = logging.getLogger(__name__)
from numba.external import pyapi
class FunctionCallSpecializer(visitors.NumbaTransformer,
visitors.NoPythonContextMixin,
):
def visit_NativeCallNode(self, node):
if is_obj(node.signature.return_type):
if self.nopython:
raise error.NumbaError(
node, "Cannot call function returning object in "
"nopython context")
self.generic_visit(node)
return nodes.ObjectTempNode(node)
self.generic_visit(node)
return node
def visit_NativeFunctionCallNode(self, node):
return self.visit_NativeCallNode(node)
########NEW FILE########
__FILENAME__ = loopimpl
# -*- coding: utf-8 -*-
"""
Define various loop implementations.
"""
from __future__ import print_function, division, absolute_import
import ast
import numba
from numba import *
from numba import function_util
from numba import visitors, nodes, error, functions
from numba.typesystem.typematch import typematch
logger = logging.getLogger(__name__)
iterator_impls = []
def register_iterator_implementation(iterator_pattern, iterator_impl):
iterator_impls.append((iterator_pattern, iterator_impl))
def find_iterator_impl(node):
"Find a suitable iterator type for which we have an implementation"
type = node.iter.type
for pattern, impl in iterator_impls:
if typematch(pattern, type):
return impl
raise error.NumbaError(node, "Unsupported iterator "
"type: %s" % (type,))
#------------------------------------------------------------------------
# Interface for Loop Implementations
#------------------------------------------------------------------------
class IteratorImpl(object):
"Implementation of an iterator over a value of a certain type"
def getiter(self, context, for_node, llvm_module):
"Set up an iterator (statement or None)"
raise NotImplementedError
def body(self, context, for_node, llvm_module):
"Get the loop body as a list of statements"
return list(for_node.body)
def next(self, context, for_node, llvm_module):
"Get the next iterator element (ExprNode)"
raise NotImplementedError
#------------------------------------------------------------------------
# External Function Iterator
#------------------------------------------------------------------------
class NativeIteratorImpl(IteratorImpl):
"""
Implement iteration over an iterator which has externally callable
functions for the `getiter` and `next` operations.
"""
def __init__(self, getiter_func, next_func):
self.getiter_func = getiter_func
self.next_func = next_func
self.iterator = None
def getiter(self, context, for_node, llvm_module):
iterator = function_util.external_call(context, llvm_module,
self.getiter_func,
args=[for_node.iter])
iterator = nodes.CloneableNode(iterator)
self.iterator = iterator.clone
return iterator
def next(self, context, for_node, llvm_module):
return function_util.external_call(context, llvm_module,
self.next_func,
args=[self.iterator])
#------------------------------------------------------------------------
# Indexing Iterator
#------------------------------------------------------------------------
def assign(target, value):
return ast.Assign(targets=target, value=value)
def index(value, index):
return ast.Subscript(value=value, slice=index, ctx=ast.Load())
class IndexingIteratorImpl(IteratorImpl):
"""
Implement iteration using indexing.
"""
def getiter(self, context, for_node, llvm_module):
self.index = nodes.TempNode(Py_ssize_t, "iterator_index")
return assign(self.index, nodes.const(0, Py_ssize_t))
def next(self, context, for_node, llvm_module):
"Index element and update index"
index = self.index.load
value = nodes.CloneableNode(index(for_node.iter, index))
add = ast.BinOp(index, ast.Add(), nodes.const(1, Py_ssize_t))
return nodes.ExpressionNode(stmts=[value, assign(self.index.store, add)],
expr=value.clone)
def length(self, context, for_node, llvm_module):
"Length of the iterable"
raise NotImplementedError
#------------------------------------------------------------------------
# Register Loop Implementations
#------------------------------------------------------------------------
register_iterator_implementation("object", NativeIteratorImpl("PyObject_GetIter",
"PyIter_Next"))
########NEW FILE########
__FILENAME__ = loops
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import textwrap
try:
import __builtin__ as builtins
except ImportError:
import builtins
import numba
from numba import missing
from numba import *
from numba import error
from numba import typesystem
from numba import visitors, nodes
from numba.typesystem import get_type
from numba.specialize import loopimpl
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def unpack_range_args(node):
start, stop, step = (nodes.const(0, Py_ssize_t),
None,
nodes.const(1, Py_ssize_t))
if len(node.args) == 0:
raise error.NumbaError(node, "Expected at least one argument")
elif len(node.args) == 1:
stop, = node.args
elif len(node.args) == 2:
start, stop = node.args
else:
start, stop, step = node.args
return [start, stop, step]
def make_while_loop(flow_node):
"Create a while loop from a flow node (a While or If node)"
while_node = nodes.While(test=flow_node.test,
body=flow_node.body,
orelse=flow_node.orelse)
return ast.copy_location(while_node, flow_node)
def copy_basic_blocks(flow_node_src, flow_node_dst):
"Copy cfg basic blocks from one flow node to another"
flow_node_dst.cond_block = flow_node_src.cond_block
flow_node_dst.if_block = flow_node_src.if_block
flow_node_dst.else_block = flow_node_src.else_block
flow_node_dst.exit_block = flow_node_src.exit_block
def make_while_from_for(for_node):
"Create a While from a For. The 'test' (loop condition) must still be set."
while_node = nodes.While(test=None,
body=for_node.body,
orelse=for_node.orelse)
copy_basic_blocks(for_node, while_node)
while_node = nodes.build_while(**vars(while_node))
return ast.copy_location(while_node, for_node)
def untypedTemp():
"Temp node with a yet unknown type"
type = typesystem.DeferredType(None)
temp = nodes.TempNode(type)
type.variable = temp.variable
return temp
#------------------------------------------------------------------------
# Transform for loops
#------------------------------------------------------------------------
class TransformForIterable(visitors.NumbaTransformer):
"""
This transforms loops over 1D arrays and loops over range().
"""
def rewrite_range_iteration(self, node):
"""
Handle range iteration:
for i in range(start, stop, step):
...
becomes
nsteps = compute_nsteps(start, stop, step)
temp = 0
while temp < nsteps:
target = start + temp * step
...
temp += 1
"""
self.generic_visit(node)
temp = nodes.TempNode(node.target.type, 'target_temp')
nsteps = nodes.TempNode(Py_ssize_t, 'nsteps')
start, stop, step = unpack_range_args(node.iter)
if isinstance(step, nodes.ConstNode):
have_step = step.pyval != 1
else:
have_step = True
start, stop, step = map(nodes.CloneableNode, (start, stop, step))
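        # nsteps is computed as the ceiling of (stop - start) / step in the
        # direction of the step (the "+ step -/+ 1" term rounds away from
        # zero), so the while loop runs exactly as many times as range() would.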
if have_step:
templ = textwrap.dedent("""
{{temp}} = 0
{{nsteps}} = ({{stop}} - {{start}} + {{step}} -
(1 if {{step}} >= 0 else -1)) / {{step}}
while {{temp_load}} < {{nsteps_load}}:
{{target}} = {{start}} + {{temp_load}} * {{step}}
{{temp}} = {{temp_load}} + 1
{{body}}
""")
else:
templ = textwrap.dedent("""
{{temp}} = {{start}}
{{nsteps}} = {{stop}}
while {{temp_load}} < {{nsteps_load}}:
{{target}} = {{temp_load}}
{{temp}} = {{temp_load}} + 1
{{body}}
""")
if node.orelse:
templ += "\nelse: {{else_body}}"
# Leave the bodies empty, they are already analyzed
body = ast.Suite(body=[])
else_body = ast.Suite(body=[])
#--------------------------------------------------------------------
# Substitute template and infer types
#--------------------------------------------------------------------
result = self.run_template(
templ, vars=dict(length=Py_ssize_t),
start=start, stop=stop, step=step,
nsteps=nsteps.store(), nsteps_load=nsteps.load(),
temp=temp.store(), temp_load=temp.load(),
target=node.target,
body=body, else_body=else_body)
ast.copy_location(result, node)
if hasattr(node, 'lineno'):
visitor = missing.FixMissingLocations(node.lineno, node.col_offset,
override=True)
visitor.visit(result)
#--------------------------------------------------------------------
# Patch the body and else clause
#--------------------------------------------------------------------
body.body.extend(node.body)
else_body.body.extend(node.orelse)
while_node = result.body[-1]
assert isinstance(while_node, ast.While)
#--------------------------------------------------------------------
# Create a While with the ForNode's cfg blocks merged in
#--------------------------------------------------------------------
while_node = make_while_loop(while_node)
copy_basic_blocks(node, while_node)
while_node = nodes.build_while(**vars(while_node))
# Create the place to jump to for 'continue'
while_node.continue_block = node.cond_block
# Set the new while loop in the templated Suite
result.body[-1] = while_node
return result
def rewrite_array_iteration(self, node):
"""
Convert 1D array iteration to for-range and indexing:
for value in my_array:
...
becomes
for i in my_array.shape[0]:
value = my_array[i]
...
"""
logger.debug(ast.dump(node))
orig_target = node.target
orig_iter = node.iter
#--------------------------------------------------------------------
# Replace node.target with a temporary
#--------------------------------------------------------------------
target_temp = nodes.TempNode(typesystem.Py_ssize_t)
node.target = target_temp.store()
#--------------------------------------------------------------------
# Create range(A.shape[0])
#--------------------------------------------------------------------
call_func = ast.Name(id='range', ctx=ast.Load())
nodes.typednode(call_func, typesystem.range_)
shape_index = ast.Index(nodes.ConstNode(0, typesystem.Py_ssize_t))
shape_index.type = typesystem.npy_intp
stop = ast.Subscript(value=nodes.ShapeAttributeNode(orig_iter),
slice=shape_index,
ctx=ast.Load())
nodes.typednode(stop, npy_intp)
#--------------------------------------------------------------------
# Create range iterator and replace node.iter
#--------------------------------------------------------------------
call_args = [nodes.ConstNode(0, typesystem.Py_ssize_t),
nodes.CoercionNode(stop, typesystem.Py_ssize_t),
nodes.ConstNode(1, typesystem.Py_ssize_t),]
node.iter = ast.Call(func=call_func, args=call_args)
nodes.typednode(node.iter, call_func.type)
node.index = target_temp.load(invariant=True)
#--------------------------------------------------------------------
# Add assignment to new target variable at the start of the body
#--------------------------------------------------------------------
index = ast.Index(value=node.index)
index.type = target_temp.type
subscript = ast.Subscript(value=orig_iter,
slice=index, ctx=ast.Load())
nodes.typednode(subscript, get_type(orig_iter).dtype)
#--------------------------------------------------------------------
# Add assignment to new target variable at the start of the body
#--------------------------------------------------------------------
assign = ast.Assign(targets=[orig_target], value=subscript)
node.body = [assign] + node.body
#--------------------------------------------------------------------
# Specialize new for loop through range iteration
#--------------------------------------------------------------------
return self.visit(node)
def visit_For(self, node):
if node.iter.type.is_range:
return self.rewrite_range_iteration(node)
elif node.iter.type.is_array and node.iter.type.ndim == 1:
return self.rewrite_array_iteration(node)
else:
self.visitchildren(node)
return node
#------------------------------------------------------------------------
# Transform for loops over builtins
#------------------------------------------------------------------------
class TransformBuiltinLoops(visitors.NumbaTransformer):
def rewrite_enumerate(self, node):
"""
Rewrite a loop like
for i, x in enumerate(array[, start]):
...
into
_arr = array
[_s = start]
for _i in range(len(_arr)):
i = _i [+ _s]
x = _arr[_i]
...
"""
call = node.iter
if (len(call.args) not in (1, 2) or call.keywords or
call.starargs or call.kwargs):
self.error(call, 'expected 1 or 2 arguments to enumerate()')
target = node.target
if (not isinstance(target, (ast.Tuple, ast.List)) or
len(target.elts) != 2):
self.error(call, 'expected 2 iteration variables')
array = call.args[0]
start = call.args[1] if len(call.args) > 1 else None
idx = target.elts[0]
var = target.elts[1]
array_temp = untypedTemp()
if start:
start_temp = untypedTemp() # TODO: only allow integer start
idx_temp = nodes.TempNode(typesystem.Py_ssize_t)
# for _i in range(len(_arr)):
node.target = idx_temp.store()
node.iter = ast.Call(ast.Name('range', ast.Load()),
[ast.Call(ast.Name('len', ast.Load()),
[array_temp.load(True)],
[], None, None)],
[], None, None)
# i = _i [+ _s]
new_idx = idx_temp.load()
if start:
new_idx = ast.BinOp(new_idx, ast.Add(), start_temp.load(True))
node.body.insert(0, ast.Assign([idx], new_idx))
# x = _arr[_i]
value = ast.Subscript(array_temp.load(True),
ast.Index(idx_temp.load()),
ast.Load())
node.body.insert(1, ast.Assign([var], value))
# _arr = array; [_s = start]; ...
body = [ ast.Assign([array_temp.store()], array), node ]
if start:
body.insert(1, ast.Assign([start_temp.store()], start))
return map(self.visit, body)
def rewrite_zip(self, node):
"""
Rewrite a loop like
for x, y... in zip(xs, ys...):
...
into
_xs = xs; _ys = ys...
for _i in range(min(len(_xs), len(_ys)...)):
x = _xs[_i]; y = _ys[_i]...
...
"""
call = node.iter
if not call.args or call.keywords or call.starargs or call.kwargs:
self.error(call, 'expected at least 1 argument to zip()')
target = node.target
if (not isinstance(target, (ast.Tuple, ast.List)) or
len(target.elts) != len(call.args)):
self.error(call, 'expected %d iteration variables' % len(call.args))
temps = [untypedTemp() for _ in xrange(len(call.args))]
idx_temp = nodes.TempNode(typesystem.Py_ssize_t)
# min(len(_xs), len(_ys)...)
len_call = ast.Call(ast.Name('min', ast.Load()),
[ast.Call(ast.Name('len', ast.Load()),
[tmp.load(True)], [], None, None)
for tmp in temps],
[], None, None)
# for _i in range(...):
node.target = idx_temp.store()
node.iter = ast.Call(ast.Name('range', ast.Load()),
[len_call], [], None, None)
# x = _xs[_i]; y = _ys[_i]...
node.body = [ast.Assign([tgt],
ast.Subscript(tmp.load(True),
ast.Index(idx_temp.load()),
ast.Load()))
for tgt, tmp in zip(target.elts, temps)] + \
node.body
# _xs = xs; _ys = ys...
body = [ast.Assign([tmp.store()], arg)
for tmp, arg in zip(temps, call.args)] + \
[node]
return map(self.visit, body)
HANDLERS = {
id(enumerate): rewrite_enumerate,
id(zip): rewrite_zip,
}
def visit_For(self, node):
if (isinstance(node.iter, ast.Call) and
isinstance(node.iter.func, ast.Name)):
name = node.iter.func.id
if name not in self.symtab:
obj = (self.func_globals[name]
if name in self.func_globals else
getattr(builtins, name, None))
rewriter = self.HANDLERS.get(id(obj))
if rewriter:
return rewriter(self, node)
self.visitchildren(node)
return node
#------------------------------------------------------------------------
# Transform for loops over Objects
#------------------------------------------------------------------------
class SpecializeObjectIteration(visitors.NumbaTransformer):
"""
This transforms for loops over objects.
"""
def visit_For(self, node):
while_node = make_while_from_for(node)
test = nodes.const(True, bool_)
while_node.test = test
impl = loopimpl.find_iterator_impl(node)
# Get the iterator, loop body, and the item
iter = impl.getiter(self.context, node, self.llvm_module)
body = impl.body(self.context, node, self.llvm_module)
item = impl.next(self.context, node, self.llvm_module)
# Coerce item to LHS and assign
item = nodes.CoercionNode(item, node.target.type)
target_assmnt = ast.Assign(targets=[node.target], value=item)
# Update While node body
body.insert(0, target_assmnt)
while_node.body = body
nodes.merge_cfg_in_while(while_node)
return ast.Suite(body=[iter, while_node])
########NEW FILE########
__FILENAME__ = stdio_util
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# ______________________________________________________________________
import ctypes
import ctypes.util
from numba import *
# ______________________________________________________________________
c_void_pp = ctypes.POINTER(ctypes.c_void_p)
def get_libc ():
return ctypes.CDLL(ctypes.util.find_library('c'))
def get_stdio_streams ():
'''
Returns file pointers (FILE *) as Python integers for the C stdio
stdin, stdout, and stderr streams.
'''
ret_val = None
if hasattr(ctypes.pythonapi, 'stdin'):
# Linux
_stdio_files = (ctypes.c_void_p.in_dll(ctypes.pythonapi, sym)
for sym in ('stdin', 'stdout', 'stderr'))
ret_val = tuple(c_void_pp(file_p)[0] for file_p in _stdio_files)
elif hasattr(ctypes.pythonapi, '__stdinp'):
# OSX
_stdio_files = (ctypes.c_void_p.in_dll(ctypes.pythonapi, sym)
for sym in ('__stdinp', '__stdoutp', '__stderrp'))
ret_val = tuple(c_void_pp(file_p)[0] for file_p in _stdio_files)
else:
libc = get_libc()
if hasattr(libc, '__getreent'):
# Cygwin
ret_val = tuple(ctypes.cast(libc.__getreent(), c_void_pp)[1:4])
elif hasattr(libc, '__iob_func'):
# MSVC
ret_val = tuple(ctypes.cast(libc.__iob_func(), c_void_pp)[0:3])
else:
raise NotImplementedError("Unsupported platform, don't know how to "
"find pointers to stdio streams!")
return ret_val
def get_stream_as_node(fp):
return nodes.CoercionNode(nodes.ConstNode(fp, Py_uintptr_t),
void.pointer())
# ______________________________________________________________________
def main ():
_, stdout, _ = get_stdio_streams()
PyObject_Print = ctypes.pythonapi.PyObject_Print
PyObject_Print.restype = ctypes.c_int
PyObject_Print.argtypes = ctypes.py_object, ctypes.c_void_p, ctypes.c_int
PyObject_Print(get_stdio_streams, stdout, 1)
PyObject_Print('\n\n', stdout, 1)
# ______________________________________________________________________
if __name__ == "__main__":
main()
# ______________________________________________________________________
# End of stdio_util.py
########NEW FILE########
__FILENAME__ = ctypes_support
import ctypes.util
import warnings
import numba
# from numba.typesystem.defaults import numba_typesystem as ts
from numba.typesystem import numbatypes as ts
from numba.typesystem.ctypestypes import ctypes_map
import numba.utils
#-------------------------------------------------------------------
# CTypes Types for Type Checking
#-------------------------------------------------------------------
_ctypes_scalar_type = type(ctypes.c_int)
_ctypes_func_type = type(ctypes.CFUNCTYPE(ctypes.c_int))
_ctypes_pointer_type = type(ctypes.POINTER(ctypes.c_int))
_ctypes_array_type = type(ctypes.c_int * 2)
CData = type(ctypes.c_int(10)).__mro__[-2]
#-------------------------------------------------------------------
# Check Whether values are ctypes values
#-------------------------------------------------------------------
def is_ctypes_function(value):
return isinstance(type(value), _ctypes_func_type)
def is_ctypes_value(ctypes_value):
return isinstance(ctypes_value, CData)
def is_ctypes_struct_type(ctypes_type):
return (isinstance(ctypes_type, type) and
issubclass(ctypes_type, ctypes.Structure))
def is_ctypes_type(ctypes_type):
return (
(isinstance(ctypes_type, _ctypes_scalar_type)) or
is_ctypes_struct_type(ctypes_type)
)
def is_ctypes(value):
"Check whether the given value is a ctypes value"
return is_ctypes_value(value) or is_ctypes_type(value)
#-------------------------------------------------------------------
# Type mapping (ctypes -> numba)
#-------------------------------------------------------------------
def from_ctypes_type(ctypes_type):
"""
Convert a ctypes type to a numba type
"""
if numba.utils.hashable(ctypes_type) and ctypes_type in ctypes_map:
return ctypes_map[ctypes_type]
elif ctypes_type is ctypes.c_void_p:
return from_ctypes_type(None).pointer()
elif isinstance(ctypes_type, _ctypes_pointer_type):
return from_ctypes_type(ctypes_type._type_).pointer()
elif isinstance(ctypes_type, _ctypes_array_type):
base_type = from_ctypes_type(ctypes_type._type_)
return ts.carray(base_type, ctypes_type._length_)
elif issubclass(ctypes_type, ctypes.Structure):
fields = [(name, from_ctypes_type(field_type))
for name, field_type in ctypes_type._fields_]
return ts.struct_(fields)
else:
raise NotImplementedError(ctypes_type)
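# Illustrative mapping (a rough sketch assuming the usual ctypes -> numba
# correspondences in ctypes_map; not an authoritative list):
#   from_ctypes_type(ctypes.c_double)               -> double
#   from_ctypes_type(ctypes.POINTER(ctypes.c_int))  -> pointer to int
#   from_ctypes_type(ctypes.c_float * 4)            -> carray of 4 floats
#   from_ctypes_type(SomeStructure)                 -> struct_ with the
#                                                      converted fields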
def from_ctypes_value(value):
"""
Convert a ctypes value to a numba type
"""
if is_ctypes_type(value):
# Value is a ctypes type, e.g. c_int
return ts.meta(from_ctypes_type(value))
elif is_ctypes_function(value):
# TODO: move this to from_ctypes_type
if value.argtypes is None:
warnings.warn(
"ctypes function %s has no argument types set" % (value,))
return ts.object_
restype = from_ctypes_type(value.restype)
argtypes = [from_ctypes_type(at) for at in value.argtypes]
signature = ts.function(return_type=restype, args=argtypes)
return signature
elif is_ctypes_type(type(value)) or hasattr(value, '_type_'):
# Value is a ctypes value, e.g. c_int(10)
result_type = from_ctypes_type(type(value))
if result_type.is_pointer:
# Handle ctypes pointers
try:
ctypes.cast(value, ctypes.c_void_p)
except ctypes.ArgumentError:
pass
else:
addr_int = ctypes.cast(value, ctypes.c_void_p).value
result_type = ts.known_pointer(result_type.base_type, addr_int)
return result_type
else:
raise NotImplementedError(value)
########NEW FILE########
__FILENAME__ = slicenodes
# -*- coding: utf-8 -*-
"""
AST nodes for native slicing.
"""
from __future__ import print_function, division, absolute_import
import ast
import numba
from numba import *
from numba import nodes
class SliceDimNode(nodes.ExprNode):
"""
Array is sliced, and this dimension contains an integer index or newaxis.
"""
_fields = ['subslice']
def __init__(self, subslice, src_dim, dst_dim, **kwargs):
super(SliceDimNode, self).__init__(**kwargs)
self.subslice = subslice
self.src_dim = src_dim
self.dst_dim = dst_dim
self.type = subslice.type
# PyArrayAccessor wrapper of llvm fake PyArrayObject value
# set by NativeSliceNode
self.view_accessor = None
self.view_copy_accessor = None
class SliceSliceNode(SliceDimNode):
"""
Array is sliced, and this dimension contains a slice.
"""
_fields = ['start', 'stop', 'step']
def __init__(self, subslice, src_dim, dst_dim, **kwargs):
super(SliceSliceNode, self).__init__(subslice, src_dim, dst_dim,
**kwargs)
self.start = subslice.lower and nodes.CoercionNode(subslice.lower, npy_intp)
self.stop = subslice.upper and nodes.CoercionNode(subslice.upper, npy_intp)
self.step = subslice.step and nodes.CoercionNode(subslice.step, npy_intp)
class BroadcastNode(nodes.ExprNode):
"""
Broadcast a bunch of operands:
- set strides of single-sized dimensions to zero
- find big shape
"""
_fields = ['operands', 'check_errors']
def __init__(self, array_type, operands, **kwargs):
super(BroadcastNode, self).__init__(**kwargs)
self.operands = operands
self.shape_type = numba.carray(npy_intp, array_type.ndim)
self.array_type = array_type
self.type = npy_intp.pointer()
self.broadcast_retvals = {}
self.check_errors = []
for op in operands:
if op.type.is_array:
# TODO: Put the raise code in a separate basic block and jump
return_value = nodes.LLVMValueRefNode(int_, None)
check_error = nodes.CheckErrorNode(
return_value, 0, exc_type=ValueError,
exc_msg="Shape mismatch while broadcasting")
self.broadcast_retvals[op] = return_value
self.check_errors.append(check_error)
def create_slice_dim_node(subslice, *args):
if subslice.type.is_slice:
return SliceSliceNode(subslice, *args)
else:
return SliceDimNode(subslice, *args)
class NativeSliceNode(nodes.ExprNode):
"""
Aggregate of slices in all dimensions.
In nopython context, uses a fake stack-allocated PyArray struct.
In python context, it builds an actual heap-allocated numpy array.
    In this case, the following attributes are patched at code generation
    time with the LLVM values:
dst_data, dst_shape, dst_strides
"""
_fields = ['value', 'subslices', 'build_array_node']
def __init__(self, type, value, subslices, nopython, **kwargs):
super(NativeSliceNode, self).__init__(**kwargs)
value = nodes.CloneableNode(value)
self.type = type
self.value = value
self.subslices = subslices
self.shape_type = numba.carray(npy_intp, type.ndim)
self.nopython = nopython
if not nopython:
self.build_array_node = self.build_array()
else:
self.build_array_node = None
def mark_nopython(self):
self.nopython = True
self.build_array_node = None
def build_array(self):
self.dst_data = nodes.LLVMValueRefNode(void.pointer(), None)
self.dst_shape = nodes.LLVMValueRefNode(self.shape_type, None)
self.dst_strides = nodes.LLVMValueRefNode(self.shape_type, None)
array_node = nodes.ArrayNewNode(
self.type, self.dst_data, self.dst_shape, self.dst_strides,
base=self.value.clone)
return nodes.CoercionNode(array_node, self.type)
def rewrite_slice(node, nopython):
"""
    Rewrites array slices to their native equivalent without
    using the Python API.
node: ast.Subscript with an array type as result
nopython: whether the node is encountered in a nopython context
"""
# assert self.nopython
if isinstance(node.slice, ast.ExtSlice):
dims = node.slice.dims
else:
assert not isinstance(node.slice, ast.Ellipsis)
dims = [node.slice]
slices = []
src_dim = 0
dst_dim = 0
all_slices = True
for subslice in dims:
slices.append(create_slice_dim_node(subslice, src_dim, dst_dim))
if subslice.type.is_slice:
src_dim += 1
dst_dim += 1
elif nodes.is_newaxis(subslice):
all_slices = False
dst_dim += 1
else:
assert subslice.type.is_int
all_slices = False
src_dim += 1
#if all_slices and all(empty(subslice) for subslice in slices):
# return node.value
# print node, node.type
return NativeSliceNode(node.type, node.value, slices, nopython)
class MarkNoPython(ast.NodeVisitor):
"""
Mark array slicing nodes as nopython, which allows them to use
stack-allocated fake arrays.
"""
def visit_NativeSliceNode(self, node):
node.mark_nopython()
self.generic_visit(node)
return node
def mark_nopython(ast):
MarkNoPython().visit(ast)
########NEW FILE########
__FILENAME__ = sliceutils
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from llvm_cbuilder import *
from llvm_cbuilder import shortnames as C
from numba.utility.cbuilder.library import register
from numba.utility.cbuilder.numbacdef import NumbaCDefinition, from_numba
def get_constants(cbuilder):
zero = cbuilder.constant(C.npy_intp, 0)
one = cbuilder.constant(C.npy_intp, 1)
return one, zero
# @register
class SliceArray(CDefinition):
_name_ = "slice"
_retty_ = C.char_p
_argtys_ = [
('data', C.char_p),
('in_shape', C.pointer(C.npy_intp)),
('in_strides', C.pointer(C.npy_intp)),
('out_shape', C.pointer(C.npy_intp)),
('out_strides', C.pointer(C.npy_intp)),
('start', C.npy_intp),
('stop', C.npy_intp),
('step', C.npy_intp),
('src_dim', C.int),
('dst_dim', C.int),
]
def _adjust_given_index(self, extent, negative_step, index, is_start):
        # Transliterate the below code to llvm cbuilder
# TODO: write in numba
# For the start index in start:stop:step, do:
# if have_start:
# if start < 0:
# start += shape
# if start < 0:
# start = 0
# elif start >= shape:
# if negative_step:
# start = shape - 1
# else:
# start = shape
# else:
# if negative_step:
# start = shape - 1
# else:
# start = 0
# For the stop index, do:
# if stop is not None:
# if stop < 0:
# stop += extent
# if stop < 0:
# stop = 0
# elif stop > extent:
# stop = extent
# else:
# if negative_step:
# stop = -1
# else:
# stop = extent
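        # Worked example of the normalization above (illustrative only):
        # with an extent of 5, a start of -2 becomes -2 + 5 == 3; a start of
        # -7 becomes -7 + 5 == -2 and is then clamped to 0; a start of 9 is
        # clamped to 5 for a positive step (or to 4, the last valid index,
        # for a negative step).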
one, zero = get_constants(self)
with self.ifelse(index < zero) as ifelse:
with ifelse.then():
index += extent
with self.ifelse(index < zero) as ifelse_inner:
with ifelse_inner.then():
index.assign(zero)
with ifelse.otherwise():
with self.ifelse(index >= extent) as ifelse:
with ifelse.then():
if is_start:
# index is 'start' index
with self.ifelse(negative_step) as ifelse:
with ifelse.then():
index.assign(extent - one)
with ifelse.otherwise():
index.assign(extent)
else:
                            # index is 'stop' index. Stop is exclusive, so
                            # we don't care about the sign of the step
index.assign(extent)
def _set_default_index(self, default1, default2, negative_step, index):
with self.ifelse(negative_step) as ifelse:
with ifelse.then():
index.assign(default1)
with ifelse.otherwise():
index.assign(default2)
def adjust_index(self, extent, negative_step, index, default1, default2,
is_start=False, have_index=True):
if have_index:
self._adjust_given_index(extent, negative_step, index, is_start)
else:
self._set_default_index(default1, default2, negative_step, index)
def body(self, data, in_shape, in_strides, out_shape, out_strides,
start, stop, step, src_dim, dst_dim):
stride = in_strides[src_dim]
extent = in_shape[src_dim]
one, zero = get_constants(self)
if not self.have_step:
step = one
negative_step = step < zero
self.adjust_index(extent, negative_step, start,
default1=extent - one, default2=zero,
is_start=True, have_index=self.have_start)
self.adjust_index(extent, negative_step, stop,
default1=-one, default2=extent,
have_index=self.have_stop)
# self.debug("extent", extent)
# self.debug("negative_step", negative_step.cast(C.npy_intp))
# self.debug("start/stop/step", start, stop, step)
new_extent = self.var(C.npy_intp)
new_extent.assign((stop - start) / step)
with self.ifelse((stop - start) % step != zero) as ifelse:
with ifelse.then():
new_extent += one
with self.ifelse(new_extent < zero) as ifelse:
with ifelse.then():
new_extent.assign(zero)
result = self.var(data.type, name='result')
result.assign(data[start * stride:])
out_shape[dst_dim] = new_extent
# self.debug("new_extent", new_extent)
# self.debug("out stride:", dst_dim, stride * step)
out_strides[dst_dim] = stride * step
self.ret(result)
def specialize(self, context, have_start, have_stop, have_step):
self.context = context
self.have_start = have_start
self.have_stop = have_stop
self.have_step = have_step
self._name_ = "slice_%s_%s_%s" % (have_start, have_stop, have_step)
@register
class IndexAxis(NumbaCDefinition):
_name_ = "index"
_retty_ = C.char_p
_argtys_ = [
('data', C.char_p),
('in_shape', C.pointer(C.npy_intp)),
('in_strides', C.pointer(C.npy_intp)),
('src_dim', C.npy_intp),
('index', C.npy_intp),
]
def body(self, data, in_shape, in_strides, src_dim, index):
result = self.var(data.type, name='result')
# self.debug("indexing...", src_dim, "stride", in_strides[src_dim])
result.assign(data[in_strides[src_dim] * index:])
self.ret(result)
@register
class NewAxis(NumbaCDefinition):
_name_ = "newaxis"
_argtys_ = [
('out_shape', C.pointer(C.npy_intp)),
('out_strides', C.pointer(C.npy_intp)),
('dst_dim', C.int),
]
def body(self, out_shape, out_strides, dst_dim):
one, zero = get_constants(self)
out_shape[dst_dim] = one
out_strides[dst_dim] = zero
# self.debug("newaxis in dimension:", dst_dim)
self.ret()
# TODO: Transliterate the below to a numba function
@register
class Broadcast(NumbaCDefinition):
"""
Transliteration of
@cname('__pyx_memoryview_broadcast')
cdef bint __pyx_broadcast(Py_ssize_t *dst_shape,
Py_ssize_t *input_shape,
Py_ssize_t *strides,
int max_ndim, int ndim,
bint *p_broadcast) nogil except -1:
cdef Py_ssize_t i
cdef int dim_offset = max_ndim - ndim
for i in range(ndim):
src_extent = input_shape[i]
dst_extent = dst_shape[i + dim_offset]
if src_extent == 1:
p_broadcast[0] = True
strides[i] = 0
elif dst_extent == 1:
dst_shape[i + dim_offset] = src_extent
elif src_extent != dst_extent:
__pyx_err_extents(i, dst_shape[i], input_shape[i])
"""
_name_ = "__numba_util_broadcast"
_argtys_ = [
('dst_shape', C.pointer(C.npy_intp)),
('src_shape', C.pointer(C.npy_intp)),
('src_strides', C.pointer(C.npy_intp)),
('max_ndim', C.int),
('ndim', C.int),
]
_retty_ = C.int
def body(self, dst_shape, src_shape, src_strides, max_ndim, ndim):
dim_offset = max_ndim - ndim
def constants(type):
return self.constant(type, 0), self.constant(type, 1)
zero, one = constants(C.npy_intp)
zero_int, one_int = constants(C.int)
with self.for_range(ndim) as (loop, i):
src_extent = src_shape[i]
dst_extent = dst_shape[i + dim_offset]
with self.ifelse(src_extent == one) as ifelse:
with ifelse.then():
src_strides[i] = zero
with ifelse.otherwise():
with self.ifelse(dst_extent == one) as ifelse:
with ifelse.then():
dst_shape[i + dim_offset] = src_extent
with ifelse.otherwise():
with self.ifelse(src_extent != dst_extent) as ifelse:
with ifelse.then():
# Shape mismatch
self.ret(zero_int)
self.ret(one_int)
########NEW FILE########
__FILENAME__ = slicing
# -*- coding: utf-8 -*-
"""
Module that deals with NumPy array slicing.
- normalize ellipses
- recognize newaxes
- track how contiguity is affected (C or Fortran)
"""
from __future__ import print_function, division, absolute_import
import ast
from numba import *
from numba import nodes, typesystem
from numba.symtab import Variable
def unellipsify(node, slices, subscript_node):
"""
Given an array node `node`, process all AST slices and create the
final type:
- process newaxes (None or numpy.newaxis)
- replace Ellipsis with a bunch of ast.Slice objects
- process integer indices
- append any missing slices in trailing dimensions
"""
type = node.variable.type
if not type.is_array:
assert type.is_object
return object_, node
if (len(slices) == 1 and nodes.is_constant_index(slices[0]) and
slices[0].value.pyval is Ellipsis):
# A[...]
return type, node
result = []
seen_ellipsis = False
# Filter out newaxes
newaxes = [newaxis for newaxis in slices if nodes.is_newaxis(newaxis)]
n_indices = len(slices) - len(newaxes)
full_slice = ast.Slice(lower=None, upper=None, step=None)
full_slice.variable = Variable(typesystem.slice_)
ast.copy_location(full_slice, slices[0])
# process ellipses and count integer indices
indices_seen = 0
for slice_node in slices[::-1]:
slice_type = slice_node.variable.type
if slice_type.is_ellipsis:
if seen_ellipsis:
result.append(full_slice)
else:
nslices = type.ndim - n_indices + 1
result.extend([full_slice] * nslices)
seen_ellipsis = True
elif (slice_type.is_slice or slice_type.is_int or
nodes.is_newaxis(slice_node)):
indices_seen += slice_type.is_int
result.append(slice_node)
else:
# TODO: Coerce all object operands to integer indices?
# TODO: (This will break indexing with the Ellipsis object or
# TODO: with slice objects that we couldn't infer)
return object_, nodes.CoercionNode(node, object_)
# Reverse our reversed processed list of slices
result.reverse()
    # append any missing slices (e.g. a2d[:])
result_length = len(result) - len(newaxes)
if result_length < type.ndim:
nslices = type.ndim - result_length
result.extend([full_slice] * nslices)
subscript_node.slice = ast.ExtSlice(result)
ast.copy_location(subscript_node.slice, slices[0])
# create the final array type and set it in value.variable
result_dtype = node.variable.type.dtype
result_ndim = node.variable.type.ndim + len(newaxes) - indices_seen
if result_ndim > 0:
result_type = result_dtype[(slice(None),) * result_ndim]
elif result_ndim == 0:
result_type = result_dtype
else:
result_type = object_
return result_type, node
########NEW FILE########
__FILENAME__ = ctypes_values
# Example from Travis Oliphant
import ctypes as ct
import numpy.random as nr
import os.path
from numpy.distutils.misc_util import get_shared_lib_extension
mtrand = ct.CDLL(nr.mtrand.__file__)
# Should we parse this from randomkit.h in the numpy directory?
RK_STATE_LEN = len(nr.get_state()[1])
class rk_state(ct.Structure):
_fields_ = [("key", ct.c_ulong * RK_STATE_LEN),
("pos", ct.c_int),
("has_gauss", ct.c_int),
("gauss", ct.c_double),
("has_binomial", ct.c_int),
("psave", ct.c_double),
("nsave", ct.c_long),
("r", ct.c_double),
("q", ct.c_double),
("fm", ct.c_double),
("m", ct.c_long),
("p1", ct.c_double),
("xm", ct.c_double),
("xl", ct.c_double),
("xr", ct.c_double),
("c", ct.c_double),
("laml", ct.c_double),
("lamr", ct.c_double),
("p2", ct.c_double),
("p3", ct.c_double),
("p4", ct.c_double)]
try:
rk_randomseed = mtrand.rk_randomseed
rk_seed = mtrand.rk_seed
rk_gamma = mtrand.rk_gamma
rk_normal = mtrand.rk_normal
except AttributeError as e:
raise ImportError(str(e))
rk_randomseed.argtypes = [ct.POINTER(rk_state)]
rk_seed.restype = None
rk_seed.argtypes = [ct.c_long, ct.POINTER(rk_state)]
state = rk_state()
state_p = ct.pointer(state)
state_vp = ct.cast(state_p, ct.c_void_p)
rk_gamma.restype = ct.c_double
rk_gamma.argtypes = [ct.POINTER(rk_state), ct.c_double, ct.c_double]
rk_normal.restype = ct.c_double
rk_normal.argtypes = [ct.POINTER(rk_state), ct.c_double, ct.c_double]
def init():
if rk_randomseed(state_p) != 0:
raise ValueError("Cannot initialize the random number generator.")
init()
########NEW FILE########
__FILENAME__ = test_ctypes
"""
Test support for ctypes. See also numba.tests.foreign_call.test_ctypes_call.
"""
import ctypes
import numba as nb
from numba import *
try:
from numba.tests.support import ctypes_values
except ImportError:
ctypes_values = None
#-------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------
from_python = nb.typeof
def get_cast_type(type):
assert type.is_cast
return type.dst_type
def assert_signature(ctypes_func, expected=None):
sig = from_python(ctypes_func)
assert sig.is_pointer_to_function
if expected:
assert sig.signature == expected, (sig.signature, expected)
#-------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------
if ctypes_values:
rk_state_t = get_cast_type(from_python(ctypes_values.rk_state))
def ctypes_func_values():
int_or_long = long_ if ctypes.c_int == ctypes.c_long else int_
long_or_longlong = (longlong if ctypes.c_long == ctypes.c_longlong
else long_)
signature = int_or_long(rk_state_t.pointer())
assert_signature(ctypes_values.rk_randomseed, signature)
signature = void(long_or_longlong, rk_state_t.pointer())
assert_signature(ctypes_values.rk_seed, signature)
signature = double(rk_state_t.pointer(), double, double)
assert_signature(ctypes_values.rk_gamma, signature)
def ctypes_data_values():
assert from_python(ctypes_values.state) == rk_state_t
assert from_python(ctypes_values.state_p) == rk_state_t.pointer()
assert from_python(ctypes_values.state_vp) == void.pointer()
assert from_python(ctypes.c_void_p(10)) == void.pointer()
ctypes_double_p = ctypes.POINTER(ctypes.c_double)(ctypes.c_double(10))
assert from_python(ctypes_double_p) == double.pointer()
def ctypes_c_void_p():
savethread = ctypes.pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = ctypes.c_void_p
restorethread = ctypes.pythonapi.PyEval_RestoreThread
restorethread.argtypes = [ctypes.c_void_p]
restorethread.restype = None
@autojit(nopython=True)
def test_gil():
threadstate = savethread()
restorethread(threadstate)
test_gil()
def test():
if ctypes_values is not None:
ctypes_func_values()
ctypes_data_values()
ctypes_c_void_p()
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = test_ctypes_gibbs
# Example from Travis Oliphant
import math
import numpy as np
from numba import jit, autojit
try:
from numba.tests.support import ctypes_values as rng
except ImportError:
rng = None
#@jit('double[:,:](int64, int64)')
@autojit
def gibbs(rk_seed, N, thin):
rk_seed(0, rng.state_p)
x = 0
y = 0
samples = np.empty((N,2))
for i in range(N):
for j in range(thin):
#x = np.random.gamma(3,1.0/(y**2+4))
x = rng.rk_gamma(rng.state_p, 3.0, 1.0/(y**2+4))
#y = np.random.normal(1.0/(x+1), 1.0/math.sqrt(2+2*x))
y = rng.rk_normal(rng.state_p, 1.0/(x+1), 1.0/math.sqrt(2+2*x))
samples[i, 0] = x
samples[i, 1] = y
return samples
def test():
if rng is not None:
assert np.allclose(gibbs(rng.rk_seed, 10, 10),
gibbs.py_func(rng.rk_seed, 10, 10))
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = test_random_gibbs
# Example by Travis Oliphant
try:
from numba import jit, random
except ImportError:
pass
else:
import numpy as np
import math
state = random.state_p
@jit('f8[:,:](int64, int32)')
def gibbs(N, thin):
x = 0
y = 0
samples = np.empty((N,2))
for i in range(N):
for j in range(thin):
x = random.rk_gamma(state, 3, 1.0/(y**2+4))
y = random.rk_normal(state, 1.0/(x+1), 1.0/math.sqrt(2+2*x))
samples[i, 0] = x
samples[i, 1] = y
return samples
########NEW FILE########
__FILENAME__ = symtab
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import utils
try:
from collections import OrderedDict
except ImportError:
from .odict import OrderedDict
import llvm.core
class Variable(object):
"""
Variables placed on the stack. They allow an indirection
    so that, when used in an operation, the correct LLVM type can be inserted.
Attributes:
type: the Numba type (see numba.typesystem)
is_local/is_global/is_constant
name: name of local or global
lvalue: LLVM Value
state: state passed from one stage to the next
"""
_type = None
warn_unused = True
is_global = False
is_builtin = False
def __init__(self, type, is_constant=False, is_local=False,
is_global=False, is_builtin=False,
name=None, lvalue=None, constant_value=None,
promotable_type=True, is_arg=False):
self.type = type
self.name = name
self.renameable = not is_constant
self.renamed_name = None
self.is_constant = is_constant
self.constant_value = constant_value
self.is_global = is_global
self.is_builtin = is_builtin
self.lvalue = lvalue
self.promotable_type = promotable_type
self.deleted = False
self.uninitialized = False
self.uninitialized_value = None
self.killing_def = None # The definition that kills us, or None
self.killed_def = None # The definition that we killed
self.parent_var = None
self.block = None
self.is_phi = False
self.is_local = is_local
self.is_arg = is_arg
self.is_cellvar = False
self.is_freevar = False
self.need_arg_copy = True
# The control_flow.NameAssignment that defines this
# variable (or PhiNode if a phi)
self.name_assignment = None
self.cf_assignments = []
self.cf_references = [] # def-use chain
# position of first definition
self.lineno = -1
self.col_offset = -1
# Cached value for the deferred_type attribute
self._deferred_type = None
self.init_array_flags()
def init_array_flags(self):
# For arrays. These variables indicate whether to preload data, shape
# and strides. These are set during late specialization in
# visit_Subscript.
self.ndarray = None
def perform_assignment(self, rhs_type):
"""
Called when an assignment is made to this variable.
"""
self.type = rhs_type
@classmethod
def make_shared_property(cls, name):
def _get(self):
if self.parent_var:
return getattr(self.parent_var, name)
return getattr(self, '_' + name)
def _set(self, value):
if self.parent_var:
setattr(self.parent_var, name, value)
else:
setattr(self, '_' + name, value)
setattr(cls, '_' + name, None)
setattr(cls, name, property(_get, _set))
@property
def deferred_type(self):
if self._deferred_type:
return self._deferred_type
from numba import typesystem
self._deferred_type = typesystem.DeferredType(self)
return self._deferred_type
def _type_get(self):
return self._type
def _type_set(self, type):
assert not (self.type and type is None)
from numba import typesystem
if type is None:
print('Setting None type!')
elif not isinstance(type, typesystem.Type):
print(type)
self._type = type
# type = property(_type_get, _type_set)
@classmethod
def from_variable(cls, variable, **kwds):
result = cls(variable.type)
vars(result).update(dict(kwds, **vars(variable)))
return result
# @property
# def is_global(self):
# return self.type and self.type.is_global
@property
def ltype(self):
"""
The LLVM type for the type of this variable or LLVM Value.
"""
if self.lvalue is not None:
return self.lvalue.type
return self.type.to_llvm(utils.context)
@property
def ctypes_type(self):
"""
The ctypes type for the type of this variable.
"""
@property
def unmangled_name(self):
if not self.renamed_name:
return self.name or "<unnamed>"
name = self.renamed_name.lstrip("__numba_renamed_")
counter, sep, var_name = name.partition('_')
name = '%s_%s' % (var_name, counter)
return name
def __deepcopy__(self, memo):
return self
def __repr__(self):
args = []
if self.is_local:
args.append("is_local=True")
if self.is_global:
args.append("is_global=True")
if self.is_constant:
args.append("is_constant=True")
if self.is_freevar:
args.append("is_freevar=True")
if self.is_cellvar:
args.append("is_cellvar=True")
if self.is_phi:
args.append("is_phi=True")
if self.block:
args.append("block=%d" % self.block.id)
if self.lvalue:
args.append("llvm=%s" % (self.lvalue,))
if args:
extra_info = " " + ", ".join(args)
else:
extra_info = ""
if self.name:
if self.renamed_name:
name = self.unmangled_name
else:
name = self.name
return "<Variable(name=%r, type=%s%s)>" % (name, self.type,
extra_info)
else:
return "<Variable(type=%s%s)>" % (self.type, extra_info)
Variable.make_shared_property('is_cellvar')
Variable.make_shared_property('is_freevar')
Variable.make_shared_property('need_arg_copy')
class Symtab(object):
def __init__(self, symtab_dict=None, parent=None):
self.symtab = OrderedDict(symtab_dict or {})
self.parent = parent
self.local_counters = {}
# { (var_name, var_type) : PromotionNode }
self.promotions = {}
if parent:
self.counters = parent.counters
self.local_counters.update(parent.local_counters)
else:
self.counters = None
def lookup(self, name):
result = self.symtab.get(name, None)
if result is None and self.parent is not None:
result = self.parent.lookup(name)
return result
def lookup_most_recent(self, name):
"""
Look up the most recent definition of a variable in this block.
"""
if name in self.local_counters:
last_count = self.local_counters[name]
else:
assert self.parent
return self.parent.lookup_most_recent(name)
return self.lookup_renamed(name, last_count)
def lookup_promotion(self, var_name, dst_type):
if (var_name, dst_type) in self.promotions:
return self.promotions[var_name, dst_type]
assert self.parent
return self.parent.lookup_promotion(var_name, dst_type)
def renamed_name(self, name, count):
return '__numba_renamed_%d_%s' % (count, name)
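        # e.g. renamed_name('x', 3) -> '__numba_renamed_3_x'; Variable's
        # unmangled_name turns this back into 'x_3' (illustration only).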
def lookup_renamed(self, name, version):
renamed_name = self.renamed_name(name, version)
return self[renamed_name]
def rename(self, var, block, kills_previous_def=True):
"""
Create a new renamed variable linked to the given variable, which
becomes its parent.
"""
new_var = Variable.from_variable(var)
new_var.block = block
new_var.cf_references = []
self.counters[var.name] += 1
if self.counters[var.name] and kills_previous_def:
previous_var = self.lookup_most_recent(var.name)
previous_var.killing_def = new_var
new_var.killed_def = previous_var
self.local_counters[var.name] = self.counters[var.name]
new_var.renamed_name = self.renamed_name(var.name,
self.counters[var.name])
new_var.parent_var = var
self.symtab[new_var.renamed_name] = new_var
# print "renaming %s to %s" % (var, new_var)
return new_var
def __repr__(self):
return "symtab(%s)" % self.symtab
def __getitem__(self, name):
result = self.lookup(name)
if result is None:
raise KeyError(name)
return result
def __setitem__(self, name, variable):
self.symtab[name] = variable
def __iter__(self):
return iter(self.symtab)
def __getattr__(self, attr):
return getattr(self.symtab, attr)
########NEW FILE########
__FILENAME__ = templating
# -*- coding: utf-8 -*-
"""
String templating support. The following syntax is supported:
1) Arbitrary Python code
2) Placeholders for AST sub-tree substitutions:
{{some_node}}
3) Typed, untyped and code template variables:
$some_var
Typed and untyped variables are just artificial program
(Python) variables.
- Typed variables end up in the jit/autojit locals dict
- Untyped variables are renameable/ssa variables
- Code variables simply expand to arbitrary code
"""
from __future__ import print_function, division, absolute_import
import re
import ast
import string
import textwrap
import numba.decorators, numba.pipeline, numba.environment, numba.functions
from numba import *
from numba import nodes, symtab as symtab_module
from numba.symtab import Variable
import logging
logger = logging.getLogger(__name__)
prefix = '__numba_template_'
class TempName(object):
_count = 0
def count(self):
self._count += 1
return self._count
def temp_name(self, name):
return '__numba_temp%d_%s' % (self.count(), name)
_temp_name = TempName()
def temp_name(name=''):
return _temp_name.temp_name(name)
class TemplateVariable(Variable):
"""
A fake variable used in a template. The 'code' is substituted using string
interpolation before the source is parsed. If the type is set, the symbol
table is updated. The default 'code' is a temporary variable name.
"""
def __init__(self, type, name, temp_name=None, code=False, **kwargs):
super(TemplateVariable, self).__init__(type, name=name, **kwargs)
self.temp_name = temp_name
self.code = code
self.sep = "\n"
if not temp_name:
assert code
self.codes = []
def __str__(self):
if self.temp_name:
return self.temp_name
return self.sep.join(self.codes) or "pass"
@property
def node(self):
node = ast.Name(self.temp_name, ast.Load())
node.type = self.type
node.variable = self
return node
class TemplateContext(object):
"""
The context in which a template is evaluated. Allows defining
template variables and a mechanism to merge those variables back
into the function being compiled.
"""
def __init__(self, context, template, env=None):
# FIXME: Replace context with env.
self.context = context
self.templ = template
self.variables = []
self.nodes = {}
self.env = env
self.substituted_template = None
def temp_var(self, name, type=None, code=False):
"Create and add a new template $variable"
var = TemplateVariable(name=name, type=type, is_local=True,
temp_name=not code and temp_name(name),
code=code)
self.variables.append(var)
return var
def add_variable(self, var):
"Add an external template $variable to this context"
self.variables.append(var)
def code_var(self, name):
"Create a template $variable that expands to generated code statements"
return self.temp_var(name, code=True)
def temp_vars(self, *names):
"Create a number of template $variables"
for name in names:
yield self.temp_var(name)
def code_vars(self, *names):
"Create a number of code $variables"
for name in names:
yield self.code_var(name)
def string_substitute(self, s):
if self.variables:
d = dict((var.name, str(var)) for var in self.variables)
s = string.Template(s).substitute(d)
return s
def get_vars_symtab(self):
return dict((var.temp_name, Variable(name=var.temp_name,
is_local=True, type=var.type))
for var in self.variables if not var.code)
def update_locals(self, locals_dict):
"""
Update an external jit/autojit locals dict with our
template $variables
"""
for var in self.variables:
if not var.code and var.type is not None:
assert var.name not in locals_dict
locals_dict[var.temp_name] = var.type
def template(self, substitutions):
"""
Run a template and perform the given {{substitutions}}
"""
s = textwrap.dedent(self.templ)
s = self.string_substitute(s)
# print s
self.substituted_template = s
# template_variables = dict((var.name, var) for var in self.variables)
tree = template(s, substitutions, self.get_vars_symtab())
return tree
def template_type_infer(self, substitutions, **kwargs):
tree = self.template(substitutions)
symtab = kwargs.get('symtab', None)
if self.variables or symtab:
vars = self.get_vars_symtab()
symtab = dict(symtab or {}, **vars)
kwargs['symtab'] = symtab_module.Symtab(symtab)
return dummy_type_infer(self.context, tree, env=self.env, **kwargs)
def dummy_type_infer(context, tree, order=['type_infer', 'type_set'], env=None,
**kwargs):
assert env is not None
func_obj = kwargs.pop('func')
func_env = env.translation.crnt.inherit(func=func_obj, ast=tree,
func_signature=void(),
**kwargs)
numba.pipeline.run_env(
env, func_env, pipeline_name='dummy_type_infer',
function_level=1, locals=func_env.locals, **kwargs)
symtab = func_env.symtab
ast = func_env.ast
return symtab, ast
def template(s, substitutions, template_variables=None):
s = textwrap.dedent(s)
replaced = [0]
def replace(ident):
replaced[0] += 1
return '%s%s' % (prefix, ident.group(1))
source = re.sub('{{(.*?)}}', replace, s)
tree = ast.parse(source)
if replaced:
tree = Interpolate(substitutions, template_variables).visit(tree)
return ast.Suite(body=tree.body)
def template_simple(s, **substitutions):
return template(s, substitutions)
class Interpolate(ast.NodeTransformer):
"""
Interpolate template substitutions.
substitutions: { var_name : substitution_node }
This is for {{var_name}} template syntax. This allows
    substituting arbitrary ASTs in specific places.
template_variables: [TemplateVariable(...)]
    This is for $var syntax. This allows the introduction of typed
or untyped variables, as well as code variables.
"""
def __init__(self, substitutions, template_variables):
self.substitutions = substitutions
self.template_variables = template_variables or {}
self.make_substitutions_clonenodes()
def make_substitutions_clonenodes(self):
for name, replacement in self.substitutions.iteritems():
is_clone = isinstance(replacement, (nodes.CloneableNode,
nodes.CloneNode,
ast.Name, # atomic
nodes.TempLoadNode))
have_type = hasattr(replacement, 'type')
if not is_clone and have_type:
self.substitutions[name] = nodes.CloneableNode(replacement)
def visit_Name(self, node):
if node.id.startswith(prefix):
name = node.id[len(prefix):]
node = self.substitutions[name]
if isinstance(node, nodes.CloneableNode):
self.substitutions[name] = node.clone
elif node.id in self.template_variables:
node.variable = self.template_variables[node.id]
node = nodes.MaybeUnusedNode(node)
return node
########NEW FILE########
__FILENAME__ = doctest_support
"""
Adapted from http://wiki.cython.org/FAQ#HowcanIrundoctestsinCythoncode.28pyxfiles.29.3F
Use the testmod function from test_support, don't use this directly.
====================================================================
Cython-compatible wrapper for doctest.testmod().
Usage example, assuming a Cython module mymod.pyx is compiled.
This is run from the command line, passing a command to Python:
python -c "import cydoctest, mymod; cydoctest.testmod(mymod)"
(This still won't let a Cython module run its own doctests
when called with "python mymod.py", but it's pretty close.
Further options can be passed to testmod() as desired, e.g.
verbose=True.)
"""
import sys
import unittest
import doctest
import inspect
import numba.decorators
from numba import numbawrapper
doctest_options = doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE
def from_module(module, object):
"""
Return true if the given object is defined in the given module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
        return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def fix_module_doctest(module):
"""
    Extract docstrings from cython functions that would otherwise be
    skipped by doctest.
"""
module.__test__ = {}
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, numbawrapper.NumbaWrapper) and
from_module(module, value.py_func) and value.py_func.__doc__):
module.__test__[name] = value.py_func.__doc__
elif (inspect.isbuiltin(value) and isinstance(value.__doc__, str) and
from_module(module, value)):
module.__test__[name] = value.__doc__
class MyDocTestFinder(doctest.DocTestFinder):
def find(self, obj, **kws):
res = doctest.DocTestFinder.find(self, obj, **kws)
return res
def testmod(m=None, run_doctests=True, optionflags=doctest_options,
verbosity=2):
"""
Fix a Cython module's doctests, then call doctest.testmod()
"""
fix_module_doctest(m)
if run_doctests:
finder = MyDocTestFinder(exclude_empty=False)
suite = doctest.DocTestSuite(m, test_finder=finder,
optionflags=optionflags)
result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
if not result.wasSuccessful():
raise Exception("Doctests failed: %s" % result)
########NEW FILE########
__FILENAME__ = runner
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import sys
from itertools import ifilter
from functools import partial
import subprocess
from numba import PY3
import numba
root = os.path.dirname(os.path.abspath(numba.__file__))
# ______________________________________________________________________
# Test filtering
EXCLUDE_TEST_PACKAGES = [
"numba.minivect",
"numba.pyextensibletype",
"numba.tests.broken_issues",
]
if PY3:
EXCLUDE_TEST_PACKAGES.append("numba.tests.py2x")
def make_path(root, predicate):
"Call the predicate with a file path (e.g. numba/test/foo.py)"
return lambda item: predicate(os.path.join(root, item))
def qualify_path(root, predicate):
"Call the predicate with a dotted name (e.g. numba.tests.foo)"
return make_path(root, lambda item: predicate(qualify_test_name(item)))
class Filter(object):
def __init__(self, matcher=None):
self.matcher = matcher
def filter(self, root, dirs, files):
matcher = make_path(root, self.matcher)
return ifilter(matcher, dirs), ifilter(matcher, files)
class PackageFilter(Filter):
def filter(self, root, dirs, files):
matcher = qualify_path(root, self.matcher)
return ifilter(matcher, dirs), files
class ModuleFilter(Filter):
def filter(self, root, dirs, files):
matcher = qualify_path(root, self.matcher)
return dirs, ifilter(matcher, files)
class FileFilter(Filter):
def filter(self, root, dirs, files):
return dirs, [fn for fn in files if fn.endswith(".py")]
# ______________________________________________________________________
# Test discovery
class Walker(object):
def __init__(self, root, filters):
self.root = root
self.filters = filters
def walk(self):
for root, dirs, files in os.walk(self.root):
dirs[:], files[:] = apply_filters(root, dirs, files, self.filters)
yield ([os.path.join(root, dir) for dir in dirs],
[os.path.join(root, fn) for fn in files])
def apply_filters(root, dirs, files, filters):
for filter in filters:
dirs, files = list(dirs), list(files)
# print(filter, list(dirs), list(files))
dirs, files = filter.filter(root, dirs, files)
return dirs, files
def qualify_test_name(root):
root, ext = os.path.splitext(root)
qname = root.replace("/", ".").replace("\\", ".").replace(os.sep, ".") + "."
offset = qname.rindex('numba.')
return qname[offset:].rstrip(".")
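    # e.g. "/path/to/numba/tests/test_foo.py" -> "numba.tests.test_foo"
    # (illustrative; the name is cut at the last occurrence of 'numba.').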
def match(items, modname):
return any(item in modname for item in items)
# ______________________________________________________________________
# Signal handling
def map_returncode_to_message(retcode):
if retcode < 0:
retcode = -retcode
return signal_to_name.get(retcode, "Signal %d" % retcode)
return ""
try:
import signal
except ImportError:
signal_to_name = {}
else:
signal_to_name = dict((signal_code, signal_name)
for signal_name, signal_code in vars(signal).items()
if signal_name.startswith("SIG"))
# ______________________________________________________________________
# Test running
def test(whitelist=None, blacklist=None, print_failures_only=False, loop=False):
while True:
exit_status = _test(whitelist, blacklist, print_failures_only)
if exit_status != 0 or not loop:
return exit_status
def _test(whitelist, blacklist, print_failures_only):
# FIXME
# temporarily disable pycc test on win32
if sys.platform.startswith('win32'):
blacklist = ['test_pycc_tresult']
# Make some test filters
filters = [
PackageFilter(lambda pkg: not any(
pkg.startswith(p) for p in EXCLUDE_TEST_PACKAGES)),
PackageFilter(lambda pkg: not pkg.endswith(".__pycache__")),
ModuleFilter(lambda modname: modname.split('.')[-1].startswith("test_")),
FileFilter(),
]
if whitelist:
filters.append(ModuleFilter(partial(match, whitelist)))
if blacklist:
filters.append(ModuleFilter(lambda item: not match(blacklist, item)))
# Run tests
runner = TestRunner(print_failures_only)
run_tests(runner, filters)
sys.stdout.write("ran test files: failed: (%d/%d)\n" % (runner.failed,
runner.ran))
return 0 if runner.failed == 0 else 1
def run_tests(test_runner, filters, root=root):
"""
Run tests:
- Find tests in packages called 'tests'
- Run any test files under a 'tests' package or a subpackage
"""
testpkg_walker = Walker(root, filters)
print("Running tests in %s" % os.path.join(root, "numba"))
for testpkgs, _ in testpkg_walker.walk():
for testpkg in testpkgs:
if os.path.basename(testpkg) == "tests":
# print("testdir:", testpkg)
test_walker = Walker(testpkg, filters)
for _, testfiles in test_walker.walk():
for testfile in testfiles:
# print("testfile:", testfile)
modname = qualify_test_name(testfile)
test_runner.run(modname)
class TestRunner(object):
"""
Test runner used by runtests.py
"""
def __init__(self, print_failures_only):
self.ran = 0
self.failed = 0
self.print_failures_only = print_failures_only
def run(self, modname):
self.ran += 1
if not self.print_failures_only:
sys.stdout.write("%-70s" % (modname,))
process = subprocess.Popen([sys.executable, '-m', modname],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = process.communicate()
if process.returncode == 0:
if not self.print_failures_only:
sys.stdout.write(" SUCCESS\n")
else:
if self.print_failures_only:
sys.stdout.write("%-69s" % (modname,))
sys.stdout.write(" FAILED:\n%79s\n" % map_returncode_to_message(
process.returncode))
if PY3:
out = str(out, encoding='UTF-8')
err = str(err, encoding='UTF-8')
sys.stdout.write(out)
sys.stdout.write(err)
sys.stdout.write("-" * 80)
sys.stdout.write('\n')
self.failed += 1
# ______________________________________________________________________
# Nose test running
def nose_run(module=None):
import nose.config
import __main__
#os.environ["NOSE_EXCLUDE"] = "(test_all|test_all_noskip|.*compile_with_pycc.*|bytecode)"
#os.environ["NOSE_VERBOSE"] = "4"
result = nose.main()
return len(result.errors), len(result.failures)
########NEW FILE########
__FILENAME__ = test_parametrize
from numba.testing.test_support import parametrize
@parametrize('foo', 'bar')
def func(arg):
return arg
assert func_testcase.__name__ == 'func'
assert hasattr(func_testcase, 'test_func_0')
assert hasattr(func_testcase, 'test_func_1')
assert func_testcase('test_func_0').test_func_0() == 'foo'
assert func_testcase('test_func_1').test_func_1() == 'bar'
########NEW FILE########
__FILENAME__ = test_user_doctest
import doctest
import numba
@numba.autojit
def func(value):
"""
>>> func(10.0)
10.0
"""
return value
numba.testmod()
@numba.autojit
def func2(value):
"""
>>> raise ValueError("I am a message")
Traceback (most recent call last):
...
ValueError: I am a ...
"""
numba.testmod(verbosity=2, optionflags=doctest.ELLIPSIS)
########NEW FILE########
__FILENAME__ = test_support
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import itertools
import os
import sys
import types
import unittest
try:
from nose.tools import nottest
except ImportError:
def nottest(fn):
def _nottest(*args, **kws):
raise Exception("nose not available")
return _nottest
import numba
from numba import *
from numba.testing import doctest_support
if numba.PY3:
import io
else:
import StringIO as io
jit_ = jit
if numba.PY3:
import re
def rewrite_doc(doc):
doc = re.sub(r'(\d+)L', r'\1', doc)
doc = re.sub(r'([^\.])NumbaError', r'\1numba.error.NumbaError', doc)
doc = re.sub(r'([^\.])InvalidTemplateError', r'\1numba.error.InvalidTemplateError', doc)
doc = re.sub(r'([^\.])UnpromotableTypeError', r'\1numba.error.UnpromotableTypeError', doc)
return doc
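        # e.g. '10L' in expected doctest output becomes '10', and an
        # unqualified NumbaError (preceded by a non-dot character) becomes
        # numba.error.NumbaError, so the same doctests pass on Python 3
        # (illustration only).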
def autojit_py3doc(*args, **kwargs):
if kwargs:
def _inner(fun):
fun.__doc__ = rewrite_doc(fun.__doc__)
return autojit(*args, **kwargs)(fun)
return _inner
else:
fun = args[0]
fun.__doc__ = rewrite_doc(fun.__doc__)
return autojit(fun)
else:
def rewrite_doc(doc):
return doc
autojit_py3doc = autojit
class ASTTestCase(unittest.TestCase):
jit = staticmethod(lambda *args, **kw: jit_(*args, **dict(kw, backend='ast')))
backend = 'ast'
autojit = staticmethod(autojit(backend=backend))
#------------------------------------------------------------------------
# Support for unittest in < py2.7
#------------------------------------------------------------------------
have_unit_skip = sys.version_info[:2] > (2, 6)
if have_unit_skip:
from unittest import SkipTest
else:
class SkipTest(Exception):
"Skip a test in < py27"
@nottest
def skip_test(reason):
if have_unit_skip:
raise SkipTest(reason)
else:
print("Skipping: " + reason, file=sys.stderr)
def skip_if(should_skip, message):
def decorator(func):
def wrapper(*args, **kwargs):
if should_skip:
skip_test(message)
else:
return func(*args, **kwargs)
return wrapper
return decorator
def skip_unless(should_skip, message):
return skip_if(not should_skip, message)
def skip(message):
return skip_if(True, message)
def checkSkipFlag(reason):
def _checkSkipFlag(fn):
@nottest
def _checkSkipWrapper(self, *args, **kws):
skip_test(reason)
return _checkSkipWrapper
return _checkSkipFlag
#------------------------------------------------------------------------
# Test running
#------------------------------------------------------------------------
def main():
import sys, logging
if '-d' in sys.argv:
logging.getLogger().setLevel(logging.DEBUG)
sys.argv.remove('-d')
if '-D' in sys.argv:
logging.getLogger().setLevel(logging.NOTSET)
sys.argv.remove('-D')
unittest.main()
class StdoutReplacer(object):
def __enter__(self, *args):
self.out = sys.stdout
sys.stdout = io.StringIO()
return sys.stdout
def __exit__(self, *args):
sys.stdout = self.out
def fix_module_doctest_py3(module):
"""
Rewrite docs for python 3
"""
if not numba.PY3:
return
if module.__doc__:
try:
module.__doc__ = rewrite_doc(module.__doc__)
except:
pass
for name in dir(module):
if name.startswith('__'):
continue
value = getattr(module, name)
try:
value.__doc__ = rewrite_doc(value.__doc__)
except:
pass
def testmod(module=None, run=True, optionflags=None,):
"""
    Tests a doctest module with numba functions. When run in nosetests, this
    only populates module.__test__; when run as main, it runs the doctests.
"""
if module is None:
mod_globals = sys._getframe(1).f_globals
modname = mod_globals['__name__']
module = __import__(modname)
# module = types.ModuleType(modname)
# vars(module).update(mod_globals)
fix_module_doctest_py3(module)
doctest_support.testmod(module, run_doctests=run)
#------------------------------------------------------------------------
# Test Parametrization
#------------------------------------------------------------------------
def parametrize(*parameters, **named_parameters):
"""
@parametrize('foo', 'bar')
def test_func(foo_or_bar):
print foo_or_bar # prints 'foo' or 'bar'
or
@parametrize(x=['foo', 'bar'], y=['baz', 'quux'])
def test_func(x, y):
print x, y # prints all combinations
Generates a unittest TestCase in the function's global scope named
'test_func_testcase' with parametrized test methods.
    :return: The original function
"""
if parameters and named_parameters:
raise TypeError('Cannot specify both parameters and named_parameters')
def decorator(func):
class TestCase(unittest.TestCase):
pass
TestCase.__name__ = func.__name__
TestCase.__module__ = func.__module__
names = named_parameters.keys()
values = parameters or itertools.product(*named_parameters.values())
for i, parameter in enumerate(values):
name = 'test_%s_%d' % (func.__name__, i)
if names:
def testfunc(self, parameter=parameter):
return func(**dict(zip(names, parameter)))
else:
def testfunc(self, parameter=parameter):
return func(parameter)
testfunc.__name__ = name
if func.__doc__:
testfunc.__doc__ = func.__doc__.replace(func.__name__, name)
setattr(TestCase, name, testfunc)
func.__globals__[func.__name__ + '_testcase'] = TestCase
return func
return decorator
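# Illustrative sketch (hypothetical function name, not from the original
# source): decorating check_square as below creates check_square_testcase in
# this module's globals, with methods test_check_square_0..test_check_square_2
# that unittest/nose can discover.
#
#     @parametrize(1, 2, 3)
#     def check_square(n):
#         assert n * n >= n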
########NEW FILE########
__FILENAME__ = user_support
# -*- coding: utf-8 -*-
"""
Doctest support exposed to numba users.
"""
from __future__ import print_function, division, absolute_import
import sys
import doctest
from numba.testing import doctest_support
doctest_options = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
def testmod(module=None, run=True, optionflags=0, verbosity=2):
"""
    Tests a module's doctests with numba functions. When run under nosetests,
    this only populates module.__test__; when run as main, it runs the doctests.
module: the module to run the doctests in
run: whether to run the doctests or just build a __test__ dict
verbosity: verbosity level passed to unittest.TextTestRunner
               The default is 2.
optionflags: doctest options (e.g. doctest.ELLIPSIS)
"""
if module is None:
mod_globals = sys._getframe(1).f_globals
modname = mod_globals['__name__']
module = __import__(modname)
doctest_support.testmod(
module,
run_doctests=run,
optionflags=optionflags,
verbosity=verbosity,
)
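# Illustrative usage sketch (the import path and doctest flags shown are
# assumptions about typical usage, not taken from the original file):
#
#     import doctest
#     from numba.testing.user_support import testmod
#     if __name__ == '__main__':
#         # ELLIPSIS lets expected output use '...' wildcards
#         testmod(optionflags=doctest.ELLIPSIS, verbosity=2)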
########NEW FILE########
__FILENAME__ = array_global
from numba import *
import numpy as np
A = np.arange(10, dtype=np.float32)
X = np.empty(10, dtype=np.float32)
@jit(void(f4[:]))
def read_global(Y):
Y[1] = A[1]
read_global(X)
assert X[1] == 1
@jit(void(f4[:]))
def write_global(Y):
A[2] = Y[2]
X[2] = 14
write_global(X)
assert A[2] == 14
########NEW FILE########
__FILENAME__ = test_array_expressions
from numba import *
import numpy as np
f = float_
@autojit(backend='ast')
def array_expr(a, b, c):
return a + b * c
@autojit(backend='ast')
def func(a):
return a * 2.0
@autojit(backend='ast')
def array_expr2(a, b, c):
return a + b + func(c)
@autojit(backend='ast')
def array_expr3(a, b, c):
# a[1:, 1:] = a[1:, 1:] + b[1:, :-1] * c[1:, :-1]
a[...] = a + b * c
def test_array_expressions():
a = np.arange(120).reshape(10, 12).astype(np.float32)
assert np.all(array_expr(a, a, a) == array_expr.py_func(a, a, a))
assert np.all(array_expr2(a, a, a) == array_expr2.py_func(a, a, a))
result, numpy_result = a.copy(), a.copy()
array_expr3(result, result, result)
array_expr3.py_func(numpy_result, numpy_result, numpy_result)
assert np.all(result == numpy_result)
#
### test matrix multiplication w/ array expressions
#
@autojit(backend='ast')
def array_expr_matmul(A, B):
m, n = A.shape
n, p = B.shape
C = np.empty((m, p), dtype=A.dtype)
for i in range(m):
for j in range(p):
C[i, j] = (A[i, :] * B[:, j]).sum()
return C
def test_matmul():
a = np.arange(120).reshape(10, 12).astype(np.float32)
b = a.T
result = array_expr_matmul(a, b)
assert np.all(result == np.dot(a, b))
@autojit
def vectorized_math(a):
a[...] = np.cos(a) * np.sin(a)
return a
def test_vectorized_math():
a = vectorized_math(np.arange(100, dtype=np.float64))
b = vectorized_math.py_func(np.arange(100, dtype=np.float64))
assert np.allclose(a, b)
@autojit
def diffuse(iter_num):
u = np.zeros((Lx, Ly), dtype=np.float64)
temp_u = np.zeros_like(u)
temp_u[Lx / 2, Ly / 2] = 1000.0
for i in range(iter_num):
u[1:-1, 1:-1] = mu * (temp_u[2:, 1:-1] + temp_u[:-2, 1:-1] +
temp_u[1:-1, 2:] + temp_u[1:-1, :-2] -
4 * temp_u[1:-1, 1:-1])
temp = u
u = temp_u
temp_u = temp
return u
mu = 0.1
Lx, Ly = 101, 101
def test_diffusion():
assert np.allclose(diffuse(100), diffuse.py_func(100))
@autojit
def array_assign_scalar(A, scalar):
A[...] = scalar
def test_assign_scalar():
A = np.empty(10, dtype=np.float32)
array_assign_scalar(A, 10.0)
assert np.all(A == 10.0)
if __name__ == '__main__':
tests = [name for name in globals().keys() if name.startswith('test_')]
for t in tests:
globals()[t]()
########NEW FILE########
__FILENAME__ = test_array_math
from numba import *
import numpy as np
def get_functions():
def sqrt(a):
return 2.7 + np.sqrt(a) + 1.6
def log(a):
return 2.7 + np.log(a) + 1.6
def log10(a):
return 2.7 + np.log10(a) + 1.6
def log1p(a):
return 2.7 + np.log1p(a) + 1.6
def log2(a):
return 2.7 + np.log2(a) + 1.6
def exp(a):
return 2.7 + np.exp(a) + 1.6
# def expm1(a):
# return 2.7 + np.expm1(a) + 1.6
def sin(a):
return 2.7 + np.sin(a) + 1.6
def cos(a):
return 2.7 + np.cos(a) + 1.6
def absolute(a):
return 2.7 + np.abs(a) + 1.6
return locals()
dtypes = ['i', 'l', 'f', 'd', np.complex128]
def test_math_funcs():
functions = get_functions()
exceptions = 0
for func_name in functions:
# func_name = 'sqrt'
func = functions[func_name]
for dtype in dtypes:
numba_func = autojit(func)
x = np.arange(8 * 12, dtype=dtype).reshape(8, 12)
x = ((x + 10) / 5).astype(dtype)
r1 = numba_func(x)
r2 = numba_func.py_func(x)
assert np.allclose(r1, r2), (r1 - r2, r1.dtype, r2.dtype,
func_name, x.dtype)
if exceptions:
raise Exception
if __name__ == '__main__':
test_math_funcs()
########NEW FILE########
__FILENAME__ = test_broadcasting
import numpy as np
from numba import *
def operands(dtype=np.double):
return np.arange(10, dtype=dtype), np.arange(100, dtype=dtype).reshape(10, 10)
def check_kernel(kernel, *args):
new_args = [arg.copy() for arg in args]
result = kernel(*new_args)
new_args = [arg.copy() for arg in args]
numpy_result = kernel.py_func(*new_args)
assert np.allclose(result, numpy_result), numpy_result - result
@autojit
def get_slices(a, b):
return [
(a, b),
(a[:, np.newaxis], b),
(a[np.newaxis, :, np.newaxis], b),
(a[np.newaxis, :, np.newaxis], b[np.newaxis, :, :]),
(a[np.newaxis, :, np.newaxis], b[:, np.newaxis, :]),
(a[np.newaxis, :, np.newaxis], b[:, :, np.newaxis]),
]
@autojit
def broadcast_expr1(m1, m2):
return m1 + m2
@autojit
def broadcast_expr2(m1, m2):
m2[...] = m1 + m2
return m2
@autojit
def broadcast_expr3(m1, m2):
m2[...] = m1 + m2 - 2
return m2
@autojit
def broadcast_expr4(m1, m2):
m2[np.newaxis, :] = m1[np.newaxis, :] + m2[np.newaxis, :]
return m2
def test(dtype):
"""
>>> test(np.double)
>>> test('l')
>>> test(np.complex128)
>>> test(np.complex64)
>> if hasattr(np, 'complex256'):
... test(np.complex256)
...
"""
a, b = operands(dtype)
views = get_slices(a, b)
py_views = get_slices.py_func(a, b)
# test slicing
for (v1, v2), (v3, v4) in zip(views, py_views):
assert v1.shape == v3.shape
assert v2.shape == v4.shape
assert v1.strides == v3.strides
assert v2.strides == v4.strides
assert v1.ctypes.data == v3.ctypes.data
assert v2.ctypes.data == v4.ctypes.data
check_kernel(broadcast_expr1, a, b)
check_kernel(broadcast_expr2, a, b)
check_kernel(broadcast_expr3, a, b)
check_kernel(broadcast_expr4, a, b)
@autojit
def broadcast_expr5(m1, m2):
m2[:, 0] = m1 * m1
return m2
@autojit
def broadcast_expr6(m1, m2):
m2[1:-1:2, 0] = m1[1:-1:2] * m1[-2:1:-2]
return m2
@autojit
def broadcast_expr7(m1, m2):
m2[1:-1:2, 0, ..., ::2] = (m1[1:-1:2, ..., ::2] *
m1[-2:1:-2, ..., ::2])
return m2
def test_index_slice_assmt(dtype):
"""
>>> test_index_slice_assmt(np.double)
>>> test_index_slice_assmt('l')
>>> test_index_slice_assmt(np.complex64)
>>> test_index_slice_assmt(np.complex128)
"""
a, b = operands(dtype)
check_kernel(broadcast_expr5, a, b)
check_kernel(broadcast_expr6, a, b)
b = np.arange(10000).reshape(10, 10, 10, 10)
a = b[0]
check_kernel(broadcast_expr7, a, b)
@autojit
def shape_mismatch(a, b):
b[...] = a + b
@autojit(nopython=True)
def shape_mismatch_nopython(a, b):
b[...] = a + b
def test_shape_mismatch():
"""
>>> a, b = operands(np.double)
>>> shape_mismatch(a[:2], b)
Traceback (most recent call last):
...
ValueError: ...
# This will abort, so don't run it :)
>> shape_mismatch_nopython(a[:2], b)
ValueError: Shape mismatch while broadcasting
"""
if __name__ == "__main__":
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_slicing
import time
import numpy as np
from numba import *
from numba.decorators import autojit
@autojit
def slice_array_start(a, start):
return a[start:]
@autojit
def slice_array_stop(a, stop):
return a[:stop]
@autojit
def slice_array_step(a, step):
return a[::step]
@autojit
def slice_array(a, start, stop, step):
return a[start:stop:step]
@autojit
def time_slicing(a, start, stop, step):
# with nopython: # should make no difference in timing!
for i in range(1000000):
a[start:stop:step] = a[start:stop:step] * a[start:stop:step]
def test_slicing():
"""
>>> test_slicing()
"""
a = np.arange(10)
assert np.all(slice_array(a, 1, 7, 2) == a[1:7:2]) # sanity test
for start in range(-5, 15):
assert np.all(slice_array_start(a, start) == a[start:])
for stop in range(-5, 15):
assert np.all(slice_array_stop(a, stop) == a[:stop])
for step in range(-3, 4):
if step == 0:
continue
assert np.all(slice_array_step(a, step) == a[::step])
assert np.all(slice_array(a, start, stop, step) ==
a[start:stop:step])
def test_slicing_result():
"""
>>> test_slicing_result()
array([2, 3, 4, 5, 6, 7, 8, 9])
"""
a = np.arange(10)
return slice_array_start(a, 2)
if __name__ == "__main__":
import numba
numba.testing.testmod()
# a = np.arange(10)
# t = time.time()
# time_slicing(a, 1, 7, 2)
# print((time.time() - t))
########NEW FILE########
__FILENAME__ = test_slicing2
# Issue: #144
# Thanks to Neal Becker
import numpy as np
from numba import *
f8_array_ty = f8[:]
@jit
class fir (object):
@void(f8[:])
def __init__ (self, coef):
self.coef = coef
self.inp = np.zeros_like (coef)
@void(f8_array_ty)
def shift (self, u):
size = self.inp.size
n = len (u)
for i in range (size-1-n, -1, -1):
self.inp[i+n] = self.inp[i]
self.inp[:n] = u
@f8 ()
def compute1(self):
s = 0
size = self.coef.size
for i in range (size):
s += self.inp[i] * self.coef[i]
return s
@f8(f8_array_ty)
def shift_compute1 (self, u):
self.shift (u)
return self.compute1()
@f8_array_ty(f8_array_ty)
def compute (self, u):
out = np.empty_like (u)
size = u.size
for i in range (size):
out[i] = self.shift_compute1 (u[i:i+1])
return out
@f8_array_ty(f8_array_ty)
def __call__ (self, u):
return self.compute (u)
if __name__ == '__main__':
coef = np.random.rand (16)
filt = fir (coef)
inp = np.random.rand (1000)
x = inp[:10].copy()
out = filt.compute (inp)
assert np.all(x == inp[:10])
########NEW FILE########
__FILENAME__ = test_vmdot
import time
import numba
from numba import *
import numpy as np
# Bug reported by Jeremiah L. Lowin
def timer(pyfunc, numbafunc, *args, **kwargs):
t1 = time.time()
pyresult = pyfunc(*args, **kwargs)
t2 = time.time()
print(('python function took: {0}'.format(t2-t1)))
t3 = time.time()
numbaresult = numbafunc(*args, **kwargs)
t4 = time.time()
print(('numba function took: {0}'.format(t4-t3)))
print(('speedup: {0}x'.format(np.round((t2-t1) / (t4-t3),2))))
assert np.allclose(pyresult, numbaresult)
def timer2(pyfunc, numbafunc, *args, **kwargs):
t1 = time.time()
pyresult = np.empty_like(args[0])
pyargs = args + (pyresult,)
pyfunc(*pyargs, **kwargs)
t2 = time.time()
print(('python function took: {0}'.format(t2-t1)))
t3 = time.time()
numbaresult = np.empty_like(args[0])
nbargs = args + (numbaresult,)
numbafunc(*nbargs, **kwargs)
t4 = time.time()
print(('numba function took: {0}'.format(t4-t3)))
print(('speedup: {0}x'.format(np.round((t2-t1) / (t4-t3),2))))
assert np.allclose(pyresult, numbaresult)
def vmdot(x, w):
out = np.empty((x.shape[0], w.shape[1]))
for i in range(x.shape[0]):
dot_prod = np.dot(x[i], w)
out[i] = np.exp(-1 * dot_prod)
return out
def vmdot2(x, w, out):
for i in range(x.shape[0]):
dot_prod = np.dot(x[i], w)
out[i] = np.exp(-1 * dot_prod)
def test_vmdot():
numba_vmdot = jit(restype=double[:,:],
argtypes=[double[:,:], double[:,:]])(vmdot)
x = np.random.random((1000, 1000))
w = np.random.random((1000, 1000)) / 1000.
timer(vmdot, numba_vmdot, x, w)
# ensure this compiles
numba_vmdot2 = jit(argtypes=[double[:,:], double[:,:], double[:,:]])(vmdot2)
timer2(vmdot2, numba_vmdot2, x, w)
if __name__ == '__main__':
test_vmdot()
########NEW FILE########
__FILENAME__ = autojit_ext_method_call
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Thanks to Neal Becker
# TODO: Fix and dedup
import numpy as np
from numba import *
f8_array_ty = f8[:]
@autojit
class fir (object):
## @void(f8[:])
def __init__ (self, coef):
self.coef = coef
self.inp = np.zeros_like (coef)
## @void(f8)
def shift (self, u):
size = self.inp.size
for i in range (size-1-1, -1, -1):
self.inp[i+1] = self.inp[i]
self.inp[0] = u
## @double ()
def compute1(self):
s = 0
size = self.coef.size
for i in range (size):
s += self.inp[i] * self.coef[i]
return s
## @f8(f8)
def shift_compute1 (self, u):
self.shift (u)
return self.compute1()
## @f8_array_ty(f8_array_ty)
def compute (self, u):
out = np.empty_like (u)
size = u.size
for i in range (size):
out[i] = self.shift_compute1 (u[i])
return out
## @f8_array_ty(f8_array_ty)
def __call__ (self, u):
return self.compute (u)
if __name__ == '__main__':
from timeit import timeit
coef = np.arange(100, dtype=np.double)
filt = fir (coef)
inp = np.arange(100, dtype=np.double)
out = filt.compute (inp)
filt (coef)
########NEW FILE########
__FILENAME__ = test_binary_harder
import unittest
import numpy as np
from itertools import product
from numba import jit
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a // b
def boundary_range(s=0):
maxint = 2**32 - 1
blimit = 10
for i in xrange(s, blimit):
yield i
i = blimit
while i < maxint:
i &= maxint
yield i
i = i << 1
for i in xrange(maxint - blimit, maxint):
yield i
def boundary_range_signed(s=0):
maxint = 2**16 - 1
blimit = 10
for sign in (1, -1):
for i in xrange(s, blimit):
yield sign * i
i = blimit
while i < maxint:
yield sign * i
i = i << 1
for i in xrange(maxint - blimit, maxint):
yield sign * i
class TestBinaryOps(unittest.TestCase):
def template(self, pyfn, op, s=0):
msg = "%s %s %s -> %s (expect %s)"
types = 'uint32', 'float64'
for ty in types:
signature = '%s(%s,%s)' % (ty, ty, ty)
fn = jit(signature)(pyfn)
for a, b in product(boundary_range(s=s), boundary_range(s=s)):
if ty in ['float64']:
a, b = float(a), float(b)
exp = pyfn(a, b)
if ty in ['uint32']:
exp &= 0xffffffff
try:
got = fn(a, b)
except:
print('Exception raised at fn=%s a=%s b=%s, exp=%s ty=%s' %
(pyfn, a, b, exp, ty))
raise
self.assertTrue(exp == got, msg % (a, op, b, got, exp))
def template2(self, pyfn, op, s=0, w=True):
msg = "%s %s %s -> %s (expect %s)"
types = 'int32', 'float64'
for ty in types:
signature = '%s(%s,%s)' % (ty, ty, ty)
fn = jit(signature)(pyfn)
for a, b in product(boundary_range_signed(s=s),
boundary_range_signed(s=s)):
if ty in ['float64']:
a, b = float(a), float(b)
if w:
exp = pyfn(np.asarray(a, dtype=ty), np.asarray(b, dtype=ty))
else:
exp = pyfn(a, b)
try:
got = fn(a, b)
except:
print('Exception raised at fn=%s a=%s b=%s, exp=%s ty=%s' %
(pyfn, a, b, exp, ty))
raise
self.assertTrue(exp == got, msg % (a, op, b, got, exp))
def test_add(self):
self.template(add, '+')
self.template2(add, '+')
def test_sub(self):
self.template(sub, '-')
self.template2(sub, '-')
def test_mul(self):
self.template(mul, '*')
self.template2(mul, '*')
def test_div(self):
self.template(div, '//', s=1)
self.template2(div, '//', s=1, w=False)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_bitwise_harder
import unittest
from itertools import product
from numba import jit
@jit('uint32(uint32, uint32)')
def bitlshift(a, b):
return (a << b) & 0xffffffff
@jit('uint32(uint32, uint32)')
def bitrshift(a, b):
return (a >> b) & 0xffffffff
@jit('int32(int32, int32)')
def bitashift(a, b):
return (a >> b) & 0xffffffff
@jit('uint32(uint32, uint32)')
def bitand(a, b):
return a & b
@jit('uint32(uint32, uint32)')
def bitor(a, b):
return a | b
@jit('uint32(uint32)')
def bitnot(a):
return ~a
@jit('uint32(uint32, uint32)')
def bitxor(a, b):
return a ^ b
def boundary_range():
maxint = 2**32 - 1
blimit = 10
for i in xrange(0, blimit):
yield i
i = blimit
while i < maxint:
i &= 0xffffffff
yield i
i = i << 1
for i in xrange(maxint - blimit, maxint):
yield i
class TestBitwise(unittest.TestCase):
def template(self, testfn, expfn, op):
for a, b in product(boundary_range(), boundary_range()):
exp = expfn(a, b)
try:
got = testfn(a, b)
except:
print('Exception raised at a=%s b=%s, exp=%s' % (a, b, exp))
raise
self.assertTrue(exp == got,
"%s %s %s -> %s (expect %s)" % (a, op, b, got, exp))
def test_lshift(self):
self.template(bitlshift, bitlshift.py_func, '<<')
def test_rshift(self):
self.template(bitrshift, bitrshift.py_func, '>>')
def test_ashift(self):
self.template(bitashift, bitashift.py_func, '>>')
def test_and(self):
self.template(bitand, bitand.py_func, '&')
def test_or(self):
self.template(bitor, bitor.py_func, '|')
def test_xor(self):
self.template(bitxor, bitxor.py_func, '^')
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_log1p_vectorize
# -*- coding: utf-8 -*-
# from __future__ import division, absolute_import
from math import log1p
from numba import *
from numba.vectorize import vectorize
import numpy as np
@jit(double(double))
def jit_log1p(x):
return log1p(x)
x = 3.4
assert np.allclose([jit_log1p(x)], [jit_log1p.py_func(x)])
@vectorize([double(double)])
def vec_log1p(x):
return log1p(x)
x = np.array([x])
assert np.allclose(vec_log1p(x), [jit_log1p.py_func(x)])
########NEW FILE########
__FILENAME__ = test_builtin_abs
# adapted from cython/tests/run/builtin_abs.pyx
"""
>>> _abs = abs_as_name()
>>> _abs(-5)
5
>>> py_abs(-5)
5
>>> py_abs(-5.5)
5.5
>>> int(int32_abs(-5))
10
>>> int(int_abs(-5))
10
>>> int(long_abs(-5))
10
>>> int(ulong_abs(5))
10
>>> long_long_abs(-(2**33)) == 2**34
True
>>> ulong_long_abs(2**33) == 2**34
True
>>> double_abs(-5)
10.0
>>> double_abs(-5.5)
11.0
>>> float_abs(-5)
10.0
>>> float_abs(-5.5)
11.0
>>> '%.2f' % round(complex64_abs(-10-2j), 2)
'20.40'
>>> '%.2f' % round(complex128_abs(-10-2j), 2)
'20.40'
"""
from numba import *
### Python usage
@jit(object_())
def abs_as_name():
x = abs
return x
@jit(argtypes=[object_])
def py_abs(a):
return abs(a)
### nopython usage
@autojit(nopython=True)
def _abs(value):
result = abs(value)
with nopython:
return result * 2 # test return type being non-object
@jit(nopython=True, argtypes=[int_])
def int_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[long_])
def long_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[ulong])
def ulong_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[int32])
def int32_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[longlong])
def long_long_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[ulonglong])
def ulong_long_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[double])
def double_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[float_])
def float_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[complex64])
def complex64_abs(a):
return _abs(a)
@jit(nopython=True, argtypes=[complex128])
def complex128_abs(a):
return _abs(a)
if __name__ == '__main__':
# print long(int32_abs(-5))
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_chr
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import autojit
@autojit(nopython=True)
def test_chr(x):
return chr(x)
assert test_chr(97) == b'a'
########NEW FILE########
__FILENAME__ = test_builtin_complex
"""
>>> empty_complex()
0j
>>> new_complex(1., 5)
(1+5j)
>>> convert_to_complex(10)
(10+0j)
>>> convert_to_complex(10+2j)
(10+2j)
>>> convert_to_complex(10.0)
(10+0j)
"""
import sys
from numba import *
@autojit(backend='ast')
def empty_complex():
x = complex()
return x
@autojit(backend='ast')
def new_complex(x, y):
return complex(x, y)
@autojit(backend='ast')
def convert_to_complex(x):
return complex(x)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_enumerate
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# Based on cython/tests/run/enumerate_T316.pyx
from numba import *
@autojit
def go_py_enumerate():
"""
>>> go_py_enumerate()
0 1
1 2
2 3
3 4
"""
for i,k in enumerate(range(1,5)):
print(i, k)
@autojit
def py_enumerate_list_index_target():
"""
>>> py_enumerate_list_index_target()
[0] 1
[1] 2
[2] 3
[3] 4
"""
target = [None]
for target[0],k in enumerate(range(1,5)):
print(target, k)
@autojit
def go_py_enumerate_start():
"""
>>> go_py_enumerate_start()
5 1
6 2
7 3
8 4
"""
for i,k in enumerate(list(range(1,5)), 5):
print(i, k)
@autojit
def go_c_enumerate():
"""
>>> go_c_enumerate()
0 1
1 2
2 3
3 4
"""
for i,k in enumerate(range(1,5)):
print(i, k)
@autojit
def go_c_enumerate_step():
"""
>>> go_c_enumerate_step()
0 1
1 3
2 5
"""
for i,k in enumerate(range(1,7,2)):
print(i, k)
# @autojit # TODO:
def py_enumerate_dict(d):
"""
>>> py_enumerate_dict({})
:: 55 99
>>> py_enumerate_dict(dict(a=1, b=2, c=3))
0 True
1 True
2 True
:: 2 True
"""
i = 55
k = 99
keys = list(d.keys())
for i,k in enumerate(d):
k = keys[i] == k
print(i, k)
print("::", i, k)
@autojit
def py_enumerate_break(t):
"""
>>> py_enumerate_break([1,2,3,4])
0 1
:: 0 1
"""
i,k = 55,99
for i,k in enumerate(t):
print(i, k)
break
print("::", i, k)
@autojit
def py_enumerate_return(t):
"""
>>> py_enumerate_return([])
:: 55 99
>>> py_enumerate_return([1,2,3,4])
0 1
"""
i,k = 55,99
for i,k in enumerate(t):
print(i, k)
return
print("::", i, k)
@autojit
def py_enumerate_continue(t):
"""
>>> py_enumerate_continue([1,2,3,4])
0 1
1 2
2 3
3 4
:: 3 4
"""
i,k = 55,99
for i,k in enumerate(t):
print(i, k)
continue
print("::", i, k)
@autojit
def empty_c_enumerate():
"""
>>> empty_c_enumerate()
(55, 99)
"""
i,k = 55,99
for i,k in enumerate(range(0)):
print(i, k)
return i, k
# Not supported (yet)
# @autojit
# def single_target_enumerate():
# """
# >>> single_target_enumerate()
# 0 1
# 1 2
# 2 3
# 3 4
# """
# for t in enumerate(range(1,5)):
# print(t[0], t[1])
# @autojit # TODO:
def multi_enumerate():
"""
>>> multi_enumerate()
0 0 0 1
1 1 1 2
2 2 2 3
3 3 3 4
"""
for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):
print(a,b,c,d)
# @autojit # TODO:
def multi_enumerate_start():
"""
>>> multi_enumerate_start()
0 2 0 1
1 3 1 2
2 4 2 3
3 5 3 4
"""
for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)), 2)):
print(a,b,c,d)
# @autojit # TODO:
def multi_c_enumerate():
"""
>>> multi_c_enumerate()
0 0 0 1
1 1 1 2
2 2 2 3
3 3 3 4
"""
for a,(b,(c,d)) in enumerate(enumerate(enumerate(range(1,5)))):
print(a,b,c,d)
@autojit
def convert_target_enumerate(L):
"""
>>> convert_target_enumerate([2,3,5])
0 2
1 3
2 5
"""
for a, b in enumerate(L):
print(a,b)
@autojit
def convert_target_enumerate_start(L, n):
"""
>>> convert_target_enumerate_start([2,3,5], 3)
3 2
4 3
5 5
"""
for a, b in enumerate(L, n):
print(a,b)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_float
"""
>>> empty_float()
0.0
>>> convert_float(10)
10.0
>>> float_conjugate()
1.5
"""
import sys
from numba import *
@autojit(backend='ast')
def empty_float():
return float()
@autojit(backend='ast')
def convert_float(y):
x = float(y)
return x
@autojit(backend='ast')
def float_conjugate():
return 1.5.conjugate()
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_int
"""
>>> empty_int()
0
>>> convert_int(2.5)
2
>>> convert_to_int('FF', 16)
255
"""
from numba import autojit
@autojit
def empty_int():
return int()
@autojit
def convert_int(x):
return int(x)
@autojit
def convert_to_int(s, base):
return int(s, base)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_minmax
from numba import autojit
from numba.testing.test_support import autojit_py3doc
@autojit_py3doc
def max1(x):
"""
>>> max1([100])
100
>>> max1([1,2.0,3])
3
>>> max1([-1,-2,-3.0])
-1
>>> max1(1)
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
"""
return max(x)
@autojit_py3doc
def min1(x):
"""
>>> min1([100])
100
>>> min1([1,2,3.0])
1
>>> min1([-1,-2.0,-3])
-3
>>> min1(1)
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
"""
return min(x)
@autojit_py3doc
def max2(x, y):
"""
>>> max2(1, 2)
2
>>> max2(1, -2)
1
>>> max2(10, 10.25)
10.25
>>> max2(10, 9.9)
10.0
>>> max2(0.1, 0.25)
0.25
>>> max2(1, 'a')
Traceback (most recent call last):
...
UnpromotableTypeError: Cannot promote types int and string
"""
return max(x, y)
@autojit_py3doc
def min2(x, y):
"""
>>> min2(1, 2)
1
>>> min2(1, -2)
-2
>>> min2(10, 10.1)
10.0
>>> min2(10, 9.75)
9.75
>>> min2(0.25, 0.3)
0.25
>>> min2(1, 'a')
Traceback (most recent call last):
...
UnpromotableTypeError: Cannot promote types int and string
"""
return min(x, y)
@autojit
def max4(x):
"""
>>> max4(20)
20.0
"""
return max(1, 2.0, x, 14)
@autojit
def min4(x):
"""
>>> min4(-2)
-2.0
"""
return min(1, 2.0, x, 14)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_ord
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import autojit
@autojit(nopython=True)
def test_ord(s):
return ord(s[1])
assert test_ord('hello') == 101
########NEW FILE########
__FILENAME__ = test_builtin_pow
"""
>>> pow3(2,3,5)
3
>>> pow3(3,3,5)
2
>>> pow3_const()
3
>>> pow2(2,3)
8
>>> pow2(3,3)
27
>>> pow2(3.0,3)
27.0
>>> pow2(3,3.0)
27.0
>>> pow2(3.0,3.0)
27.0
>>> pow2(1.5, 2)
2.25
>>> pow2(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow_op(3,3)
27
>>> pow_op(3.0,3)
27.0
>>> pow_op(3,3.0)
27.0
>>> pow_op(3.0,3.0)
27.0
>>> pow_op(1.5, 2)
2.25
>>> pow_op(1.5, 1.5) == pow(1.5, 1.5)
True
>>> pow2_const()
8
>>> c1, c2 = 1.2 + 4.1j, 0.6 + 0.5j
>>> allclose(pow2(c1, c2), pow(c1, c2))
True
>>> d1, d2 = 4.2, 5.1
>>> allclose(pow2(d1, d2), pow(d1, d2))
True
"""
from numba import autojit
from numpy import allclose
@autojit
def pow3(a,b,c):
return pow(a,b,c)
@autojit
def pow3_const():
return pow(2,3,5)
@autojit(nopython=True)
def pow2(a,b):
return pow(a,b)
@autojit(nopython=True)
def pow_op(a,b):
return a**b
@autojit(nopython=True)
def pow2_const():
return pow(2,3)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_range
"""
>>> range_ret1()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> range_ret2()
[1, 2, 3, 4]
>>> range_ret3()
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4]
>>> forward1()
0 1 2 3 4 5 6 7 8 9 done
>>> forward2()
1 2 3 4 done
>>> forward3()
5 8 11 14 done
>>> backward1()
10 7 4 done
>>> backward2()
done
>>> backward3()
-5 -8 -11 -14 done
>>> empty_assign()
14
>>> last_value()
Warning 92:10: local variable 'i' might be referenced before assignment
9
"""
from __future__ import print_function
from numba import *
@autojit
def range_ret1():
return range(10)
@autojit
def range_ret2():
return range(1, 5)
@autojit
def range_ret3():
return range(10, -5, -1)
@autojit
def forward1():
for i in range(10):
print(i, end=' ')
print("done")
@autojit
def forward2():
for i in range(1, 5):
print(i, end=' ')
print("done")
@autojit
def forward3():
for i in range(5, 15, 3):
print(i, end=' ')
print("done")
@autojit
def backward1():
for i in range(10, 2, -3):
print(i, end=' ')
print("done")
@autojit
def backward2():
for i in range(1, 5, -1):
print(i, end=' ')
print("done")
@autojit
def backward3():
for i in range(-5, -15, -3):
print(i, end=' ')
print("done")
@autojit
def empty_assign():
i = 14
for i in range(10, 4):
pass
print(i)
@autojit(warnstyle='simple')
def last_value():
for i in range(10):
pass
print(i)
if __name__ == '__main__':
# backward3()
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_round
"""
>>> round_val(2.2)
2.0
>>> round_val(3.6)
4.0
>>> round_val(5)
5.0
>>> round_val(object())
Traceback (most recent call last):
...
TypeError: a float is required
>>> round2(10.497, 2)
10.5
>>> round2(497, -1)
500.0
"""
import numpy as np
from numba import *
@autojit(backend='ast')
def round_val(a):
return round(a)
@autojit(backend='ast')
def round2(a, b):
return round(a, b)
if __name__ == '__main__':
# round2(10.497, 2)
# round_val(object())
round_val(3.6)
import numba
if numba.PY3:
__doc__ = __doc__.replace('TypeError: a float is required',
"TypeError: type object doesn't define __round__ method")
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_str
"""
>>> empty_str()
''
>>> str_convert(12.2)
'12.2'
"""
import sys
from numba import *
@autojit(backend='ast')
def empty_str():
x = str()
return x
@autojit(backend='ast')
def str_convert(x):
return str(x)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_builtin_zip
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
@autojit
def zip1(L1, L2):
"""
>>> zip1(range(2), range(5, 8))
[(0, 5), (1, 6)]
"""
return list(zip(L1, L2))
@autojit
def zip2(L1, L2, L3):
"""
>>> zip2(range(2), range(5, 8), range(9, 13))
[(0, 5, 9), (1, 6, 10)]
"""
return list(zip(L1, L2, L3))
@autojit
def ziploop1(L1, L2):
"""
>>> ziploop1(range(2), range(5, 8))
0 5
1 6
"""
for i, j in zip(L1, L2):
print(i, j)
@autojit
def ziploop2(L1, L2, L3):
"""
>>> ziploop2(range(2), range(5, 8), range(9, 13))
0 5 9
1 6 10
"""
for i, j, k in zip(L1, L2, L3):
print(i, j, k)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_object_builtins
"""
>>> get_globals()
20
>>> get_locals()
Traceback (most recent call last):
...
NumbaError: locals() is not supported in numba functions
>>> get_sum(3)
6
>>> eval_something("'hello'")
'hello'
>>> list(enumerate_list())
[(0, 1), (1, 2), (2, 3)]
>>> max_(20) == 20
True
>>> min_(-2) == -2
True
"""
from numba import *
myglobal = 20
autojit = autojit(warn=False, warnstyle="simple")
@autojit
def get_globals():
return globals()['myglobal']
@autojit
def get_locals():
x = 2
return locals()['x']
@autojit
def get_sum(x):
return sum([1, 2, x])
@autojit
def eval_something(s):
return eval(s)
@autojit
def enumerate_list():
return enumerate([1, 2, 3])
@autojit
def max_(x):
return max(1, 2.0, x, 14)
@autojit
def min_(x):
return min(1, 2.0, x, 14)
if __name__ == '__main__':
get_globals()
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_closure
import numba
from numba import *
from numba.error import NumbaError
autojit = autojit(warn=False, warnstyle='simple')
@autojit
def error1():
def inner():
pass
@autojit
def error2():
@autojit
def inner():
pass
@autojit
def error3():
inner(10, 20, 30)
@jit(restype=void, argtypes=[int_, int_, int_])
def inner(a, b, c):
print(str(a) + ' ' + str(b) + ' ' + str(c))
@autojit
def error4():
@jit(restype=void, argtypes=[int_, int_, int_])
def inner(a, b, c):
print(str(a) + ' ' + str(b) + ' ' + str(c))
inner(10, a=20, b=30, c=40)
@autojit
def error5():
@jit(restype=void, argtypes=[int_, int_, int_])
def inner(a, b, c):
print(str(a) + ' ' + str(b) + ' ' + str(c))
inner(10, a=20, b=30)
@autojit
def closure1():
a = 10
@jit(restype=void, argtypes=[int_])
def inner(arg):
print(str(arg))
return inner
@autojit
def closure2():
a = 10
@jit(restype=void, argtypes=[int_])
def inner(arg):
print(str(arg))
inner(arg=a)
@autojit
def closure3():
a = 10
@jit('void()')
def inner():
print(a)
a = 12
inner()
@autojit
def closure4():
a = 10
@jit('void()')
def inner():
print(a)
a = 12
return inner
@autojit
def nested_closure():
a = 20
b = 21
@jit('object_()')
def c1():
b = 10
@jit('void()')
def c2():
print(str(a) + ' ' + str(b))
return c2
@jit('void()')
def c3():
print(b)
return c1, c3
__doc__ = """
>>> error1()
Traceback (most recent call last):
...
NumbaError: ...: Closure must be decorated with 'jit' or 'autojit'
>>> error2()
Traceback (most recent call last):
...
NumbaError: ...: Dynamic closures not yet supported, use @jit
>>> error3()
Traceback (most recent call last):
...
NumbaError: ...: local variable 'inner' referenced before assignment
>>> error4()
Traceback (most recent call last):
...
NumbaError: ...: Expected 3 arguments, got 4
>>> error5()
Traceback (most recent call last):
...
NumbaError: ...: Got multiple values for positional argument 'a'
Test closures
>>> closure1().__name__
'inner'
>>> closure1()()
Traceback (most recent call last):
...
TypeError: function takes exactly 1 argument (0 given)
>>> closure1()(object())
Traceback (most recent call last):
...
TypeError: an integer is required
>>> closure1()(10.0)
10
>>> closure2()
10
>>> closure3()
12
>>> func = closure4()
>>> print(func.__name__)
inner
>>> field, = func.__closure__._numba_attrs._fields_
>>> import ctypes
>>> print((field[0], field[1] == ctypes.c_int))
('a', True)
>>> print(func.__closure__._numba_attrs.a)
12
>>> func()
12
>>> c1, c3 = nested_closure()
>>> c1.__name__
'c1'
>>> c3.__name__
'c3'
>>> c1().__name__
'c2'
>>> c1()()
20 10
>>> c3()
21
"""
@autojit
def closure_arg(a):
@jit('object_(object_)')
def closure1(b):
print(str(a) + ' ' + str(b))
x = 10 + int_(b)
@jit('object_(object_)')
def closure2(c):
print(str(a) + ' ' + str(b) + ' ' + str(c) + ' ' + str(x))
y = double(x) + double(c)
@jit('void(object_)')
def closure3(d):
print(str(a) + ' ' + str(b) + ' ' + str(c) + ' ' + str(d) + ' ' + str(x) + ' ' + str(y))
return closure3
return closure2
return closure1
__doc__ += \
"""
>>> closure1 = closure_arg(1)
>>> closure1.__name__
'closure1'
>>> closure2_1 = closure1(2)
1 2
>>> closure2_1.__name__
'closure2'
>>> closure2_2 = closure1(3)
1 3
>>> closure2_2.__name__
'closure2'
>>> closure3_1 = closure2_1(4)
1 2 4 12
>>> closure3_1.__name__
'closure3'
>>> closure3_2 = closure2_2(5)
1 3 5 13
>>> closure3_2.__name__
'closure3'
>>> closure3_1(6)
1 2 4 6 12 16.0
>>> closure3_2(7)
1 3 5 7 13 18.0
"""
@autojit
def closure_arg_simple(a):
@jit('object_(object_)')
def inner(b):
print(str(a) + ' ' + str(b))
@jit('void(object_)')
def inner_inner(c):
print(str(a) + ' ' + str(b) + ' ' + str(c))
return inner_inner
return inner
__doc__ += """
>>> closure_arg_simple(10)(20)(30)
10 20
10 20 30
"""
@autojit
def closure_skip_level(a):
@jit('object_()')
def inner():
@jit('void()')
def inner_inner():
print(str(a))
return inner_inner
return inner
__doc__ += """
>>> closure_skip_level(10)()()
10
"""
@autojit
def objects(s):
@jit('object_()')
def inner():
return s.upper()
return inner
__doc__ += """
>>> objects("hello")()
'HELLO'
"""
@autojit
def wrong_signature(s):
@jit('object_(object_)')
def inner():
return s.upper()
return inner
__doc__ += """
>>> try_(wrong_signature, "foo")
NumbaError: ...: Expected 1 arguments type(s), got 0
"""
@autojit
def wrong_restype():
@jit('object_()')
def inner():
pass
return inner
__doc__ += """
>>> try_(wrong_restype)
NumbaError: ...: Function with non-void return does not return a value
"""
#
### Test signatures like @double(object_)
#
@autojit
def signature_dec():
@object_()
def inner():
return "hello"
return inner
__doc__ += """
>>> signature_dec()()
'hello'
"""
@autojit
def wrong_signature2(s):
@object_(object_)
def inner():
return s.upper()
return inner
__doc__ += """
>>> try_(wrong_signature2, "foo")
NumbaError: ...: Expected 1 arguments type(s), got 0
"""
@autojit
def get_closure(arg):
@void()
def closure():
print(arg)
closure()
return closure
@autojit
def test_call_closure():
closure = get_closure(10.0)
closure() # TODO: This still goes through the object layer, amend
__doc__ += """
>>> test_call_closure()
10.0
10.0
"""
@autojit
def test_call_closure_from_closure():
closure = get_closure(10.0)
@void()
def inner():
closure()
return inner
__doc__ += """
>>> test_call_closure_from_closure()()
10.0
10.0
"""
@autojit
def test_closure_loop():
"""
>>> test_closure_loop()
0 3
1 3
2 3
<BLANKLINE>
0 3
1 3
2 3
"""
cellvar = 3
@jit(void())
def inner():
for i in range(cellvar):
print(str(i) + ' ' + str(cellvar))
print('')
for i in range(cellvar):
for j in range(cellvar):
if i == j:
print(str(i) + ' ' + str(cellvar))
inner()
@numba.autojit(locals=dict(var=int_), warn=False)
def test_closure_outer_locals():
"""
>>> test_closure_outer_locals()
"""
var = 10
@jit(void())
def inner():
var = "hello"
inner()
#__doc__ = rewrite_doc(__doc__)
def try_(func, *args):
try:
func(*args)
except NumbaError as e:
print("%s%s: %s" % ('numba.error.' if numba.PY3 else '',
type(e).__name__, e))
if __name__ == '__main__':
# closure1 = closure_arg(1)
# print closure1.__name__
# closure1(10)
# test_call_closure()
# closure4()
# signature_dec()()
# test_closure_outer_locals()
# test_closure_loop()
# test_closure_outer_locals()
# test_call_closure_from_closure()()
# wrong_restype()
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_closure_type_inference
"""
>>> from numba.tests.closures import test_closure_type_inference
"""
import numpy as np
from numba import *
from numba import error
from numba.testing.test_support import testmod
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print((a * b))
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
@autojit
def test_cellvar_promotion_error(a):
"""
>>> from numba.minivect import minierror
>>> try:
... test_cellvar_promotion_error(10)
... except error.UnpromotableTypeError as e:
... print(sorted(e.args, key=str))
[(int, string)]
"""
b = int(a) * 2
@jit(void())
def inner():
print((a * b))
inner()
a = np.empty(10, dtype=np.double)
b = "hello"
return inner
#test_cellvar_promotion(10)
#test_cellvar_promotion_error(10)
testmod()
########NEW FILE########
__FILENAME__ = conftest
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
#def pytest_ignore_collect(path, config):
# print config.option
########NEW FILE########
__FILENAME__ = test_callbacks
import os
import ctypes
from numba import *
import numba
try:
import cffi
ffi = cffi.FFI()
except ImportError:
ffi = None
root = os.path.dirname(os.path.abspath(__file__))
include_dirs = [root]
# ______________________________________________________________________
def test():
if ffi is not None:
run_callback()
# ______________________________________________________________________
# Tests
@jit(int_(int_, int_))
def numba_callback(a, b):
return a * b
@autojit #(nopython=True)
def call_cffi_func(eat_callback):
return eat_callback(numba_callback)
def run_callback():
# Define C function taking a callback
ffi.cdef("typedef int (*callback_t)(int, int);")
ffi.cdef("typedef int (*eat_callback_t)(callback_t);")
ffi.cdef("eat_callback_t get_eat_callback();", override=True)
ffi.cdef("int printf(char *, ...);")
lib = ffi.verify("""
typedef int (*callback_t)(int, int);
typedef int (*eat_callback_t)(callback_t);
int eat_callback(callback_t callback) {
return callback(5, 2);
}
eat_callback_t get_eat_callback() {
return (callback_t) eat_callback;
}
""", include_dirs=include_dirs)
# CFFI returns builtin methods instead of CData functions for
# non-external functions. Get the CDATA function through an indirection.
eat_callback = lib.get_eat_callback()
assert call_cffi_func(eat_callback) == 10
# ______________________________________________________________________
if __name__ == "__main__":
test()
########NEW FILE########
__FILENAME__ = test_cffi_call
import os
import ctypes
import doctest
from numba import *
import numba
try:
import cffi
ffi = cffi.FFI()
except ImportError:
ffi = None
# ______________________________________________________________________
def test_cffi_calls():
if ffi is not None:
make_cffi_calls()
# ______________________________________________________________________
# Tests
@autojit(nopython=True)
def call_cffi_func(func, value):
return func(value)
def make_cffi_calls():
# Test printf for nopython and no segfault
ffi.cdef("int printf(char *, ...);", override=True)
lib = ffi.dlopen(None)
printf = lib.printf
call_cffi_func(printf, "Hello world!\n")
# ______________________________________________________________________
if __name__ == "__main__":
test_cffi_calls()
########NEW FILE########
__FILENAME__ = test_ctypes_call
import os
import ctypes
import ctypes.util
from numba import *
@autojit(nopython=True)
def call_ctypes_func(func, value):
return func(value)
def test_ctypes_calls():
# Test puts for no segfault
libc = ctypes.CDLL(ctypes.util.find_library('c'))
puts = libc.puts
puts.argtypes = [ctypes.c_char_p]
call_ctypes_func(puts, "Hello World!")
# Test ceil result
libm = ctypes.CDLL(ctypes.util.find_library('m'))
ceil = libm.ceil
ceil.argtypes = [ctypes.c_double]
ceil.restype = ctypes.c_double
result = call_ctypes_func(ceil, 10.1)
assert result == 11.0, result
def test_str_return():
try:
import errno
except ImportError:
return
libc = ctypes.CDLL(ctypes.util.find_library('c'))
strerror = libc.strerror
strerror.argtypes = [ctypes.c_int]
strerror.restype = ctypes.c_char_p
expected = os.strerror(errno.EACCES)
got = call_ctypes_func(strerror, errno.EACCES)
assert expected == got, (expected, got)
if __name__ == "__main__":
test_ctypes_calls()
test_str_return()
########NEW FILE########
__FILENAME__ = issue_305_helper1
from numba import jit, autojit
@autojit
def test2():
return 0
########NEW FILE########
__FILENAME__ = issue_305_helper2
from numba import jit, autojit
@autojit
def test2():
return 0
########NEW FILE########
__FILENAME__ = test_compare
import numba
from numba import *
tests = []
def _make_test(f):
def test():
for argtype in [object_, float_, double]:
# f_ = autojit(f)
f_ = jit(numba.function(None, [argtype]))(f)
for v in range(-10,10):
assert f_(v)==f(v)
assert f_(float(v))==f(float(v))
test.__name__ = f.__name__
tests.append(test)
return test
@_make_test
def test_single_comparator(a):
return a<4
@_make_test
def test_single_float_comparator(a):
return a<4.0
@_make_test
def test_multiple_comparators(a):
return 0<a<=4
@_make_test
def test_multiple_comparators_mixed_types(a):
return 0.0<a<=10
@_make_test
def test_compare_span_basic_blocks(a):
a = a + 1j
if abs(a) > 2:
return 0 < abs(a) < 10
return not a.real > 0
@_make_test
def test_compare_while(a):
while True:
while True:
break
else:
print("hello")
return a * 3
break
return a * 2
class Class(object):
def __eq__(self, other):
raise Exception("I cannot compare!")
@autojit
def compare_error():
return 0 == Class()
def test_compare_error():
try:
compare_error()
except Exception as e:
assert str(e) == "I cannot compare!", str(e)
else:
raise Exception("Expected exception!")
if __name__ == "__main__":
for test in tests:
test()
test_compare_error()
########NEW FILE########
__FILENAME__ = test_issue_112
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit, register_callable, npy_intp, typesystem
restype = typesystem.tuple_(npy_intp[:, :], 2)
@register_callable(restype(npy_intp[:], npy_intp[:]))
def meshgrid(x, y):
return np.meshgrid(x, y)
@autojit
def run_meshgrid(size):
x1d = np.arange(-size, size + 1)
y1d = np.arange(-size, size + 1)
x, y = meshgrid(x1d, y1d)
return x, y
if __name__ == '__main__':
size = 3
nb_gauss = run_meshgrid(size)
########NEW FILE########
__FILENAME__ = test_issue_117
# -*- coding: utf-8 -*-
'''
This bug is nondeterministic. Run it a few times.
'''
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit, typeof, int32
@autojit
def jenks_matrices_no_return(data):
variance = 0.0
for l in range(2, len(data) + 1):
sum = 0.0
sum_squares = 0.0
w = 0.0
for m in range(1, l + 1):
lower_class_limit = l - m + 1
val = data[lower_class_limit - 1]
w += 1
sum += val
sum_squares += val * val
variance = sum_squares - (sum * sum) / w
@autojit
def jenks_matrices_with_return(data):
variance = 0.0
for l in range(2, len(data) + 1):
sum = 0.0
sum_squares = 0.0
w = 0.0
for m in range(1, l + 1):
lower_class_limit = l - m + 1
val = data[lower_class_limit - 1]
w += 1
sum += val
sum_squares += val * val
variance = sum_squares - (sum * sum) / w
return variance
def test():
data = np.empty(10, dtype=np.float64)
# File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 493, in resolve_variable_types
# start_point.simplify()
# File "/Users/sklam/dev/numba/numba/typesystem/ssatypes.py", line 625, in simplify
# assert False
jenks_matrices_no_return(data)
# File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 495, in resolve_variable_types
# self.remove_resolved_type(start_point)
# File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 393, in remove_resolved_type
# assert not type.is_scc
jenks_matrices_with_return(data)
if __name__ == "__main__":
test()
########NEW FILE########
__FILENAME__ = test_issue_118
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit
@autojit
def get_jenks_breaks(data, lower_class_limits, n_classes):
k = int(len(data) - 1)
for countNum in range(n_classes):
# problem inferring `k` in slice
k = int(lower_class_limits[k, countNum] - 1)
def test():
n_classes = 5
data = np.ones(10)
lower_class_limits = np.empty((data.size + 1, n_classes + 1))
#
# File "/Users/sklam/dev/numba/numba/type_inference/infer.py", line 1055, in visit_Subscript
# if slice_type.variable.type.is_unresolved:
# File "/Users/sklam/dev/numba/numba/minivect/minitypes.py", line 492, in __getattr__
# return getattr(type(self), attr)
#AttributeError: type object 'tuple_' has no attribute 'variable'
print((get_jenks_breaks(data, lower_class_limits, n_classes)))
if __name__ == '__main__':
test()
########NEW FILE########
__FILENAME__ = test_issue_125
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit, jit, double, void, uint32, npy_intp, typeof
def uint_int_div_ary(elts, normdist, seed):
for i in xrange(elts.shape[0]):
# Problem with using sext instead of zext for uint32
elt = (seed[i] // uint32(normdist.shape[0]))
elts[i] = elt
def test_uint_int_div_ary():
NPATHS = 10
normdist = np.empty(1000) #np.random.normal(0., 1., 1000)
seed = np.arange(0x80000000, 0x80000000 + NPATHS, dtype=np.uint32)
gold = np.empty(NPATHS, dtype=np.int32)
got = gold.copy()
uint_int_div_ary(gold, normdist, seed)
print('expect %s' % gold)
sig = void(uint32[:], double[:], uint32[:])
numba_func = jit(sig)(uint_int_div_ary)
numba_func(got, normdist, seed)
print('got %s' % got)
assert all(gold == got)
if __name__ == '__main__':
test_uint_int_div_ary()
########NEW FILE########
__FILENAME__ = test_issue_126
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import autojit
import math
import unittest
@autojit(nopython=True)
def nopython_abs(x):
return abs(x)
@autojit(nopython=True)
def nopython_sin(x):
return math.sin(x)
@autojit(nopython=True)
def nopython_cos(x):
return math.cos(x)
@autojit(nopython=True)
def nopython_log(x):
return math.log(x)
@autojit(nopython=True)
def nopython_exp(x):
return math.exp(x)
class Test(unittest.TestCase):
def test_nopython_abs(self):
x = -1
y = nopython_abs(x)
self.assertAlmostEqual(y, abs(x))
def test_nopython_sin(self):
x = -1
y = nopython_sin(x)
self.assertAlmostEqual(y, math.sin(x))
def test_nopython_cos(self):
x = -1
y = nopython_cos(x)
self.assertAlmostEqual(y, math.cos(x))
def test_nopython_log(self):
x = 10
y = nopython_log(x)
self.assertAlmostEqual(y, math.log(x))
    def test_nopython_exp(self):
x = 10
y = nopython_exp(x)
self.assertAlmostEqual(y, math.exp(x))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_issue_138
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit, jit, double, void, int32
@autojit
def limiter(x, n):
if x + 1 > n:
return n
else:
return x + 1
@jit(void(double[:,:,:], double[:,:,:], double[:,:,:], int32, int32, int32))
def gather_convolution_core(A, B, C, x, y, z):
dimA = A.shape[0]
bx = limiter(x, dimA)
by = limiter(y, dimA)
bz = limiter(z, dimA)
for x1 in range(bx):
for x2 in range(bx):
if x1 + x2 == x:
for y1 in range(by):
for y2 in range(by):
if y1 + y2 == y:
for z1 in range(bz):
for z2 in range(bz):
if z1 + z2 == z:
C[x, y, z] += A[x1, y1, z1] * B[x2, y2, z2]
def gather_convolution(A, B, C):
from itertools import product
dimC = C.shape[0]
for x, y, z in product(range(dimC), range(dimC), range(dimC)):
gather_convolution_core(A, B, C, x, y, z)
def test_convolution():
# Creating some fake data to test this problem
s = 4
array_a = np.random.rand(s ** 3).reshape(s, s, s)
array_b = np.random.rand(s ** 3).reshape(s, s, s)
dimA = array_a.shape[0]
dimB = array_b.shape[0]
dimC = dimA + dimB
array_c = np.zeros((dimC, dimC, dimC))
gather_convolution(array_a, array_b, array_c)
if __name__ == '__main__':
test_convolution()
########NEW FILE########
__FILENAME__ = test_issue_159
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
import numpy as np
@jit(void(f8[:]))
def ff(T):
for j in range(100): #reduce 100 to 10 get no error
T[j]=1.0
x=np.ones(100,dtype=np.double)
ff(x)
assert np.all(x == 1.0)
########NEW FILE########
__FILENAME__ = test_issue_161
# -*- coding: utf-8 -*-
"""
>>> tuple_unpacking_error(2)
Traceback (most recent call last):
...
NumbaError: ...: Cannot unpack value of type int
"""
from __future__ import print_function, division, absolute_import
import numba
from numba import *
import numpy as np
@autojit(warnstyle="simple")
def tuple_unpacking_error(obj):
a, b = obj
print(a, b)
if __name__ == "__main__":
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_issue_163
# -*- coding: utf-8 -*-
# Thanks to Gaรซtan de Menten
"""
>>> test_valid_compare()
>>> invalid_compare(np.arange(10))
Traceback (most recent call last):
...
NumbaError: 27:11: Cannot determine truth value of boolean array (use any or all)
"""
import numpy as np
from numba import autojit, jit, double, b1
def array(a):
return a > 0.1
#fails too
#array_nb = jit(b1[:](double[:]))(array)
def test_valid_compare():
array_nb = autojit(array)
    a = np.random.rand(10 ** 6)  # integer size; newer NumPy rejects float lengths
assert np.allclose(array(a), array_nb(a))
@autojit(warnstyle="simple")
def invalid_compare(a):
return 1 < a < 2
if __name__ == '__main__':
from numba.testing import test_support
test_support.testmod()
########NEW FILE########
__FILENAME__ = test_issue_164
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import jit, double
def expr_py(a, b, c):
length = len(a)
result = np.empty(length, dtype=np.float64)
for i in range(length):
result[i] = b[i] ** 2 if a[i] > 0.1 else c[i] ** 3
return result
expr_nb = jit(double[:](double[:], double[:], double[:]))(expr_py)
size = 10 ** 6  # integer size; newer NumPy rejects float array lengths
a = np.random.rand(size)
b = np.random.rand(size)
c = np.random.rand(size)
assert np.allclose(expr_nb(a, b, c), expr_py(a, b, c))
########NEW FILE########
__FILENAME__ = test_issue_165
# -*- coding: utf-8 -*-
"""
Test using a dtype that is not an actual NumPy dtype (np.bool is the
builtin bool).
"""
# Thanks to Gaรซtan de Menten
import numpy as np
from numba import autojit, jit, double, b1
def loop(a):
length = len(a)
result = np.empty(length, dtype=np.bool)
for i in range(length):
result[i] = a[i] > 0.1
return result
def test_nondtype_dtype():
loop_nb = jit(b1[:](double[:]))(loop)
    a = np.random.rand(10 ** 6)  # integer size; newer NumPy rejects float lengths
assert np.allclose(loop_nb(a), loop(a))
if __name__ == '__main__':
test_nondtype_dtype()
########NEW FILE########
__FILENAME__ = test_issue_169
# -*- coding: utf-8 -*-
"""
Test binding of autojit methods.
"""
from __future__ import print_function, division, absolute_import
from numba import *
class A(object):
@autojit
def a(self, arg):
return self * arg
def __mul__(self, other):
return 10 * other
assert A().a(10) == 100
########NEW FILE########
__FILENAME__ = test_issue_172
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
import numba as nb
from numba import jit, int_, void, float64
@jit
class Foo(object):
@void(int_)
def __init__(self, size):
self.arr = np.zeros(size, dtype=float)
self.type = nb.typeof(self.arr)
assert Foo(10).type == float64[:]
########NEW FILE########
__FILENAME__ = test_issue_184
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from numba import *
import numpy as np
import numba
@jit(object_(double[:, :]))
def func2(A):
L = []
n = A.shape[0]
for i in range(10):
for j in range(10):
temp = A[i-n : i+n+1, j-2 : j+n+1]
L.append(temp)
return L
A = np.empty(1000000, dtype=np.double).reshape(1000, 1000)
refcnt = sys.getrefcount(A)
func2(A)
new_refcnt = sys.getrefcount(A)
assert refcnt == new_refcnt
normal_result = list(map(sys.getrefcount, func2.py_func(A)))
numba_result = list(map(sys.getrefcount, func2(A)))
assert normal_result == numba_result
########NEW FILE########
__FILENAME__ = test_issue_185
# -*- coding: utf-8 -*-
# from __future__ import division, absolute_import
# Thanks to Neal Becker
import numpy as np
from numba import *
from numba.vectorize import vectorize
from math import exp, log1p
@vectorize([f8(f8,f8)])
def log_exp_sum2 (a, b):
if a >= b:
return a + (exp (-(a-b)))
else:
return b + (exp (-(b-a)))
## return max (a, b) + log1p (exp (-abs (a - b)))
#@autojit
@jit(f8[:,:] (f8[:,:]))
def log_exp_sum (u):
s = u.shape[1] # Test wraparound when implemented!
if s == 1:
return u[...,0]
elif s == 2:
return log_exp_sum2 (u[...,0], u[...,1])
else:
return log_exp_sum2 (
log_exp_sum (u[...,:s/2]),
log_exp_sum (u[...,s/2:]))
from timeit import timeit
L = 1000
N = 100
u = np.tile (np.log (np.ones (L)/L), (N, 1))
#v = log_exp_sum (u)
from timeit import timeit
print(timeit(
'log_exp_sum(u)', 'from __main__ import u, log_exp_sum', number=50))
########NEW FILE########
__FILENAME__ = test_issue_192
import unittest
from numba import jit
def loop_all(begin, end, mask):
hash = 0
for i in xrange(begin, end):
hash ^= i | ((hash << 1) & mask)
return hash
def loop_all_simpler(begin, end):
hash = 0
for i in xrange(begin, end):
hash ^= begin + hash
return hash
class TestBitwiseLoop(unittest.TestCase):
def test_loop_all_simpler(self):
fn = jit('uint32(uint32, uint32)')(loop_all_simpler)
msg = "a=%s b=%s got=%s exp=%s"
a, b = 0, 2**16 - 1
exp = fn.py_func(a, b)
got = fn(a, b)
self.assertTrue(exp == got, msg % (a, b, got, exp))
def test_loop_all(self):
fn = jit('uint32(uint32, uint32, uint32)')(loop_all)
msg = "a=%s b=%s got=%s exp=%s"
a, b = 0, 2**16 - 1
c = 0xffffffff
exp = fn.py_func(a, b, c)
got = fn(a, b, c)
self.assertTrue(exp == got, msg % (a, b, got, exp))
if __name__ == '__main__':
# TestBitwiseLoop('test_loop_all_simpler').debug()
unittest.main()
########NEW FILE########
__FILENAME__ = test_issue_198
# -*- coding: utf-8 -*-
"""
>>> f(1, 2)
(2, 1)
"""
from __future__ import print_function, division, absolute_import
import numba
from numba.testing import testmod
@numba.autojit
def f(a, b):
for i in range(1):
b, a = a, b
return a, b
testmod()
########NEW FILE########
__FILENAME__ = test_issue_204
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import autojit, jit
@autojit
def closure_modulo(a, b):
@jit('int32()')
def foo():
return a % b
return foo()
def test_closure_modulo():
assert closure_modulo(100, 48) == 4
if __name__ == '__main__':
test_closure_modulo()
########NEW FILE########
__FILENAME__ = test_issue_212
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
import numpy
n = 50
steps = 10000
@numba.autojit()
def calc():
value = numpy.zeros((n,n))
for i in range(n):
for j in range(n):
value[i][j] += 1.0
return value
def leaky():
for i in range(steps):
value = calc()
leaky()
########NEW FILE########
__FILENAME__ = test_issue_215
# -*- coding: utf-8 -*-
"""
>>> unpack_loop()
Traceback (most recent call last):
...
NumbaError: ...: Only a single target iteration variable is supported at the moment
"""
from __future__ import print_function, division, absolute_import
import numba
from numba.testing import testmod
@numba.autojit(warnstyle="simple")
def unpack_loop():
x = [(1,2),(3,4)]
for (a, b) in x:
print(a + b)
testmod()
########NEW FILE########
__FILENAME__ = test_issue_216
# -*- coding: utf-8 -*-
"""
>>> with_stat("foo", "bar")
Traceback (most recent call last):
...
NumbaError: ...: Only 'with python' and 'with nopython' is supported at this moment
"""
from __future__ import print_function, division, absolute_import
import numba
from numba.testing import testmod
@numba.autojit(warnstyle="simple")
def with_stat(fn, msg):
with open(fn, 'w') as fp:
fp.write(msg)
testmod()
########NEW FILE########
__FILENAME__ = test_issue_249
"""
Thanks to Aron Ahmadia
"""
from __future__ import division, print_function
import sys
import math
import numba
from numba import jit, autojit, size_t
import numpy as np
import numpy.testing as npt
try:
from skimage import img_as_float
except ImportError as e:
print("skimage not available, skipping")
sys.exit()
SCALAR_DTYPE = np.float64
# This doesn't work :(
# SCALAR_TYPE = numba.typeof(SCALAR_DTYPE)
SCALAR_TYPE = numba.float64
def window_floor(idx, radius):
if radius > idx:
return 0
else:
return idx - radius
def window_ceil(idx, ceil, radius):
if idx + radius > ceil:
return ceil
else:
return idx + radius
def distance(image, r0, c0, r1, c1):
d = image[r0, c0, 0] - image[r1, c1, 0]
s = d * d
for i in range(1, 3):
d = image[r0, c0, i] - image[r1, c1, i]
s += d * d
return math.sqrt(s)
def pixel_distance(pixel1, pixel2):
d = pixel1[0] - pixel2[0]
s = d*d
for i in range(1, 3):
d = pixel1[i] - pixel2[i]
s += d*d
return math.sqrt(s)
def np_distance(pixel1, pixel2):
return np.linalg.norm(pixel1-pixel2, 2)
sqrt_3 = math.sqrt(3.0)
def g(d):
return 1.0 - d/sqrt_3
def np_g(x, y):
return 1.0 - np_distance(x, y)/sqrt_3
def kernel(image, state, state_next, window_radius):
changes = 0
height = image.shape[0]
width = image.shape[1]
for j in xrange(width):
for i in xrange(height):
winning_colony = state[i, j, 0]
defense_strength = state[i, j, 1]
for jj in xrange(window_floor(j, window_radius),
window_ceil(j+1, width, window_radius)):
for ii in xrange(window_floor(i, window_radius),
window_ceil(i+1, height, window_radius)):
if (ii == i and jj == j):
continue
d = image[i, j, 0] - image[ii, jj, 0]
s = d * d
for k in range(1, 3):
d = image[i, j, k] - image[ii, jj, k]
s += d * d
gval = 1.0 - math.sqrt(s)/sqrt_3
attack_strength = gval * state[ii, jj, 1]
if attack_strength > defense_strength:
defense_strength = attack_strength
winning_colony = state[ii, jj, 0]
changes += 1
state_next[i, j, 0] = winning_colony
state_next[i, j, 1] = defense_strength
return changes
def growcut(image, state, max_iter=20, window_size=3):
"""Grow-cut segmentation (Numba accelerated).
Parameters
----------
    image : (M, N, 3) ndarray
        Input RGB image.
state : (M, N, 2) ndarray
Initial state, which stores (foreground/background, strength) for
each pixel position or automaton. The strength represents the
certainty of the state (e.g., 1 is a hard seed value that remains
constant throughout segmentation).
max_iter : int, optional
The maximum number of automata iterations to allow. The segmentation
may complete earlier if the state no longer varies.
window_size : int, optional
Size of the neighborhood window.
Returns
-------
mask : ndarray
Segmented image. A value of zero indicates background, one foreground.
"""
image = img_as_float(image)
window_radius = (window_size - 1) // 2
changes = 1
n = 0
state_next = np.empty_like(state)
while changes > 0 and n < max_iter:
changes = 0
n += 1
changes = kernel(image, state, state_next, window_radius)
state_next, state = state, state_next
#print n, changes
print('.', end='')
print('')
return state_next[:, :, 0]
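# --- Usage sketch (added for illustration; hypothetical, not part of the original
# test). It assumes a tiny RGB image with one background seed at (0, 0) and one
# foreground seed at (3, 3); `growcut` returns the per-pixel winning colony
# (0 = background, 1 = foreground).
def _growcut_usage_sketch():
    img = np.zeros((4, 4, 3), dtype=SCALAR_DTYPE)
    img[2:, 2:, :] = 1.0                    # bright lower-right block
    seed = np.zeros((4, 4, 2), dtype=SCALAR_DTYPE)
    seed[0, 0] = (0.0, 1.0)                 # background colony, full strength
    seed[3, 3] = (1.0, 1.0)                 # foreground colony, full strength
    return growcut(img, seed, max_iter=10, window_size=3)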
def create_numba_funcs(scalar_type=SCALAR_TYPE):
this = sys.modules[__name__]
pixel_type = scalar_type[:]
image_type = scalar_type[:, :, :]
state_type = scalar_type[:, :, :]
this._numba_window_floor = jit(nopython=True,
argtypes=[size_t, size_t],
restype=size_t)(_py_window_floor)
this._numba_window_ceil = jit(nopython=True,
argtypes=[size_t, size_t, size_t],
restype=size_t)(_py_window_ceil)
this._numba_distance = jit(nopython=True,
argtypes=[image_type,
size_t, size_t, size_t, size_t],
restype=scalar_type)(_py_distance)
this._numba_np_distance = jit(nopython=False,
argtypes=[pixel_type, pixel_type],
restype=scalar_type)(_py_np_distance)
this._numba_g = jit(nopython=True,
argtypes=[scalar_type],
restype=scalar_type)(_py_g)
this._numba_np_g = jit(nopython=False,
argtypes=[pixel_type, pixel_type],
restype=scalar_type)(_py_np_g)
this._numba_kernel = autojit(nopython=True)(_py_kernel)
# the below code does not work
# this._numba_kernel = jit(nopython=False,
# argtypes=[image_type,
# state_type,
# state_type,
# size_t],
# restype=int_,
# attack_strength=scalar_type,
# defense_strength=scalar_type,
# winning_colony=scalar_type)(_py_kernel)
def debug():
this = sys.modules[__name__]
this.window_floor = _py_window_floor
this.window_ceil = _py_window_ceil
this.distance = _py_distance
this.np_distance = _py_np_distance
this.g = _py_g
this.np_g = _py_np_g
this.kernel = _py_kernel
def optimize():
this = sys.modules[__name__]
this.window_floor = _numba_window_floor
this.window_ceil = _numba_window_ceil
this.distance = _numba_distance
this.np_distance = _numba_np_distance
this.g = _numba_g
this.np_g = _numba_np_g
this.kernel = _numba_kernel
# protected Pythonic versions of code:
_py_window_floor = window_floor
_py_window_ceil = window_ceil
_py_distance = distance
_py_np_distance = np_distance
_py_g = g
_py_np_g = np_g
_py_kernel = kernel
def test_window_floor_ceil():
assert 3 == window_floor(4, 1)
assert 0 == window_floor(1, 4)
assert 3 == window_ceil(3, 3, 1)
assert 5 == window_ceil(4, 5, 1)
def test_distance():
image = np.zeros((2, 2, 3), dtype=SCALAR_DTYPE)
image[0, 1] = [1, 1, 1]
image[1, 0] = [0.5, 0.5, 0.5]
assert 0.0 == distance(image, 0, 0, 0, 0)
assert abs(math.sqrt(3) - distance(image, 0, 0, 0, 1)) < 1e-15
assert abs(math.sqrt(3/4) - distance(image, 0, 1, 1, 0)) < 1e-15
pixel1 = np.asarray([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)
pixel2 = np.asarray([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)
pixel3 = np.asarray([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)
assert 0.0 == np_distance(pixel1, pixel1)
assert abs(math.sqrt(3) - np_distance(pixel1, pixel2)) < 1e-15
assert abs(math.sqrt(3/4) - np_distance(pixel2, pixel3)) < 1e-15
def test_g():
image = np.zeros((2, 2, 3), dtype=SCALAR_DTYPE)
image[0, 1] = [1, 1, 1]
image[1, 0] = [0.5, 0.5, 0.5]
assert 1.0 == g(distance(image, 0, 0, 0, 0))
assert abs(0 - g(distance(image, 0, 0, 0, 1))) < 1e-15
assert abs(0.5 - g(distance(image, 0, 1, 1, 0))) < 1e-15
pixel1 = np.asarray([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)
pixel2 = np.asarray([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)
pixel3 = np.asarray([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)
assert 1.0 == np_g(pixel1, pixel1)
assert abs(0 - np_g(pixel1, pixel2)) < 1e-15
assert abs(0.5 - np_g(pixel2, pixel3)) < 1e-15
def test_kernel():
image = np.zeros((3, 3, 3), dtype=SCALAR_DTYPE)
state = np.zeros((3, 3, 2), dtype=SCALAR_DTYPE)
state_next = np.empty_like(state)
# colony 1 is strength 1 at position 0,0
# colony 0 is strength 0 at all other positions
state[0, 0, 0] = 1
state[0, 0, 1] = 1
    # window radius 1: colony 1 should propagate to its three neighbors
changes = kernel(image, state, state_next, 1)
assert(3 == changes)
npt.assert_array_equal(state_next[0:2, 0:2], 1)
npt.assert_array_equal(state_next[2, :], 0)
    npt.assert_array_equal(state_next[:, 2], 0)
    # window radius 2: colony 1 should propagate to the entire image
changes = kernel(image, state, state_next, 2)
assert(8 == changes)
npt.assert_array_equal(state_next, 1)
def test():
test_window_floor_ceil()
test_distance()
test_g()
test_kernel()
# create numba versions of code
create_numba_funcs()
if __name__ == "__main__":
# always verify pure Python code first
test()
# then test optimized variants
optimize()
test()
# replace default function calls with numba calls
optimize()
########NEW FILE########
__FILENAME__ = test_issue_252
from numba import *
import numpy as np
@jit(object_(object_[:, :]))
def func(A):
for x in xrange(2):
for y in xrange(2):
items = A[x,y]
if items == None:
continue
for item in items:
print(item)
return A
########NEW FILE########
__FILENAME__ = test_issue_256
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit
def test(a, b):
r = np.empty(3, dtype=bool)
for i in range(len(a)):
r[i] = a[i] != b[i]
return r
test_nb = autojit(test)
a = np.arange(3, dtype=complex)
b = np.arange(3, dtype=complex)
b[1] += 1j
assert np.array_equal(test(a, b), test_nb(a, b))
########NEW FILE########
__FILENAME__ = test_issue_297
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import int_, jit
import numpy
@jit(argtypes=(int_[:],))
def test1(arr):
u = 0
for x in arr:
u += len(arr)
v = 0
for y in arr:
v += len(arr)
return u + v
@jit(argtypes=(int_[:],))
def test2(arr):
s = 0
for i, x in enumerate(arr):
s += i*x
s2 = 0
for i2, x2 in enumerate(arr, 1):
s2 += i2*x2
return s+s2
if __name__ == '__main__':
arr = numpy.arange(1, 4)
assert test1(arr) == test1.py_func(arr)
assert test2(arr) == test2.py_func(arr)
########NEW FILE########
__FILENAME__ = test_issue_305
from __future__ import print_function, division, absolute_import
import os
import contextlib
from numba import jit, autojit
from numba.tests.issues import issue_305_helper1, issue_305_helper2
try:
from imp import reload
except ImportError:
pass
# Thanks to @bfredl
env = { '__file__': __file__, '__name__': __name__ ,
'jit': jit, 'autojit': autojit }
root = os.path.dirname(os.path.abspath(__file__))
fn1 = os.path.join(root, 'issue_305_helper1.py')
fn2 = os.path.join(root, 'issue_305_helper2.py')
new_source1 = """
from numba import jit, autojit
@jit('i8()')
def test():
return 1
"""
new_source2 = """
from numba import jit, autojit
@autojit
def test2():
return 1
"""
@contextlib.contextmanager
def newsource(fn, mod, src, reexec=True):
    # Write the replacement source for this helper module (previously this always
    # wrote new_source1, leaving new_source2 unused), then restore the original.
    old_source = open(fn).read()
    try:
        open(fn, 'w').write(src)
        if reexec: reload(mod)
        yield
    finally:
        open(fn, 'w').write(old_source)
def test_fetch_latest_source():
"""
When reloading new versions of the same module into the same session (i.e.
an interactive ipython session), numba sometimes gets the wrong version of
the source from inspect.getsource()
"""
    with newsource(fn1, issue_305_helper1, new_source1):
assert issue_305_helper1.test() == issue_305_helper1.test.py_func()
def test_no_auto_reload():
"""
In this case autojit 'sees' the new version of the source even if it
hasn't been reloaded. This could be fixed by fetching the ast directly
    at declaration time rather than at first compilation (2nd commit)
"""
    with newsource(fn2, issue_305_helper2, new_source2, reexec=False):
print(issue_305_helper2.test2(), issue_305_helper2.test2.py_func())
assert issue_305_helper2.test2() == issue_305_helper2.test2.py_func()
test_fetch_latest_source()
test_no_auto_reload()
########NEW FILE########
__FILENAME__ = test_issue_309
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
@numba.jit(numba.float32(numba.float32))
def trial0(x):
if x > 0.0:
return x * 3.0 + 5.0
else:
raise IndexError
try:
trial0(-1.0)
except IndexError:
pass
else:
raise Exception("expected indexerror")
assert trial0(2.0) == 11.0
########NEW FILE########
__FILENAME__ = test_issue_313
# -*- coding: utf-8 -*-
from numba import void, double, jit
import numpy as np
# thanks to @ufechner7
def multiassign(res0, res1, val0, val1):
res0[:], res1[:] = val0[:], val1[:]
if __name__ == "__main__":
multiassign1 = jit(void(double[:], double[:], double[:], double[:]))(multiassign)
res0 = np.zeros(2)
res1 = np.zeros(2)
val0 = np.array([0.0,0.0])
val1 = np.array([1.0,1.0])
multiassign1(res0, res1, val0, val1)
assert (res0 == val0).all()
assert (res1 == val1).all()
########NEW FILE########
__FILENAME__ = test_issue_321
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from numba import *
meta = None
if sys.version_info[:2] > (2, 6):
try:
import meta
except ImportError:
pass
if meta:
f = autojit(lambda x: x * x)
assert f(10) == 100
########NEW FILE########
__FILENAME__ = test_issue_50
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import jit
from numpy import zeros
import unittest
@jit()
def test():
foo = zeros((1,))
foo[0] = 0
@jit()
def test2():
foo = [0]
foo[0] = 0
class TestIssue50(unittest.TestCase):
def test_1d_arr_setitem(self):
self.assertEquals(test(), None)
def test_list_setitem(self):
self.assertEqual(test2(), None)
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_issue_56
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba.testing import test_support
import numpy
import unittest
# NOTE: See also numba.tests.ops.test_binary_ops
def maxstar1d(a, b):
M = a.shape[0]
res = numpy.empty(M)
for i in range(M):
res[i] = numpy.max(a[i], b[i]) + numpy.log1p(
numpy.exp(-numpy.abs(a[i] - b[i])))
return res
class TestIssue56(unittest.TestCase):
def test_maxstar1d(self):
test_fn = jit('f8[:](f8[:],f8[:])')(maxstar1d)
test_a = numpy.random.random(10)
test_b = numpy.random.random(10)
self.assertTrue(numpy.allclose(test_fn(test_a, test_b),
maxstar1d(test_a, test_b)))
if __name__ == "__main__":
# TestIssue56("test_maxstar1d").debug()
test_support.main()
########NEW FILE########
__FILENAME__ = test_issue_57
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from numba import *
from numba.testing import test_support
import numpy as np
import math
import unittest
import time
import logging
logger = logging.getLogger(__name__)
def ra_numba(doy, lat):
'''Modified from http://nbviewer.ipython.org/4117896/'''
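    # Added note (an assumption for orientation, not from the original source): this
    # follows the usual FAO-56 style estimate of daily extraterrestrial radiation,
    # with Gsc the solar constant, dr the inverse relative Earth-Sun distance, decl
    # the solar declination for day-of-year `doy`, and ws the sunset hour angle; the
    # final constant factor only rescales the daily total into different units.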
M, N = lat.shape
ra = np.zeros_like(lat)
Gsc = 0.0820
pi = math.pi
dr = 1 + 0.033 * math.cos( 2 * pi / 365 * doy)
decl = 0.409 * math.sin( 2 * pi / 365 * doy - 1.39 )
for i in range(M):
for j in range(N):
ws = math.acos(-1 * math.tan(lat[i,j]) * math.tan(decl))
ra[i,j] = 24 * 60 / pi * Gsc * dr * (
ws * math.sin(lat[i,j]) * math.sin(decl) +
math.cos(lat[i,j]) * math.cos(decl) *
math.sin(ws)) * 11.6
return ra
def ra_numpy(doy, lat):
Gsc = 0.0820
pi = math.pi
dr = 1 + 0.033 * np.cos( 2 * pi / 365 * doy)
decl = 0.409 * np.sin( 2 * pi / 365 * doy - 1.39 )
ws = np.arccos(-np.tan(lat) * np.tan(decl))
ra = 24 * 60 / pi * Gsc * dr * (
ws * np.sin(lat) * np.sin(decl) + np.cos(lat) * np.cos(decl) *
np.sin(ws)) * 11.6
return ra
class TestIssue57(unittest.TestCase):
@test_support.skip_if((sys.platform == 'darwin' and
sys.version_info[0] >= 3),
"Skip on Darwin Py3.3 for now")
def test_ra_numba(self):
test_fn = jit('f4[:,:](i2,f4[:,:])')(ra_numba)
lat = np.deg2rad(np.ones((5, 5), dtype=np.float32) * 45.)
control_arr = ra_numpy(120, lat)
test_arr = test_fn(120, lat)
self.assertTrue(np.allclose(test_arr, control_arr),
test_arr - control_arr)
def benchmark(test_fn=None, control_fn=None):
if test_fn is None:
test_fn = jit('f4[:,:](i2,f4[:,:])')(ra_numba)
if control_fn is None:
control_fn = ra_numpy
lat = np.deg2rad(np.ones((2000, 2000), dtype=np.float32) * 45.)
t0 = time.time()
control_arr = control_fn(120, lat)
t1 = time.time()
test_arr = test_fn(120, lat)
t2 = time.time()
dt0 = t1 - t0
dt1 = t2 - t1
logger.info('Control time %0.6fs, test time %0.6fs' % (dt0, dt1))
assert np.allclose(test_arr, control_arr), (test_arr - control_arr)
return dt0, dt1
if __name__ == "__main__":
test_support.main()
########NEW FILE########
__FILENAME__ = test_issue_77
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import autojit
@autojit
def slicing_error(X, window_size, i):
return X[max(0, i - window_size):i + 1]
def test_slicing_shape():
X = np.random.normal(0, 1, (20, 2))
i = 0
gold = slicing_error.py_func(X, 10, i)
ans = slicing_error(X, 10, i)
assert gold.shape == ans.shape, (gold.shape, ans.shape)
if __name__ == '__main__':
test_slicing_shape()
########NEW FILE########
__FILENAME__ = test_potential_gcc_error
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
# This tests a potential GCC 4.1.2 miscompile of LLVM.
# The problem is observed as an error in the greedy register allocation pass,
# which resulted in a segfault.
# No such problem in GCC 4.4.6.
from numba import *
import numpy as np
@jit(uint8[:,:](f8, f8, f8, f8, uint8[:,:], int32))
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
return image
########NEW FILE########
__FILENAME__ = test_allmath
# -*- coding: utf-8 -*-
"""
Test all supported math functions
"""
from __future__ import print_function, division, absolute_import
import math
import cmath
import collections
from itertools import chain
import numba as nb
import numpy as np
from numpy.testing import assert_almost_equal
# ______________________________________________________________________
# Common
def run_common(mod, x):
"np, math and cmath"
y0 = mod.sin(x)
y1 = mod.cos(x)
y2 = mod.tan(x)
y3 = mod.sqrt(x)
y4 = mod.sinh(x)
y5 = mod.cosh(x)
y6 = mod.tanh(x)
y7 = mod.log(x)
y8 = mod.log10(x)
y9 = mod.exp(x)
return (y0, y1, y2, y3, y4, y5, y6, y7, y8, y9)
def run_np_math(mod, x):
"np (floating, complex) and math (floating)"
if hasattr(mod, 'expm1'):
y0 = mod.expm1(x)
else:
y0 = 0.0
y1 = mod.log1p(x)
return (y0, y1)
def run_commonf(mod, x):
"np and math"
y0 = mod.floor(x)
y1 = mod.ceil(x)
y2 = mod.hypot(x, x)
return (y0, y1, y2)
# ______________________________________________________________________
# NumPy
def run_np_arc(mod, x):
"np only"
y0 = mod.arccos(x)
y1 = mod.arcsin(x)
y2 = mod.arctan(x)
y3 = mod.arcsinh(x)
y4 = mod.arccosh(1.0/x)
return (y0, y1, y2, y3, y4)
def run_np_misc(mod, x):
"np only"
y0 = mod.log2(x)
y1 = mod.exp2(x)
y2 = mod.rint(x)
y3 = mod.power(x, x)
y4 = mod.absolute(x * -1) # TODO: USub for some types
return (y0, y1, y2, y3, y4)
# ______________________________________________________________________
# Python
def run_py_arc(mod, x):
"math and cmath"
y0 = mod.acos(x)
y1 = mod.asin(x)
y2 = mod.atan(x)
y3 = mod.asinh(x)
y4 = mod.acosh(1.0/x)
y5 = mod.atanh(x)
return (y0, y1, y2, y3, y4, y5)
def misc_floating(mod, x):
"miscellaneous"
# y0 = math.erfc(x)
y1 = math.atan2(x, x)
y2 = np.arctan2(x, x)
y3 = np.logaddexp(x, x)
y4 = np.logaddexp2(x, x)
return (y1, y2, y3, y4) #(y0, y1)
# ______________________________________________________________________
# Run tests
Suite = collections.namedtuple('Suite', ['mod', 'types'])
merge = lambda d1, d2: dict(chain(d1.items(), d2.items()))
integral = nb.short, nb.int_, nb.uint, nb.long_, nb.ulong, nb.longlong, nb.ulonglong
floating = nb.float_, nb.double, #nb.longdouble
complexes = nb.complex64, nb.complex128, #nb.complex256
fdata = { integral : 6, floating: 6.0, (nb.object_,): 6.0 }
cdata = { complexes: 6.0+4.0j }
data = merge(fdata, cdata)
arc_fdata = { floating: 0.6, (nb.object_,): 0.6 }
arc_cdata = { complexes: 0.6+0.4j }
arc_data = merge(arc_fdata, arc_cdata)
tests = {
run_common : [Suite(math, fdata), Suite(cmath, cdata)],
run_np_math : [Suite(np, data), Suite(math, fdata)],
run_commonf : [Suite(np, fdata), Suite(math, fdata)],
run_np_arc : [Suite(np, arc_data)],
run_py_arc : [Suite(math, arc_fdata), Suite(cmath, arc_cdata)],
run_np_misc : [Suite(np, data)],
# run_py_math : [Suite(math, fdata)],
misc_floating : [Suite(math, fdata)]
}
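# Added note: each entry maps a test kernel to the Suite(s) it runs under; a Suite
# pairs a math module (np, math or cmath) with a dict whose keys are tuples of
# numba types and whose values are the sample input to use for those types.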
def run():
for test, suites in tests.iteritems():
for suite in suites:
for types, data in suite.types.iteritems():
for ty in types:
print("running:", test.__name__, ty)
signature = nb.object_(nb.typeof(suite.mod), ty)
jitter = nb.jit(signature)
jitted = jitter(test)
r1 = test(suite.mod, data)
r2 = jitted(suite.mod, data)
# print(r1, r2)
assert np.allclose(r1, r2)
r3 = jitted(suite.mod, data)
assert np.allclose(r1, r3)
run()
########NEW FILE########
__FILENAME__ = test_complex
#! /usr/bin/env python
# ______________________________________________________________________
'''test_complex
Test Numba's ability to generate code that supports complex numbers.
'''
# ______________________________________________________________________
import cmath
import unittest
import numba
from numba import *
from numba.decorators import jit
from numba.utils import debugout
from numba.llvm_types import _plat_bits
from numba.testing import test_support
import numpy
import itertools
# ______________________________________________________________________
def get_real_fn (in_num):
return in_num.real
def get_imag_fn (in_num):
ret_val = in_num.imag
return ret_val
def get_conj_fn (in_num):
return in_num.conjugate()
def get_complex_constant_fn ():
return (3. + 4.j).conjugate()
def prod_sum_fn (coeff, inval, ofs):
#debugout('prod_sum_fn(): coeff = ', coeff, ', inval = ', inval, ', ofs = ',
# ofs)
ret_val = (coeff * inval) + ofs
#debugout('prod_sum_fn() ==> ', ret_val)
return ret_val
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a / b
def floordiv(a, b):
return a // b
def sqrt(a, b):
result = a**2 + b**2
return cmath.sqrt(result) + 1.6j
def log(a, b):
result = a**2 + b**2
return cmath.log(result) + 1.6j
def log10(a, b):
result = a**2 + b**2
return cmath.log10(result) + 1.6j
def exp(a, b):
result = a**2 + b**2
return cmath.exp(result) + 1.6j
def sin(a, b):
result = a**2 + b**2
return cmath.sin(result) + 1.6j
def cos(a, b):
result = a**2 + b**2
return cmath.cos(result) + 1.6j
def atan(a, b):
result = a**2 + b**2
return cmath.atan(result) + 1.6j
def asinh(a, b):
result = a**2 + b**2
return cmath.asinh(result) + 1.6j
def cosh(a, b):
result = a**2 + b**2
return cmath.cosh(result) + 1.6j
def absolute(a, b):
result = a**2 + b**2
return abs(result) + 1.6j
def mandel(x, y, max_iters):
i = 0
z = 0.0j
for i in range(max_iters):
z = z**2 + (x + y*1j)
if abs(z**2) >= 4:
return i
return 255
# ______________________________________________________________________
m, n = 0.4 + 1.2j, 5.1 - 0.6j
class TestASTComplex(test_support.ASTTestCase):
def test_get_real_fn (self):
num0 = 3 + 2j
num1 = numpy.complex128(num0)
compiled_get_real_fn = self.jit(argtypes = [complex128])(get_real_fn)
self.assertEqual(compiled_get_real_fn(num0), 3.)
self.assertEqual(get_real_fn(num0), compiled_get_real_fn(num0))
self.assertEqual(compiled_get_real_fn(num1), 3.)
self.assertEqual(get_real_fn(num1), compiled_get_real_fn(num1))
def test_get_imag_fn (self):
num0 = 0 - 2j
num1 = numpy.complex128(num0)
compiled_get_imag_fn = self.jit(argtypes = [complex128])(get_imag_fn)
self.assertEqual(compiled_get_imag_fn(num0), -2.)
self.assertEqual(get_imag_fn(num0), compiled_get_imag_fn(num0))
self.assertEqual(compiled_get_imag_fn(num1), -2.)
self.assertEqual(get_imag_fn(num1), compiled_get_imag_fn(num1))
def test_get_conj_fn (self):
num0 = 4 - 1.5j
num1 = numpy.complex128(num0)
compiled_get_conj_fn = self.jit(argtypes = [complex128],
restype = complex128)(get_conj_fn)
self.assertEqual(compiled_get_conj_fn(num0), 4 + 1.5j)
self.assertEqual(get_conj_fn(num0), compiled_get_conj_fn(num0))
self.assertEqual(compiled_get_conj_fn(num1), 4 + 1.5j)
self.assertEqual(get_conj_fn(num1), compiled_get_conj_fn(num1))
def test_get_complex_constant_fn (self):
compiled_get_complex_constant_fn = self.jit(
argtypes = [], restype = complex128)(get_complex_constant_fn)
self.assertEqual(get_complex_constant_fn(),
compiled_get_complex_constant_fn())
def test_prod_sum_fn (self):
compiled_prod_sum_fn = self.jit(argtypes = [complex128, complex128, complex128],
restype = complex128)(prod_sum_fn)
rng = numpy.arange(-1., 1.1, 0.5)
for ar, ai, xr, xi, br, bi in itertools.product(rng, rng, rng, rng, rng,
rng):
a = numpy.complex128(ar + ai * 1j)
x = numpy.complex128(xr + xi * 1j)
b = numpy.complex128(br + bi * 1j)
self.assertEqual(prod_sum_fn(a, x, b),
compiled_prod_sum_fn(a, x, b))
def test_arithmetic_mixed(self):
m, n = 0.4 + 1.2j, 10.0
self.arithmetic(m, n)
m, n = 0.4 + 1.2j, 10
self.arithmetic(m, n)
def arithmetic(self, m, n):
self.assertAlmostEqual(self.autojit(add)(m, n), add(m, n))
self.assertAlmostEqual(self.autojit(sub)(m, n), sub(m, n))
self.assertAlmostEqual(self.autojit(mul)(m, n), mul(m, n))
self.assertAlmostEqual(self.autojit(div)(m, n), div(m, n))
if not numba.PY3:
self.assertAlmostEqual(self.autojit(floordiv)(m, n), floordiv(m, n))
def test_complex_math(self):
self.assertAlmostEqual(self.autojit(sqrt)(m, n), sqrt(m, n))
self.assertAlmostEqual(self.autojit(log)(m, n), log(m, n))
self.assertAlmostEqual(self.autojit(log10)(m, n), log10(m, n))
self.assertAlmostEqual(self.autojit(exp)(m, n), exp(m, n), places=3)
self.assertAlmostEqual(self.autojit(sin)(m, n), sin(m, n))
self.assertAlmostEqual(self.autojit(cos)(m, n), cos(m, n))
self.assertAlmostEqual(self.autojit(cosh)(m, n), cosh(m, n), places=3)
self.assertAlmostEqual(self.autojit(atan)(m, n), atan(m, n))
self.assertAlmostEqual(self.autojit(asinh)(m, n), asinh(m, n))
self.assertAlmostEqual(self.autojit(absolute)(m, n), absolute(m, n))
def test_complex_math_float_input(self):
m, n = .12, .32
self.assertAlmostEqual(self.autojit(sqrt)(m, n), sqrt(m, n))
self.assertAlmostEqual(self.autojit(log)(m, n), log(m, n))
self.assertAlmostEqual(self.autojit(log10)(m, n), log10(m, n))
self.assertAlmostEqual(self.autojit(exp)(m, n), exp(m, n))
self.assertAlmostEqual(self.autojit(sin)(m, n), sin(m, n))
self.assertAlmostEqual(self.autojit(cos)(m, n), cos(m, n))
self.assertAlmostEqual(self.autojit(cosh)(m, n), cosh(m, n))
self.assertAlmostEqual(self.autojit(atan)(m, n), atan(m, n))
self.assertAlmostEqual(self.autojit(asinh)(m, n), asinh(m, n))
self.assertAlmostEqual(self.autojit(absolute)(m, n), absolute(m, n))
def test_mandel(self):
self.assertEqual(self.autojit(mandel)(-1, -1, 20), 2)
self.assertEqual(mandel(-1, -1, 20), 2)
# ______________________________________________________________________
if __name__ == "__main__":
# m, n = .12, .32
# print autojit(add)(m, n)
# print autojit(cosh)(m, n), cosh(m, n)
# print(autojit(sqrt)(m, n))
# num0 = 0 - 2j
# num1 = numpy.complex128(num0)
# compiled_get_imag_fn = jit(argtypes = [complex128])(get_imag_fn)
# compiled_get_imag_fn(num0)
unittest.main() #verbosity=3)
# ______________________________________________________________________
# End of test_complex.py
########NEW FILE########
__FILENAME__ = test_math
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
import numpy as np
# ______________________________________________________________________
# NumPy
def array_of_type(dtype=np.double):
return np.arange(1, 10, dtype=dtype)
def expected(a):
return np.sum(np.log(a) * np.sqrt(a) - np.cos(a) * np.sin(a))
def expected2(a):
return np.sum(np.expm1(a) + np.ceil(a + 0.5) * np.rint(a + 1.5))
@autojit(backend='ast')
def numpy_math(a):
sum = 0.0
for i in range(a.shape[0]):
sum += np.log(a[i]) * np.sqrt(a[i]) - np.cos(a[i]) * np.sin(a[i])
return sum
@autojit(backend='ast')
def numpy_math2(a):
sum = 0.0
for i in range(a.shape[0]):
sum += np.expm1(a[i]) + np.ceil(a[i] + 0.5) * np.rint(a[i] + 1.5)
return sum
dtypes = np.float32, np.float64 #, np.float128
def test_numpy_math():
for dtype in dtypes:
print(dtype)
array = array_of_type(dtype)
result = numpy_math(array)
assert np.allclose(result, expected(array)), (result, expected(array))
result = numpy_math2(array)
assert np.allclose(result, expected2(array))
# ______________________________________________________________________
# Pow
@autojit(backend='ast')
def power(x, y):
return x ** y
def test_power():
assert power(5.0, 2.0) == 25.0
assert power(5, 2) == 25
# ______________________________________________________________________
# Mod
@autojit(backend='ast')
def modulo(x, y):
return x % y
def test_modulo():
for lsign in (1, -1):
for rsign in (1, -1):
float_lhs = lsign * 22.5
float_rhs = rsign * 0.2
assert np.allclose(modulo(float_lhs, float_rhs),
float_lhs % float_rhs)
int_lhs = lsign * 5
int_rhs = rsign * 2
assert modulo(int_lhs, int_rhs) == (int_lhs % int_rhs)
if __name__ == "__main__":
test_numpy_math()
test_power()
test_modulo()
########NEW FILE########
__FILENAME__ = test_nopython_math
import sys
import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def exp_fn(a):
return math.exp(a)
def sqrt_fn(a):
return math.sqrt(a)
def log_fn(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(sqrt_fn, np.sqrt)
def test_exp(self):
self._template(exp_fn, np.exp)
def test_log(self):
self._template(log_fn, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if sys.platform == 'win32':
# NOTE: we're using the double implementation (e.g. 'log' instead of 'logf')
# class TestNoPythonMath(unittest.TestCase):
# """
# LLVM intrinsics don't work properly on Windows, and libc doesn't
# have all these functions.
# """
pass
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_numpy_math
from numba import error
import numba
from numba import *
import numpy as np
def get_functions():
def sqrt(a, b):
result = a**2 + b**2
return np.sqrt(result) + 1.6
def log(a, b):
result = a**2 + b**2
return np.log(result) + 1.6
def log10(a, b):
result = a**2 + b**2
return np.log10(result) + 1.6
def log1p(a, b):
result = a**2 + b**2
return np.log1p(result) + 1.6
def log2(a, b):
result = a**2 + b**2
return np.log2(result) + 1.6
def exp(a, b):
result = a**2 + b**2
return np.exp(result) + 1.6
def expm1(a, b):
result = a**2 + b**2
return np.expm1(result) + 1.6
def sin(a, b):
result = a**2 + b**2
return np.sin(result) + 1.6
def cos(a, b):
result = a**2 + b**2
return np.cos(result) + 1.6
def absolute(a, b):
result = a**2 + b**2
return np.abs(result) + 1.6
return locals()
dest_types = [
int_,
short,
Py_ssize_t,
float_,
double,
complex128
]
def test_math_funcs():
functions = get_functions()
exceptions = 0
for func_name in functions:
# func_name = 'sqrt'
func = functions[func_name]
for dest_type in dest_types:
signature = numba.function(None, [dest_type, dest_type])
print(("executing...", func_name, signature))
try:
numba_func = jit(signature)(func)
except error.NumbaError as e:
exceptions += 1
print((func_name, dest_type, e))
continue
x, y = 5.2, 6.9
if dest_type.is_int:
x, y = 5, 6
r1 = numba_func(x, y)
r2 = func(x, y)
assert np.allclose(r1, r2), (r1, r2, signature, func_name)
if exceptions:
raise Exception
@autojit
def sin(A):
return np.sin(A)
def test_array_math():
# A = np.arange(10)
# assert np.all(sin(A) == sin.py_func(A))
dst_types = set(dest_types)
dst_types.discard(Py_ssize_t)
functions = get_functions()
for func_name, func in functions.iteritems():
for dst_type in dst_types:
print(("array math", func_name, dst_type))
dtype = dst_type.get_dtype()
a = np.arange(1, 5, dtype=dtype)
b = np.arange(5, 9, dtype=dtype)
r1 = autojit(func)(a, b)
r2 = func(a, b)
assert np.allclose(r1, r2)
@autojit
def expm1(a, b):
print((numba.typeof(a)))
print((numba.typeof(np.expm1(a))))
# result = a**2 + b**2
# print "... :)"
# print np.expm1(result), "..."
return np.expm1(a**2) + b
@autojit
def log2(a, b):
result = a**2 + b**2
return np.log2(result) + 1.6
if __name__ == "__main__":
# dtype = np.complex128
# a = np.arange(1, 11, dtype=dtype)
# b = np.arange(5, 15, dtype=dtype)
# print expm1(a, b)
# print "run log"
# log2(10, 10)
test_math_funcs()
test_array_math()
########NEW FILE########
__FILENAME__ = test_numpy_calls
import sys
import numpy as np
from numba import *
from numba.decorators import jit, autojit
#from numba.testing import test_support
a = np.arange(80).reshape(8, 10)
@autojit(backend='ast')
def np_sum(a):
return np.sum(a, axis=0)
@autojit(backend='ast')
def np_copy(a):
return a.copy(order='F')
@autojit(backend='ast')
def attributes(a):
return (a.T,
a.T.T,
a.copy(),
np.array(a, dtype=np.double))
def test_numpy_attrs():
result = np_sum(a)
np_result = np.sum(a, axis=0)
assert np.all(result == np_result)
if np.__version__ >= '1.6':
assert np_copy(a).strides == a.copy(order='F').strides
assert all(np.all(result1 == result2)
for result1, result2 in zip(attributes(a),
attributes.py_func(a)))
if __name__ == "__main__":
test_numpy_attrs()
########NEW FILE########
__FILENAME__ = test_numpy_type_inference
import numpy as np
import numba
from numba import *
from numba import typesystem
from numba.typesystem import tuple_
#------------------------------------------------------------------------
# Test functions
#------------------------------------------------------------------------
@autojit
def array(value):
return numba.typeof(np.array(value))
@autojit
def nonzero(value):
return numba.typeof(np.nonzero(value))
@autojit
def where(value):
return numba.typeof(np.where(value))
@autojit
def where3(value, x, y):
return numba.typeof(np.where(value, x, y))
@autojit
def numba_dot(A, B):
result = np.dot(A, B)
return numba.typeof(result), result
@autojit
def numba_vdot(A, B):
result = np.vdot(A, B)
return numba.typeof(result), result
@autojit
def numba_inner(a, b):
result = np.inner(a, b)
return numba.typeof(result), result
@autojit
def numba_outer(a, b):
result = np.outer(a, b)
return numba.typeof(result), result
@autojit
def numba_tensordot(a, b, axes):
result = np.tensordot(a, b, axes)
return numba.typeof(result), result
@autojit
def numba_tensordot2(a, b):
result = np.tensordot(a, b)
return numba.typeof(result), result
@autojit
def numba_kron(a, b):
result = np.kron(a, b)
return numba.typeof(result), result
# ------------- Test sum ------------
@autojit
def sum_(a):
return numba.typeof(np.sum(a))
@autojit
def sum_axis(a, axis):
return numba.typeof(np.sum(a, axis=axis))
@autojit
def sum_dtype(a, dtype):
return numba.typeof(np.sum(a, dtype=dtype))
@autojit
def sum_out(a, out):
return numba.typeof(np.sum(a, out=out))
# ------------- Basic tests ------------
@autojit
def array_from_list():
ids = np.array([3,4,5])
ids2 = ids < 4
return ids2
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
def equals(a, b):
assert a == b, (a, b, type(a), type(b))
def test_array():
equals(array(np.array([1, 2, 3], dtype=np.double)), float64[:])
equals(array(np.array([[1, 2, 3]], dtype=np.int32)), int32[:, :])
equals(array(np.array([[1, 2, 3],
[4, 5, 6]], dtype=np.int32).T), int32[:, :])
def test_nonzero():
equals(nonzero(np.array([1, 2, 3], dtype=np.double)),
tuple_(npy_intp[:], 1))
equals(nonzero(np.array([[1, 2, 3]], dtype=np.double)),
tuple_(npy_intp[:], 2))
equals(nonzero(np.array((((1, 2, 3),),), dtype=np.double)),
tuple_(npy_intp[:], 3))
def test_where():
equals(where(np.array([1, 2, 3], dtype=np.double)),
tuple_(npy_intp[:], 1))
equals(where3(np.array([True, False, True]),
np.array([1, 2, 3], dtype=np.double),
np.array([1, 2, 3], dtype=np.complex128)),
complex128[:])
equals(where3(np.array([True, False, True]),
np.array([1, 2, 3], dtype=np.float32),
np.array([1, 2, 3], dtype=np.int64)),
float64[:])
def test_numba_dot():
A = np.array(1)
B = np.array(2)
dtype = typesystem.from_numpy_dtype(A.dtype).dtype
for i in range(1, 5):
for j in range(1, 5):
# print i, j
shape_A = (1,) * i
shape_B = (1,) * j
x = A.reshape(*shape_A)
y = B.reshape(*shape_B)
result_type, result = numba_dot(x, y)
assert result == np.dot(x, y)
if i + j - 2 > 0:
assert result.ndim == result_type.ndim
else:
assert result_type == dtype, (result_type, dtype)
def test_numba_vdot():
for a, b in ((np.array([1+2j,3+4j]),
np.array([5+6j,7+8j])),
(np.array([[1, 4], [5, 6]]),
np.array([[4, 1], [2, 2]]))):
result_type, result = numba_vdot(a, b)
assert result == np.vdot(a, b)
assert result_type == typesystem.from_numpy_dtype(a.dtype).dtype
result_type, result = numba_vdot(b, a)
assert result == np.vdot(b, a)
assert result_type == typesystem.from_numpy_dtype(b.dtype).dtype
def test_numba_inner():
# Note these tests assume that the lhs' type is the same as the
# promotion type for both arguments. They will fail if additional
# test data doesn't adhere to this policy.
for a, b in ((np.array([1,2,3]), np.array([0,1,0])),
(np.arange(24).reshape((2,3,4)), np.arange(4)),
(np.eye(2), 7)):
result_type, result = numba_inner(a, b)
if result_type.is_array:
assert (result == np.inner(a, b)).all()
assert (result_type.dtype ==
typesystem.from_numpy_dtype(result.dtype).dtype)
assert (result_type.dtype ==
typesystem.from_numpy_dtype(a.dtype).dtype)
else:
assert result == np.inner(a, b)
assert result_type == typesystem.from_numpy_dtype(a.dtype).dtype
def test_numba_outer():
for a, b in ((np.ones((5,)), np.linspace(-2, 2, 5)),
(1j * np.linspace(2, -2, 5), np.ones((5,))),
(np.array(['a', 'b', 'c'], dtype=object), np.arange(1,4)),
(np.array([1]), 1),
(np.ones((2,2,2)), np.linspace(-2, 2, 5))):
result_type, result = numba_outer(a, b)
assert (result == np.outer(a, b)).all()
assert (result_type.is_array and result_type.ndim == 2)
assert result_type.dtype == typesystem.from_numpy_dtype(a.dtype).dtype
def test_numba_tensordot():
for a, b, axes in ((np.arange(60.).reshape(3, 4, 5),
np.arange(24.).reshape(4, 3, 2), ([1,0],[0,1])),
):
result_type, result = numba_tensordot(a, b, axes)
assert (result == np.tensordot(a, b, axes)).all()
# See comments in the docstring for
# numba.type_inference.modules.numpymodule.tensordot().
assert result_type == object_
def test_numba_tensordot2():
A = np.array(1)
B = np.array(2)
dtype = typesystem.from_numpy_dtype(A.dtype).dtype
for i in range(2, 5):
for j in range(2, 5):
shape_A = (1,) * i
shape_B = (1,) * j
x = A.reshape(*shape_A)
y = B.reshape(*shape_B)
result_type, result = numba_tensordot2(x, y)
control = np.tensordot(x, y)
assert result == control
#assert result_type == numba.typeof(control)
if i + j - 4 > 0:
assert result.ndim == result_type.ndim
else:
assert result_type == dtype
def test_sum():
a = np.array([1, 2, 3], dtype=np.int32)
b = np.array([[1, 2], [3, 4]], dtype=np.int64)
equals(sum_(a), int32)
equals(sum_axis(a, 0), int32)
equals(sum_dtype(a, np.double), double)
equals(sum_out(b, a), int32[:]) # Not a valid call to sum :)
def test_basic():
assert np.all(array_from_list() == np.array([True, False, False]))
if __name__ == "__main__":
test_array()
test_nonzero()
test_where()
test_numba_dot()
test_numba_vdot()
test_numba_inner()
test_numba_outer()
test_numba_tensordot()
test_numba_tensordot2()
test_sum()
test_basic()
########NEW FILE########
__FILENAME__ = test_preloading
"""
>>> a = np.arange(10, dtype=np.double)
>>> preload_arg(a)
4.0
>>> preload_local(a)
4.0
>>> preload_phi(a)
45.0
>>> preload_phi_cycle(a)
45.0
>>> preload_phi_cycle2(a)
45.0
"""
import numpy as np
from numba import *
@autojit
def preload_arg(A):
return A[4]
@autojit
def preload_local(A):
A = A
return A[4]
@autojit
def preload_phi(A):
sum = 0.0
for i in range(10):
sum += A[i]
A = A
return sum
@autojit
def preload_phi_cycle(A):
# A_0 = A # <- propagated preload
sum = 0.0
for i in range(10):
# A_1 = phi(A_0, A_3) # <- preload
sum += A[i]
if i > 5:
A = A # A_2 # <- propagated preload
# A_3 = phi(A_1, A_2) # <- propagated preload
return sum
@autojit
def preload_phi_cycle2(A):
# A_0 = A # <- propagated preload
sum = 0.0
for i in range(10):
# A_1 = phi(A_0, A_3) # <- propagated preload
if i > 5:
A = A # A_2 # <- propagated preload
# A_3 = phi(A_1, A_2) # <- preload
sum += A[i]
return sum
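# Added note (an assumption about numba internals, kept deliberately vague):
# "preload" in the comments above refers to hoisting an array's data pointer /
# shape / strides out of the loop once; the SSA phi nodes sketched in the comments
# mark where that hoisted information must be merged when the array variable is
# reassigned on some control-flow paths.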
if __name__ == "__main__":
a = np.arange(10, dtype=np.double)
preload_arg(a)
preload_phi(a)
# import numba
# numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_ufunc_type_inference
import numpy as np
import numba
from numba import *
from numba import typesystem
tup_t = typesystem.tuple_
#------------------------------------------------------------------------
# Test data
#------------------------------------------------------------------------
a = np.array([1, 2, 3], dtype=np.int32)
b = np.array([[1, 2], [3, 4]], dtype=np.int64)
#------------------------------------------------------------------------
# Test functions
#------------------------------------------------------------------------
# ________________ unary ufuncs _______________
# ________________ binary ufuncs _______________
@autojit
def binary_ufunc(ufunc, a, b):
return numba.typeof(ufunc(a, b))
@autojit
def binary_ufunc_dtype(ufunc, a, b, dtype):
return numba.typeof(ufunc(a, b, dtype=dtype))
@autojit
def binary_ufunc_dtype_positional(ufunc, a, b, dtype):
return numba.typeof(ufunc(a, b, dtype=dtype))
# ________________ binary ufunc methods _______________
@autojit
def add_reduce(a):
return numba.typeof(np.add.reduce(a))
@autojit
def add_reduce_axis(a, axis):
return numba.typeof(np.add.reduce(a, axis=axis))
@autojit
def accumulate(ufunc, a):
return numba.typeof(ufunc.accumulate(a))
@autojit
def accumulate_dtype(ufunc, a, dtype):
return numba.typeof(ufunc.accumulate(a, dtype=dtype))
@autojit
def reduceat(ufunc, a):
return numba.typeof(ufunc.reduceat(a))
@autojit
def reduceat_dtype(ufunc, a, dtype):
return numba.typeof(ufunc.reduceat(a, dtype=dtype))
@autojit
def outer(ufunc, a):
return numba.typeof(ufunc.outer(a, a))
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
def equals(a, b):
assert a == b, (a, b, type(a), type(b))
def test_binary_ufunc():
equals(binary_ufunc(np.add, a, b), int64[:, :])
equals(binary_ufunc(np.subtract, a, b), int64[:, :])
equals(binary_ufunc(np.multiply, a, b), int64[:, :])
equals(binary_ufunc(np.true_divide, a, b), int64[:, :])
equals(binary_ufunc(np.floor_divide, a, b), int64[:, :])
equals(binary_ufunc(np.divide, a, b), int64[:, :])
equals(binary_ufunc(np.bitwise_and, a, b), int64[:, :])
equals(binary_ufunc(np.bitwise_or, a, b), int64[:, :])
equals(binary_ufunc(np.bitwise_xor, a, b), int64[:, :])
equals(binary_ufunc(np.left_shift, a, b), int64[:, :])
equals(binary_ufunc(np.right_shift, a, b), int64[:, :])
equals(binary_ufunc(np.logical_and, a, b), bool_[:, :])
equals(binary_ufunc(np.logical_or, a, b), bool_[:, :])
equals(binary_ufunc(np.logical_xor, a, b), bool_[:, :])
equals(binary_ufunc(np.logical_not, a, b), bool_[:, :])
equals(binary_ufunc(np.greater, a, b), bool_[:, :])
equals(binary_ufunc(np.greater_equal, a, b), bool_[:, :])
equals(binary_ufunc(np.less, a, b), bool_[:, :])
equals(binary_ufunc(np.less_equal, a, b), bool_[:, :])
equals(binary_ufunc(np.not_equal, a, b), bool_[:, :])
equals(binary_ufunc(np.equal, a, b), bool_[:, :])
def test_ufunc_reduce():
equals(add_reduce(a), int32)
equals(add_reduce_axis(b, 1), int64[:])
def test_ufunc_accumulate():
equals(accumulate(np.add, a), int32[:])
equals(accumulate(np.multiply, a), int32[:])
equals(accumulate(np.bitwise_and, a), int32[:])
equals(accumulate(np.logical_and, a), bool_[:])
# Test with dtype
equals(accumulate_dtype(np.add, a, np.double), double[:])
def test_ufunc_reduceat():
equals(reduceat(np.add, a), int32[:])
equals(reduceat(np.multiply, a), int32[:])
equals(reduceat(np.bitwise_and, a), int32[:])
equals(reduceat(np.logical_and, a), bool_[:])
# Test with dtype
equals(reduceat_dtype(np.add, a, np.double), double[:])
def test_ufunc_outer():
equals(outer(np.add, a), int32[:, :])
equals(outer(np.multiply, a), int32[:, :])
equals(outer(np.bitwise_and, a), int32[:, :])
equals(outer(np.logical_and, a), bool_[:, :])
if __name__ == "__main__":
test_binary_ufunc()
test_ufunc_reduce()
test_ufunc_accumulate()
test_ufunc_reduceat()
test_ufunc_outer()
########NEW FILE########
__FILENAME__ = test_binary_ops
from numba import *
from numba.testing import test_support
import numpy
import unittest
def add(a, b): return a + b
def sub(a, b): return a - b
def mult(a, b): return a * b
def div(a, b): return a / b
def mod(a, b): return a % b
def pow_(a, b): return a ** b
def shl(a, b): return a << b
def shr(a, b): return a >> b
def bitor(a, b): return a | b
def bitxor(a, b): return a ^ b
def bitand(a, b): return a & b
def floor(a, b): return a // b
class TestBinops(unittest.TestCase):
def _handle_bitwise_op(self, fn, *args):
test_fn = jit('object_(object_,object_)')(fn)
for lhs, rhs in [(5, 2), (0xdead, 0xbeef), (-5, 2)]:
self.assertEquals(fn(lhs, rhs), test_fn(lhs, rhs))
for lhs, rhs in args:
self.assertEquals(fn(lhs, rhs), test_fn(lhs, rhs))
def _handle_binop(self, fn, *args):
return self._handle_bitwise_op(fn, (6.3, 9.2), *args)
def test_add(self): self._handle_binop(add, ('egg', 'spam'))
def test_sub(self): self._handle_binop(sub)
def test_mult(self): self._handle_binop(mult)
def test_div(self): self._handle_binop(div)
def test_mod(self): self._handle_binop(mod)
def test_pow(self): self._handle_binop(pow_)
def test_shl(self): self._handle_bitwise_op(shl)
def test_shr(self): self._handle_bitwise_op(shr)
def test_bitor(self): self._handle_bitwise_op(bitor)
def test_bitxor(self): self._handle_bitwise_op(bitxor)
def test_bitand(self): self._handle_bitwise_op(bitand)
def test_floor(self): self._handle_binop(floor)
def test_stringformat(self):
self.assertEqual(autojit(mod)("hello %s", "world"), "hello world")
if __name__ == "__main__":
test_support.main()
########NEW FILE########
__FILENAME__ = test_bitwise_ops
import sys
import numba
from numba.testing.test_support import autojit_py3doc
# NOTE: See also issues/test_issue_56
autojit_py3doc = autojit_py3doc(warn=False, warnstyle='simple')
@autojit_py3doc
def test_bitwise_and(a, b):
"""
>>> test_bitwise_and(0b01, 0b10)
0
>>> test_bitwise_and(0b01, 0b11)
1
>>> test_bitwise_and(0b01, 2.0)
Traceback (most recent call last):
...
NumbaError: 27:15: Expected an int, or object, or bool
>>> test_bitwise_and(2.0, 0b01)
Traceback (most recent call last):
...
NumbaError: 27:11: Expected an int, or object, or bool
"""
return a & b
@autojit_py3doc
def test_bitwise_or(a, b):
"""
>>> test_bitwise_or(0b00, 0b00)
0
>>> test_bitwise_or(0b00, 0b01)
1
>>> test_bitwise_or(0b10, 0b00)
2
>>> test_bitwise_or(0b01, 0b10)
3
>>> test_bitwise_or(0b01, 0b11)
3
>>> test_bitwise_or(0b01, 2.0)
Traceback (most recent call last):
...
NumbaError: 54:15: Expected an int, or object, or bool
>>> test_bitwise_or(2.0, 0b01)
Traceback (most recent call last):
...
NumbaError: 54:11: Expected an int, or object, or bool
"""
return a | b
@autojit_py3doc
def test_bitwise_xor(a, b):
"""
>>> test_bitwise_xor(0b00, 0b00)
0
>>> test_bitwise_xor(0b00, 0b01)
1
>>> test_bitwise_xor(0b10, 0b00)
2
>>> test_bitwise_xor(0b01, 0b10)
3
>>> test_bitwise_xor(0b01, 0b11)
2
>>> test_bitwise_xor(0b01, 2.0)
Traceback (most recent call last):
...
NumbaError: 82:15: Expected an int, or object, or bool
>>> test_bitwise_xor(2.0, 0b01)
Traceback (most recent call last):
...
NumbaError: 82:11: Expected an int, or object, or bool
"""
return a ^ b
@autojit_py3doc
def test_shift_left(a, b):
"""
>>> test_shift_left(5, 2)
20
>>> test_shift_left(-5, 2)
-20
"""
return a << b
@autojit_py3doc
def test_shift_right(a, b):
"""
>>> test_shift_right(20, 2)
5
>>> test_shift_right(-20, 2)
-5
"""
return a >> b
@autojit_py3doc
def test_invert(a):
"""
>>> test_invert(5)
-6
>>> test_invert(-5)
4
"""
return ~a
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_unary_ops
#! /usr/bin/env python
from numba import *
from numba.testing import test_support
import unittest
def unary_minus(x):
return -x
def unary_not(x):
return not x
def unary_not_pred(p):
if not p:
return 1
return 0
def unary_invert(x):
return ~x
class TestUnaryOps(unittest.TestCase):
def test_unary_minus(self):
test_fn = jit(argtypes=(double,), restype=double)(unary_minus)
test_val = 3.1415
self.assertEqual(test_fn(test_val), -test_val)
self.assertEqual(test_fn(test_val), unary_minus(test_val))
def test_unary_not(self):
test_fn = jit(argtypes=(bool_,), restype=bool_)(unary_not)
for test_val in True, False:
self.assertEqual(test_fn(test_val), not test_val)
self.assertEqual(test_fn(test_val), unary_not(test_val))
def test_unary_not_pred(self):
test_fn = jit(argtypes=(bool_,), restype=int_)(unary_not_pred)
for test_val in True, False:
self.assertEqual(test_fn(test_val), 0 if test_val else 1)
self.assertEqual(test_fn(test_val), unary_not(test_val))
def test_unary_invert(self):
test_fn = jit(argtypes=(int_,), restype=int_)(unary_invert)
test_val = 0x70f0f0f0
self.assertEqual(test_fn(test_val), ~test_val)
self.assertEqual(test_fn(test_val), unary_invert(test_val))
if __name__ == "__main__":
test_support.main()
########NEW FILE########
__FILENAME__ = test_null
import ctypes
import numba
from numba import *
#intp = ctypes.POINTER(ctypes.c_int)
#voidp = ctypes.c_void_p
intp = int_.pointer()
voidp = void.pointer()
@autojit
def test_compare_null():
"""
>>> test_compare_null()
True
"""
return intp(Py_uintptr_t(0)) == NULL
@autojit
def test_compare_null_attribute():
"""
>>> test_compare_null_attribute()
True
"""
return voidp(Py_uintptr_t(0)) == numba.NULL
if __name__ == '__main__':
# test_compare_null()
# test_compare_null_attribute()
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_pointers
import ctypes
import numba
from numba import *
from numba.testing.test_support import testmod, autojit_py3doc
import numpy as np
int32p = int32.pointer()
voidp = void.pointer()
@autojit_py3doc
def test_pointer_arithmetic():
"""
>>> test_pointer_arithmetic()
48
"""
p = int32p(Py_uintptr_t(0))
p = p + 10
p += 2
    return Py_uintptr_t(p)  # base 0 + 12 int32 elements * 4 bytes = 48
@autojit_py3doc(locals={"pointer_value": Py_uintptr_t})
def test_pointer_indexing(pointer_value, type_p):
"""
>>> a = np.array([1, 2, 3, 4], dtype=np.float32)
>>> test_pointer_indexing(a.ctypes.data, float32.pointer())
(1.0, 2.0, 3.0, 4.0)
>>> a = np.array([1, 2, 3, 4], dtype=np.int64)
>>> [int(x) for x in test_pointer_indexing(a.ctypes.data, int64.pointer())]
[1, 2, 3, 4]
"""
p = type_p(pointer_value)
return p[0], p[1], p[2], p[3]
if __name__ == '__main__':
# print test_pointer_arithmetic()
# a = np.array([1, 2, 3, 4], dtype=np.float32)
# print test_pointer_indexing(a.ctypes.data, float32.pointer())
pass
testmod()
########NEW FILE########
__FILENAME__ = test_prange
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import autojit, double
import unittest
# from unittest import FunctionTestCase as testcase
import numpy as np
tests = []
def testcase(f):
tests.append(f)
return f
#----------------------------------------------------------------------------
# Simple isolated tests
#----------------------------------------------------------------------------
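# Added note: these tests exercise the OpenMP-like variable classification used by
# numba.prange -- values only read in the loop stay shared, values assigned inside
# become private (with the last iteration's value visible afterwards, i.e.
# lastprivate), and in-place updates such as `var += ...` are treated as reductions.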
@testcase
def test_simple_prange_shared():
@autojit(warn=False)
def simple_prange_shared():
"""
>>> simple_prange_shared()
20L
"""
result = np.empty(1, dtype=np.int64)
shared = 20
for i in numba.prange(1):
result[0] = shared
return result[0]
assert simple_prange_shared() == 20
@testcase
def test_simple_prange_private():
@autojit(warn=False)
def simple_prange_private():
"""
>>> simple_prange_private()
10L
"""
result = np.empty(1, dtype=np.int64)
var = 20
for i in numba.prange(1):
var = 10
result[0] = var
return result[0]
assert simple_prange_private() == 10
@testcase
def test_simple_prange_lastprivate():
@autojit(warn=False)
def simple_prange_lastprivate():
"""
>>> simple_prange_lastprivate()
10
"""
var = 20
for i in numba.prange(1):
var = 10
return var
assert simple_prange_lastprivate() == 10
@testcase
def test_simple_prange_reduction():
@autojit(warn=False)
def simple_prange_reduction():
"""
>>> simple_prange_reduction()
15
"""
var = 10
for i in numba.prange(1):
var += 5
return var
assert simple_prange_reduction() == 15
#----------------------------------------------------------------------------
# Error Tests
#----------------------------------------------------------------------------
@autojit(warn=False)
def prange_reduction_error():
"""
DISABLED.
>> prange_reduction_error()
Traceback (most recent call last):
...
NumbaError: 32:8: Local variable 'sum' is not bound yet
"""
for i in numba.prange(10):
sum += i
sum = 0.0
return sum
#----------------------------------------------------------------------------
# Advanced Tests
#----------------------------------------------------------------------------
@testcase
def test_prange_reduction2():
@autojit(warn=False)
def prange_reduction2():
"""
>>> prange_reduction2()
49999995000000.0
"""
sum = 0.0
for i in numba.prange(10000000):
sum += i
return sum
assert prange_reduction2() == 49999995000000.0
@testcase
def test_prange_reduction_and_privates():
@autojit(warn=False)
def prange_reduction_and_privates():
"""
>>> prange_reduction_and_privates()
100.0
"""
sum = 10.0
for i in numba.prange(10):
j = i * 2
sum += j
return sum
assert prange_reduction_and_privates() == 100.0
@testcase
def test_prange_lastprivate():
@autojit(warn=False)
def prange_lastprivate():
"""
>>> prange_lastprivate()
100.0
18
"""
sum = 10.0
for i in numba.prange(10):
j = i * 2
sum += j
print(sum)
return j
assert prange_lastprivate() == 18
@testcase
def test_prange_shared_privates_reductions():
@autojit(warn=False)
def prange_shared_privates_reductions(shared):
"""
>>> prange_shared_privates_reductions(2.0)
100.0
"""
sum = 10.0
for i in numba.prange(10):
j = i * shared
sum += j
shared = 3.0
return sum
assert prange_shared_privates_reductions(2.0) == 100.0
@testcase
def test_test_sum2d():
@autojit(warn=False)
def test_sum2d(A):
"""
>>> a = np.arange(100).reshape(10, 10)
>>> test_sum2d(a)
4950.0
>>> test_sum2d(a.astype(np.complex128))
(4950+0j)
>>> np.sum(a)
4950
"""
sum = 0.0
for i in numba.prange(A.shape[0]):
for j in range(A.shape[1]):
# print(i, j)
sum += A[i, j]
return sum
a = np.arange(100).reshape(10, 10)
assert test_sum2d(a) == 4950.0
assert test_sum2d(a.astype(np.complex128)) == 4950+0j
assert np.sum(a) == 4950
@testcase
def test_test_prange_in_closure():
@autojit(warn=False)
def test_prange_in_closure(x):
"""
>>> test_prange_in_closure(2.0)()
1000.0
"""
sum = 10.0
N = 10
@double()
def inner():
sum = 100.0
for i in numba.prange(N):
for j in range(N):
sum += i * x
return sum
return inner
assert test_prange_in_closure(2.0)() == 1000.0
@testcase
def test_test_prange_in_closure2():
@autojit(warn=False)
def test_prange_in_closure2(x):
"""
>>> test_prange_in_closure2(2.0)()
10000.0
"""
sum = 10.0
N = 10
@double()
def inner():
sum = 100.0
for i in numba.prange(N):
for j in range(N):
sum += (i * N + j) * x
return sum
return inner
assert test_prange_in_closure2(2.0)() == 10000.0
if __name__ == '__main__':
# unittest.main()
for test in tests:
print("running", test.__name__)
test()
########NEW FILE########
__FILENAME__ = test_prange_issue_24
from numba import autojit, jit, prange
@autojit
def prange_redux():
c = 0
a = 1
for i in prange(10):
c += a
return c
if __name__ == '__main__':
prange_redux()
########NEW FILE########
__FILENAME__ = test_prange_iteration_space
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import autojit
import numpy as np
def test(func, expect):
N = 10
A = np.empty(N, dtype=np.float32)
A[...] = 2.5
func(A)
assert np.allclose(A, expect)
@autojit
def forward1(A):
for i in numba.prange(10):
A[i] = i
@autojit
def forward2(A):
for i in numba.prange(1, 5):
A[i] = i
@autojit
def forward3(A):
for i in numba.prange(1, 8, 3):
A[i] = i
@autojit
def backward1(A):
for i in numba.prange(9, 2, -3):
A[i] = i
@autojit
def backward2(A):
for i in numba.prange(1, 5, -1):
A[i] = i
@autojit
def empty_assign():
i = 14
for i in numba.prange(10, 4):
pass
return i
@autojit
def last_value():
for i in numba.prange(10):
pass
print("after loop", i)
return i
def run():
test(forward1,
[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
test(forward2,
[ 2.5, 1., 2., 3., 4., 2.5, 2.5, 2.5, 2.5, 2.5])
test(forward3,
[ 2.5, 1., 2.5, 2.5, 4., 2.5, 2.5, 7., 2.5, 2.5])
test(backward1,
[ 2.5, 2.5, 2.5, 3., 2.5, 2.5, 6., 2.5, 2.5, 9. ])
test(backward2,
[ 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5])
assert empty_assign() == 14
last = last_value()
print(last, 9)
if __name__ == '__main__':
run()
########NEW FILE########
__FILENAME__ = test_print
########NEW FILE########
__FILENAME__ = test_py2raise
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
import traceback
import unittest
from numba import autojit
# ______________________________________________________________________
# Helpers
class SpecialException(Exception):
pass
def make_tb():
try:
raise SpecialException # hee hee hee
except:
type, val, tb = sys.exc_info()
return tb
# ______________________________________________________________________
@autojit
def raise1():
raise SpecialException
@autojit
def raise2():
raise SpecialException("hello")
# ______________________________________________________________________
class TestRaise(unittest.TestCase):
def _assert_raises(self, func, expected_args):
try:
func()
except SpecialException as e:
assert e.args == tuple(expected_args), (e.args, expected_args)
else:
raise AssertionError("Expected exception")
def test_raise(self):
self._assert_raises(raise1, [])
self._assert_raises(raise2, ["hello"])
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_addressof
# -*- coding: utf-8 -*-
"""
Test numba.addressof().
"""
from __future__ import print_function, division, absolute_import
import ctypes
import numba
from numba import *
@jit(int32(int32, int32))
def func(a, b):
return a * b
@autojit
def error_func():
pass
# TODO: struct pointer support
before_computed_column = struct_([
('x', float32),
('y', float32)])
with_computed_column = struct_([
('mean', float32),
('x', float32),
('y', float32)])
signature = void(with_computed_column.ref(),
before_computed_column.ref())
# @jit(signature, nopython=True)
def cc_kernel(dst, src):
dst.mean = (src.x + src.y) / 2.0
dst.x = src.x
dst.y = src.y
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
def test_addressof(arg):
"""
>>> func = test_addressof(func)
>>> assert func.restype == ctypes.c_int32
>>> assert func.argtypes == (ctypes.c_int32, ctypes.c_int32)
>>> func(5, 2)
10
"""
return numba.addressof(arg)
def test_addressof_error(arg, **kwds):
"""
>>> test_addressof_error(error_func)
Traceback (most recent call last):
...
TypeError: Object is not a jit function
>>> test_addressof_error(func, propagate=False)
Traceback (most recent call last):
...
ValueError: Writing unraisable exceptions is not yet supported
"""
return numba.addressof(arg, **kwds)
def test_address_of_struct_function():
S1 = before_computed_column.to_ctypes()
S2 = with_computed_column.to_ctypes()
ctypes_kernel = numba.addressof(cc_kernel)
s1 = S1(10, 5)
s2 = S2(0, 0, 0)
ctypes_kernel(s2, s1)
assert s2.x == s1.x
assert s2.y == s1.y
assert s2.mean == (s1.x + s1.y) / 2.0
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_ast_arrays
#! /usr/bin/env python
# ______________________________________________________________________
'''test_ast_arrays
Test the Numba AST backend on NumPy array indexing via a naive matrix multiply.
'''
# ______________________________________________________________________
import unittest
import numba
from numba import *
from numba.decorators import autojit
import numpy as np
def _matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
matmulcore = autojit(backend='ast')(_matmulcore)
class TestASTArrays(unittest.TestCase):
def test_numba(self):
A = np.arange(16, dtype=np.float32).reshape(4, 4)
B = np.arange(16, dtype=np.float32).reshape(4, 4)
C = np.zeros(16, dtype=np.float32).reshape(4, 4)
Gold = C.copy()
_matmulcore(A, B, Gold) # oracle
matmulcore(A, B, C)
self.assertTrue(np.all(C == Gold))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_ast_forloop
#! /usr/bin/env python
# ______________________________________________________________________
'''test_ast_forloop
Test the Numba compiler on a simple for loop over an iterable object.
'''
# ______________________________________________________________________
import unittest
from numba import autojit
import numpy as np
# ______________________________________________________________________
def _for_loop_fn_0 (iterable):
acc = 0.
for value in iterable:
acc += value
return acc
# ______________________________________________________________________
def _for_loop_fn_1 (start, stop, inc):
acc = 0
for value in range(start, stop, inc):
acc += value
return acc
@autojit
def for_loop_fn_1a (start, stop):
acc = 0
for value in range(start, stop):
acc += value
return acc
@autojit
def for_loop_fn_1b (stop):
acc = 0
for value in range(stop):
acc += value
return acc
# ______________________________________________________________________
def _for_loop_fn_2 (stop):
acc = 0
for value_0 in range(stop):
for value_1 in range(stop):
acc += value_0 * value_1
return acc
# ______________________________________________________________________
def _for_loop_fn_3 (stop):
acc = 0
for i in range(stop):
for j in range(stop):
for k in range(stop):
for l in range(stop):
acc += 1
return acc
for_loop_fn_0 = autojit(backend='ast')(_for_loop_fn_0)
for_loop_fn_1 = autojit(backend='ast')(_for_loop_fn_1)
for_loop_fn_2 = autojit(backend='ast')(_for_loop_fn_2)
for_loop_fn_3 = autojit(backend='ast')(_for_loop_fn_3)
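# The undecorated _for_loop_fn_* definitions above serve as pure-Python
# oracles; the tests below compare each compiled version against them.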
# ______________________________________________________________________
class TestForLoop(unittest.TestCase):
# @unittest.skipUnless(__debug__, "Requires implementation of iteration "
# "over arrays.")
def test_compiled_for_loop_fn_0(self):
for dtype in (np.float32, np.float64, np.int32, np.int64):
test_data = np.arange(10, dtype=dtype)
result = for_loop_fn_0(test_data)
self.assertEqual(result, 45)
self.assertEqual(result, _for_loop_fn_0(test_data))
def test_compiled_for_loop_fn_1(self):
result = for_loop_fn_1(1, 4, 1)
self.assertEqual(result, 6)
self.assertEqual(result, _for_loop_fn_1(1, 4, 1))
def test_compiled_for_loop_fn_2(self):
result = for_loop_fn_2(4)
self.assertEqual(result, 36)
self.assertEqual(result, _for_loop_fn_2(4))
def test_compiled_for_loop_fn_3(self):
result = for_loop_fn_3(3)
self.assertEqual(result, _for_loop_fn_3(3))
self.assertEqual(result, 81)
def test_compiled_for_loop_fn_many(self):
for lo in xrange( -10, 11 ):
for hi in xrange( -10, 11 ):
for step in xrange( -20, 21 ):
if step:
self.assertEqual(for_loop_fn_1(lo, hi, step),
for_loop_fn_1.py_func(lo, hi, step),
'failed for %d/%d/%d' % (lo, hi, step))
self.assertEqual(for_loop_fn_1a(lo, hi),
for_loop_fn_1a.py_func(lo, hi),
'failed for %d/%d' % (lo, hi))
self.assertEqual(for_loop_fn_1b(hi),
for_loop_fn_1b.py_func(hi),
'failed for %d' % hi)
# ______________________________________________________________________
if __name__ == "__main__":
print((for_loop_fn_2(10)))
# print for_loop_fn_2(10.0)
unittest.main()
# ______________________________________________________________________
# End of test_ast_forloop.py
########NEW FILE########
__FILENAME__ = test_ast_getattr
#! /usr/bin/env python
# ______________________________________________________________________
from numba.decorators import autojit
import numpy as np
import numpy
import unittest
# ______________________________________________________________________
def _get_ndarray_ndim(ndarr):
return ndarr.ndim
def _get_ndarray_shape(ndarr):
return ndarr.shape
def _get_ndarray_data(ndarr):
return ndarr.data
def _get_ndarray_2_shape_unpack_0(ndarr):
dim0, _ = ndarr.shape
return dim0
def _get_ndarray_2_shape_unpack_1(ndarr):
_, dim1 = ndarr.shape
return dim1
get_ndarray_ndim = autojit(backend='ast')(_get_ndarray_ndim)
get_ndarray_shape = autojit(backend='ast')(_get_ndarray_shape)
get_ndarray_data = autojit(backend='ast')(_get_ndarray_data)
get_ndarray_2_shape_unpack_0 = autojit(backend='ast')(_get_ndarray_2_shape_unpack_0)
get_ndarray_2_shape_unpack_1 = autojit(backend='ast')(_get_ndarray_2_shape_unpack_1)
# ______________________________________________________________________
class TestGetattr(unittest.TestCase):
def test_getattr_ndim(self):
args = [
np.empty((2,)),
np.empty((2, 2)),
]
for arg in args:
expect = _get_ndarray_ndim(arg)
got = get_ndarray_ndim(arg)
self.assertEqual(got, expect)
def test_getattr_shape(self):
args = [
np.empty((10,)),
np.empty((10, 20)),
]
for arg in args:
expect = _get_ndarray_shape(arg)
got = get_ndarray_shape(arg)
for i, _ in enumerate(expect):
print(i)
self.assertEqual(got[i], expect[i])
def test_getattr_shape_unpack(self):
args = [
np.empty((1, 2))
]
for arg in args:
expect_dim0 = get_ndarray_2_shape_unpack_0(arg)
expect_dim1 = get_ndarray_2_shape_unpack_1(arg)
got_dim0 = _get_ndarray_2_shape_unpack_0(arg)
got_dim1 = _get_ndarray_2_shape_unpack_1(arg)
expect = expect_dim0, expect_dim1
got = got_dim0, got_dim1
self.assertEqual(got, expect)
def test_getattr_data_1(self):
expect = [1., 2., 3.]
test_data = numpy.array([1., 2., 3.])
got = get_ndarray_data(test_data)
# this returns a buffer object
# _get_ndarray_data(test_data)
for i, _ in enumerate(expect):
self.assertEqual(got[i], expect[i])
def test_getattr_data_2(self):
expect = [float(x) for x in range(6)]
test_data = numpy.array(expect).reshape((2, 3))
got = get_ndarray_data(test_data)
for i, v in enumerate(expect):
self.assertEqual(got[i], v)
# ______________________________________________________________________
if __name__ == "__main__":
# TestGetattr('test_getattr_data_1').debug()
unittest.main()
# ______________________________________________________________________
# End of test_ast_getattr.py
########NEW FILE########
__FILENAME__ = test_autojit
"""
>>> autojit_as_arg(autojit_arg, 0.0)
10.0
>>> jit_as_arg(jit_arg, 0.0)
10.0
"""
from numba import *
@autojit(nopython=True)
def autojit_arg(result):
return result + 1
@jit(float_(float_))
def jit_arg(result):
return result + 1
@autojit(nopython=True)
def autojit_as_arg(autojit_arg, value):
result = value
for i in range(10):
result = autojit_arg(result)
return result
@autojit(nopython=True)
def jit_as_arg(jit_arg, value):
result = value
for i in range(10):
result = jit_arg(result)
return result
if __name__ == "__main__":
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_avg2d
#! /usr/bin/env python
# ______________________________________________________________________
'''Unit test for issue #30.'''
# ______________________________________________________________________
import numpy
from numba import f8
from numba.decorators import jit, autojit
from numba.testing import test_support
import unittest
import __builtin__
# ______________________________________________________________________
def avg2d(arr, result):
M, N = arr.shape
for i in range(M):
avg = 0.
for j in range(N):
avg += arr[i,j]
result[i] = avg / N
# ______________________________________________________________________
def avg2d_w_cast(arr, result):
M, N = arr.shape
for i in range(M):
avg = 0.
for j in range(N):
avg += arr[i,j]
result[i] = avg / float(N)
# ______________________________________________________________________
class TestAvg2DAST(test_support.ASTTestCase):
def _do_test(self, _avg2d, compiled_fn):
test_data = numpy.random.random((5,5))
control_result = numpy.zeros((5,))
        test_result = control_result.copy()  # real copy; a [:] slice of an ndarray is only a view
_avg2d(test_data, control_result)
compiled_fn(test_data, test_result)
self.assertTrue((control_result == test_result).all())
def test_avg2d(self):
compiled_fn = self.jit(argtypes = [f8[:,:], f8[:]])(avg2d)
self._do_test(avg2d, compiled_fn)
def test_avg2d_w_cast(self):
compiled_fn = self.jit(argtypes = [f8[:,:], f8[:]])(avg2d_w_cast)
self._do_test(avg2d_w_cast, compiled_fn)
# ______________________________________________________________________
if __name__ == "__main__":
# TestAvg2DAST('test_avg2d_w_cast').debug()
unittest.main()
# ______________________________________________________________________
# End of test_avg2d.py
########NEW FILE########
__FILENAME__ = test_const_folding
import unittest
import ast, inspect
import numpy as np
from numba import utils, decorators, environment, pipeline
from numba import typesystem
from numba import *
def cf_1():
return 1 + 2
def cf_2():
return 1 + 2 - 3 * 4 / 5 // 6 ** 7
def cf_3():
return 0xbad & 0xbeef | 0xcafe
def cf_4():
return True or False
def cf_5():
return 1 != 2 and 3 < 4 and 5 > 8 / 9
M = 1
def cf_6():
return M + 2
def cf_7():
N = 1
return N + 2
def cf_8():
N = 1
N += 2 # invalidate N
return N + 3
def cf_9():
i = 1
j = 2
k = 3 # the only constant
for i, n in range(10):
j += 0
return i + k
def cf_10():
i = j = 123
return i + j
def cf_11(M):
return M + 2
def cf_12(a):
M = a
return M + 2
class TestConstFolding(unittest.TestCase):
env = environment.NumbaEnvironment.get_environment()
def run_pipeline(self, func):
func_sig = typesystem.function(typesystem.void, [])
source = inspect.getsource(func)
astree = ast.parse(source)
with environment.TranslationContext(self.env, func, astree, func_sig):
pipeline_callable = self.env.get_or_add_pipeline(
'const_folding', pipeline.ConstFolding)
astree = pipeline.AST3to2()(astree, self.env)
ret_val = pipeline_callable(astree, self.env)
return ret_val
def iter_all(self, astree, target):
for node in ast.walk(astree):
if isinstance(node, target):
yield node
def test_cf_1(self):
astree = self.run_pipeline(cf_1)
print((utils.pformat_ast(astree)))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (1 + 2))
def test_cf_2(self):
astree = self.run_pipeline(cf_2)
print((utils.pformat_ast(astree)))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (1 + 2 - 3 * 4 / 5 // 6 ** 7))
def test_cf_3(self):
astree = self.run_pipeline(cf_3)
print((utils.pformat_ast(astree)))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (0xbad & 0xbeef | 0xcafe))
def test_cf_4(self):
astree = self.run_pipeline(cf_4)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
self.assertEqual(len(names), 1)
self.assertEqual(names[0].id, 'True')
def test_cf_5(self):
astree = self.run_pipeline(cf_5)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
self.assertEqual(len(names), 1)
self.assertEqual(names[0].id, str(1 != 2 and 3 < 4 and 5 > 8 / 9))
def test_cf_6(self):
astree = self.run_pipeline(cf_6)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 0)
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (1 + 2))
def test_cf_7(self):
astree = self.run_pipeline(cf_7)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
# No removal of constant assignment
self.assertEqual(len(names), 1)
self.assertEqual(len(nums), 2)
self.assertEqual(nums[1].n, (1 + 2))
def test_cf_8(self):
astree = self.run_pipeline(cf_8)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 3)
self.assertEqual(len(nums), 3)
for name in names:
self.assertEqual(name.id, 'N')
for i, num in enumerate(nums):
self.assertEqual(num.n, i + 1)
def test_cf_9(self):
astree = self.run_pipeline(cf_9)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 8)
self.assertEqual(len(nums), 6)
def test_cf_10(self):
astree = self.run_pipeline(cf_10)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 2)
self.assertEqual(len(nums), 2)
def test_cf_11(self):
astree = self.run_pipeline(cf_11)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 2)
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (2))
def test_cf_12(self):
astree = self.run_pipeline(cf_12)
print((utils.pformat_ast(astree)))
names = list(self.iter_all(astree, ast.Name))
nums = list(self.iter_all(astree, ast.Num))
self.assertEqual(len(names), 4)
self.assertEqual(len(nums), 1)
self.assertEqual(nums[0].n, (2))
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_cstring
#! /usr/bin/env python
# ______________________________________________________________________
from numba import *
from numba import string_ as cstring, int_
from nose.tools import nottest
from numba.testing import test_support
# ______________________________________________________________________
def convert(input_str):
return int(input_str[0:5])
# ______________________________________________________________________
def fast_convert(input_str):
with nopython:
return int(input_str[0:5])
# ______________________________________________________________________
class TestCString(test_support.ASTTestCase):
def test_convert(self, **kws):
jit_convert = self.jit(argtypes = (cstring,), restype = int_, **kws)(
convert)
for exp in xrange(10):
test_str = str(10 ** exp)
self.assertEqual(jit_convert(test_str), convert(test_str))
def test_convert_nopython(self, **kws):
jit_convert = self.jit(argtypes = (cstring,), restype = int_, **kws)(
fast_convert)
for exp in xrange(10):
test_str = str(10 ** exp)
self.assertEqual(jit_convert(test_str), convert(test_str))
# ______________________________________________________________________
if __name__ == "__main__":
# TestCString('test_convert').debug()
test_support.main()
# ______________________________________________________________________
# End of test_cstring.py
########NEW FILE########
__FILENAME__ = test_datetime
import datetime  # used by the create_python_datetime* helpers below
import numpy
import numba
import numba.vectorize
from numba.vectorize import vectorize
@numba.autojit(nopython=True)
def datetime_identity(datetime):
return datetime
@numba.autojit(nopython=True)
def timedelta_identity(delta):
return delta
@numba.autojit(nopython=True)
def create_python_datetime(year, month, day, hour, min, sec):
return datetime.datetime(year, month, day, hour, min, sec)
@numba.autojit(nopython=True)
def create_numpy_datetime(datetime_str):
return numpy.datetime64(datetime_str)
@numba.autojit(nopython=True)
def create_numpy_timedelta(delta, units):
return numpy.timedelta64(delta, units)
@numba.autojit(nopython=True)
def create_python_datetime_from_string(datetime_str):
year = datetime_str[0:4]
month = datetime_str[5:7]
day = datetime_str[8:10]
hour = datetime_str[11:13]
min = datetime_str[14:16]
sec = datetime_str[18:20]
return datetime.datetime(int(year), int(month), int(day),
int(hour), int(min), int(sec))
@numba.autojit(nopython=True)
def create_numpy_datetime_from_string(datetime_str):
year = datetime_str[0:4]
month = datetime_str[5:7]
day = datetime_str[8:10]
hour = datetime_str[11:13]
min = datetime_str[14:16]
sec = datetime_str[18:20]
return numpy.datetime64('{0}-{1}-{2}T{3}:{4}:{5}Z'.format(year, month, day,
hour, min, sec))
@numba.autojit(nopython=True)
def extract_year(date):
return date.year
@numba.autojit(nopython=True)
def extract_month(date):
return date.month
@numba.autojit(nopython=True)
def extract_day(date):
return date.day
@numba.autojit(nopython=True)
def extract_hour(date):
return date.hour
@numba.autojit(nopython=True)
def extract_min(date):
return date.min
@numba.autojit(nopython=True)
def extract_sec(date):
return date.sec
@numba.autojit(nopython=True)
def datetime_delta(d0, d1):
return d1 - d0
@numba.autojit(nopython=True)
def datetime_add_timedelta(d, t):
return d + t
@numba.autojit(nopython=True)
def datetime_subtract_timedelta(d, t):
return d - t
# JNB: vectorize doesn't work for struct-like types right now
#@vectorize([numba.datetime(units='D')(numba.datetime(units='D'))])
def ufunc_inc_day(a):
return a + numpy.timedelta64(1, 'D')
@numba.jit(numba.int64(numba.string_), nopython=True)
def cast_datetime_to_int(datetime_str):
x = numpy.datetime64(datetime_str)
return x
@numba.autojit(nopython=True)
def datetime_array_index(datetimes, index):
return datetimes[index]
@numba.jit(numba.datetime(units='M')(numba.datetime(units='M')[:], numba.int_),
nopython=True)
def datetime_array_index2(datetimes, index):
return datetimes[index]
@numba.autojit(nopython=True)
def timedelta_array_index(timedeltas, index):
return timedeltas[index]
@numba.jit(numba.timedelta(units='M')(numba.timedelta(units='M')[:], numba.int_),
nopython=True)
def timedelta_array_index2(timedeltas, index):
return timedeltas[index]
def test_datetime():
datetime = numpy.datetime64('2014-01-01')
assert datetime_identity(datetime) == datetime
delta = numpy.timedelta64(1)
assert timedelta_identity(delta) == delta
datetime_str = '2014'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
datetime_str = '2014-01'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
datetime_str = '2014-01-02'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
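    # NumPy 1.6 has only limited datetime64/timedelta64 unit handling, so the
    # unit-specific cases below are guarded out on that version (assumption
    # based on the version checks used throughout this test).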
if numpy.version.version[0:3] != '1.6':
datetime_str = '2014-01-02T03Z'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
datetime_str = '2014-01-02T03:04Z'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
datetime_str = '2014-01-02T03:04:05Z'
datetime = numpy.datetime64(datetime_str)
control = numpy.datetime64(datetime_str)
assert create_numpy_datetime(datetime_str) == control
# JNB: string concatenation doesn't work right now
#assert create_numpy_datetime_from_string(datetime_str) == control
if numpy.version.version[0:3] != '1.6':
control = numpy.timedelta64(2014, 'Y')
assert create_numpy_timedelta(2014, 'Y') == control
control = numpy.timedelta64(100, 'M')
assert create_numpy_timedelta(100, 'M') == control
control = numpy.timedelta64(10000, 'D')
assert create_numpy_timedelta(10000, 'D') == control
control = numpy.timedelta64(100, 'h')
assert create_numpy_timedelta(100, 'h') == control
control = numpy.timedelta64(100, 'm')
assert create_numpy_timedelta(100, 'm') == control
control = numpy.timedelta64(100, 's')
assert create_numpy_timedelta(100, 's') == control
datetime_str = '2014-01-02T03:04:05Z'
assert extract_year(numpy.datetime64(datetime_str)) == 2014
assert extract_month(numpy.datetime64(datetime_str)) == 1
assert extract_day(numpy.datetime64(datetime_str)) == 2
assert extract_hour(numpy.datetime64(datetime_str)) == 3
assert extract_min(numpy.datetime64(datetime_str)) == 4
assert extract_sec(numpy.datetime64(datetime_str)) == 5
datetime1 = numpy.datetime64('2014')
datetime2 = numpy.datetime64('2015')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime1 = numpy.datetime64('2014-01')
datetime2 = numpy.datetime64('2015-01')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime1 = numpy.datetime64('2014-01-01')
datetime2 = numpy.datetime64('2015-01-02')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime1 = numpy.datetime64('2014-01-01T01Z')
datetime2 = numpy.datetime64('2015-01-04T02Z')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime1 = numpy.datetime64('2014-01-01T01:01Z')
datetime2 = numpy.datetime64('2015-01-04T02:02Z')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime1 = numpy.datetime64('2014-01-01T01:01:01Z')
datetime2 = numpy.datetime64('2015-01-04T02:02:02Z')
control = datetime2 - datetime1
assert datetime_delta(datetime1, datetime2) == control
datetime = numpy.datetime64('2014-01-01')
if numpy.version.version[0:3] != '1.6':
timedelta = numpy.timedelta64(1, 'D')
else:
timedelta = numpy.timedelta64(1)
control = datetime + timedelta
assert datetime_add_timedelta(datetime, timedelta) == control
datetime = numpy.datetime64('2014-01-01T01:02:03Z')
if numpy.version.version[0:3] != '1.6':
timedelta = numpy.timedelta64(-10000, 's')
else:
timedelta = numpy.timedelta64(-10000)
control = datetime + timedelta
assert datetime_add_timedelta(datetime, timedelta) == control
datetime = numpy.datetime64('2014')
if numpy.version.version[0:3] != '1.6':
timedelta = numpy.timedelta64(10, 'Y')
else:
timedelta = numpy.timedelta64(10)
control = datetime - timedelta
assert datetime_subtract_timedelta(datetime, timedelta) == control
datetime = numpy.datetime64('2014-01-01T01:02:03Z')
if numpy.version.version[0:3] != '1.6':
timedelta = numpy.timedelta64(-10000, 'm')
else:
timedelta = numpy.timedelta64(-10000)
control = datetime - timedelta
assert datetime_subtract_timedelta(datetime, timedelta) == control
datetime_str ='2014'
datetime = numpy.datetime64(datetime_str)
assert cast_datetime_to_int(datetime_str) == \
int(numpy.array(datetime, numpy.int64))
# cast datetime to number of days since epoch
datetime_str ='2014-01-01'
datetime = numpy.datetime64(datetime_str)
assert cast_datetime_to_int(datetime_str) == \
int(numpy.array(datetime, numpy.int64))
# cast datetime to number of seconds since epoch
datetime_str ='2014-01-02T03:04:05Z'
datetime = numpy.datetime64(datetime_str)
assert cast_datetime_to_int(datetime_str) == \
int(numpy.array(datetime, numpy.int64))
datetimes = numpy.array(['2014-01', '2014-02', '2014-03'],
dtype=numpy.datetime64)
assert datetime_array_index(datetimes, 0) == datetimes[0]
assert datetime_array_index2(datetimes, 1) == datetimes[1]
timedeltas = numpy.array([1, 2, 3], dtype='m8[M]')
assert timedelta_array_index(timedeltas, 0) == timedeltas[0]
assert timedelta_array_index2(timedeltas, 1) == timedeltas[1]
# JNB: vectorize doesn't work for struct-like types right now
#array = numpy.array(['2014-01-01', '2014-01-02', '2014-01-03'],
# dtype=numpy.datetime64)
#assert ufunc_inc_day(array) == numpy.array(
# ['2014-01-02', '2014-01-03', '2014-01-04'], dtype=numpy.datetime64)
if __name__ == "__main__":
test_datetime()
########NEW FILE########
__FILENAME__ = test_diffusion
import unittest
import numpy as np
from numba import autojit
mu = 0.1
Lx, Ly = 101, 101
@autojit
def diffusionObstacleStep(u,tempU,iterNum):
for n in range(iterNum):
for i in range(1, Lx - 1):
for j in range(1, Ly - 1):
u[i,j] = mu * (tempU[i+1,j]-2*tempU[i,j]+tempU[i-1,j] +
tempU[i,j+1]-2*tempU[i,j]+tempU[i,j-1])
# Bug in Meta??
# tempU, u = u, tempU
# -> Assign(targets=[Name(id='tempU', ctx=Store()),
# Name(id='u', ctx=Store())],
# value=Name(id='u', ctx=Load()))
temp = u
u = tempU
tempU = temp
def get_arrays():
u = np.zeros([Lx, Ly], dtype=np.float64)
tempU = np.zeros([Lx, Ly], dtype=np.float64)
    u[Lx // 2, Ly // 2] = 1000.0  # floor division keeps the index integral under Python 3 as well
return tempU, u
def test_diffusion():
tempU, u = get_arrays()
iterNum = 10
diffusionObstacleStep(u, tempU, iterNum)
tempU_numpy, u_numpy = get_arrays()
diffusionObstacleStep.py_func(u_numpy, tempU_numpy, iterNum)
print(u)
print(u_numpy)
assert np.allclose(u, u_numpy)
if __name__ == "__main__":
test_diffusion()
########NEW FILE########
__FILENAME__ = test_dot
# issue: #33
# Thanks to Stefan van der Walt
import numpy as np
from numba.decorators import jit
from numba import float64, int32
@jit(argtypes=[float64[:, :], float64[:, :], float64[:, :]])
def ndot(A, B, out):
rows_A, cols_A = A.shape
rows_B, cols_B = B.shape
# Take each row in A
for i in range(rows_A):
# And multiply by every column in B
for j in range(cols_B):
s = 0.0
for k in range(cols_A):
s = s + A[i, k] * B[k, j]
out[i, j] = s
return out
def test_dot():
A = np.random.random((10, 10))
B = np.random.random((10, 10))
C = np.empty_like(A)
assert np.allclose(np.dot(A, B), ndot(A, B, C))
if __name__ == '__main__':
test_dot()
########NEW FILE########
__FILENAME__ = test_exceptions
"""
>>> boom()
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'boom'
>>> boom2()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
>>> boom3()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
"""
import sys
import ctypes
from numba import *
import numpy as np
@autojit(backend='ast')
def boom():
return int('boom')
@jit(int_())
def boom2():
return object()('boom')
@jit(complex128())
def boom3():
return object()('boom')
if __name__ == "__main__":
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_extern_call
#! /usr/bin/env python
# ______________________________________________________________________
'''test_extern_call
Unit tests checking on Numba's code generation for Python/Numpy C-API calls.
'''
# ______________________________________________________________________
import sys
import unittest
from numba import *
import numpy
from numba.decorators import jit, autojit
from numba.testing import test_support
# ______________________________________________________________________
def call_zeros_like(arr):
return numpy.zeros_like(arr)
# ______________________________________________________________________
def call_len(arr):
return len(arr)
@autojit(backend='ast')
def func1(arg):
return arg * 2
@autojit(backend='ast')
def func2(arg):
return func1(arg + 1)
# ______________________________________________________________________
class TestASTExternCall(test_support.ASTTestCase):
def test_call_zeros_like(self):
testarr = numpy.array([1., 2, 3, 4, 5], dtype=numpy.double)
testfn = self.jit(argtypes = [double[:]], restype = double[:])(
call_zeros_like)
print((sys.getrefcount(testarr)))
result = testfn(testarr)
print((sys.getrefcount(testarr)))
print((sys.getrefcount(result)))
self.assertTrue((result == numpy.zeros_like(testarr)).all())
def test_call_len(self):
testarr = numpy.arange(10.)
testfn = self.jit(argtypes = [double[:]], restype = long_)(
call_len)
self.assertEqual(testfn(testarr), 10)
def test_numba_calls_numba(self):
self.assertEqual(func2(3), 8)
self.assertEqual(func2(2+3j), (3+3j)*2)
# ______________________________________________________________________
if __name__ == "__main__":
# TestASTExternCall('test_call_zeros_like').debug()
unittest.main()
# ______________________________________________________________________
# End of test_extern_call.py
########NEW FILE########
__FILENAME__ = test_fbcorr
#! /usr/bin/env python
# ______________________________________________________________________
'''test_fbcorr
Test the fbcorr() (filter-bank correlation) example.
'''
# ______________________________________________________________________
import numpy as np
import numba
from numba.decorators import jit
nd4type = numba.double[:,:,:,:]
import sys
import unittest
# ______________________________________________________________________
def fbcorr(imgs, filters, output):
n_imgs, n_rows, n_cols, n_channels = imgs.shape
n_filters, height, width, n_ch2 = filters.shape
for ii in range(n_imgs):
for rr in range(n_rows - height + 1):
for cc in range(n_cols - width + 1):
for hh in xrange(height):
for ww in xrange(width):
for jj in range(n_channels):
for ff in range(n_filters):
imgval = imgs[ii, rr + hh, cc + ww, jj]
filterval = filters[ff, hh, ww, jj]
output[ii, ff, rr, cc] += imgval * filterval
# ______________________________________________________________________
class TestFbcorr(unittest.TestCase):
def test_vectorized_fbcorr(self):
ufbcorr = jit(argtypes=(nd4type, nd4type, nd4type))(fbcorr)
imgs = np.random.randn(10, 16, 16, 3)
filt = np.random.randn(6, 5, 5, 3)
old_output = np.zeros((10, 6, 15, 15))
fbcorr(imgs, filt, old_output)
new_output = np.zeros((10, 6, 15, 15))
ufbcorr(imgs, filt, new_output)
self.assertTrue((abs(old_output - new_output) < 1e-9).all())
def test_catch_error(self):
imgs = np.random.randn(10, 64, 64, 3)
filt = np.random.randn(6, 5, 5, 3)
#incorrect channel-minor format?
old_output = np.zeros((10, 60, 60, 6))
try:
fbcorr(imgs, filt, old_output)
except IndexError as e:
print(('This test produced the error "' + repr(e) + '"'))
else:
raise Exception('This should have produced an error.')
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_fbcorr.py
########NEW FILE########
__FILENAME__ = test_filter2d
#! /usr/bin/env python
# ______________________________________________________________________
'''test_filter2d
Test the filter2d() example from the PyCon'12 slide deck.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
import sys
import unittest
# ______________________________________________________________________
def filter2d(image, filt):
M, N = image.shape
Mf, Nf = filt.shape
Mf2 = Mf // 2
Nf2 = Nf // 2
result = numpy.zeros_like(image)
for i in range(Mf2, M - Mf2):
for j in range(Nf2, N - Nf2):
num = 0.0
for ii in range(Mf):
for jj in range(Nf):
num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
result[i, j] = num
return result
# ______________________________________________________________________
class TestFilter2d(unittest.TestCase):
def test_vectorized_filter2d(self):
ufilter2d = jit(argtypes=[double[:,:], double[:,:]],
restype=double[:,:])(filter2d)
image = numpy.random.random((50, 50))
filt = numpy.random.random((5, 5))
filt /= filt.sum()
plain_old_result = filter2d(image, filt)
hot_new_result = ufilter2d(image, filt)
self.assertTrue((abs(plain_old_result - hot_new_result) < 1e-9).all())
# ______________________________________________________________________
@autojit
def func():
return numpy.empty(10)
if __name__ == "__main__":
# func()
# TestFilter2d('test_vectorized_filter2d').debug()
unittest.main(*sys.argv[1:])
# ______________________________________________________________________
# End of test_filter2d.py
########NEW FILE########
__FILENAME__ = test_forces
# issue: #68
# Thanks to tpievila
from numba import *
import numpy as np
from numpy.random import randn
from numpy import zeros, double
nple = 64
k_fene = 15.0
R0 = 2.0
R0_2 = R0*R0
sigma = 1.0
sigma2 = sigma*sigma
sigma6 = sigma2*sigma2*sigma2
sigma12 = sigma6*sigma6
dpart = 1.12246*sigma
rcut = dpart
r2cut = dpart*dpart
k_fene = 15.0
force = 5.0 # Constant bias force in ext_force
width = 0.8775 # for ext force
@autojit()
def ext_force1(x, y, extx, exty):
for i in xrange(nple):
if abs(x[i]) > rcut:
extx[i] = 0.0
elif abs(y[i]) > rcut + width:
r2dist = 1.0 / (x[i] * x[i])
r6dist = r2dist * r2dist * r2dist
extx[i] = (x[i] * 48.0 * r6dist * r2dist *
(sigma12 * r6dist - 0.5 * sigma6))
else:
ydista = y[i] - width - rcut
ydistb = y[i] + width + rcut
dist2a = x[i] * x[i] + ydista * ydista
dist2b = x[i] * x[i] + ydistb * ydistb
if dist2b < r2cut or dist2a < r2cut:
if dist2b < r2cut:
ydist = ydistb
r2dist = 1.0 / dist2b
else:
ydist = ydista
r2dist = 1.0 / dist2a
r6dist = r2dist * r2dist * r2dist
lj_factor = 48.0 * r6dist * r2dist * (sigma12 * r6dist - 0.5 * sigma6)
extx[i] = x[i] * lj_factor
exty[i] = ydist * lj_factor
if abs(x[i]) < 0.5 and abs(y[i]) < width + rcut:
extx[i] += force # constant bias force in the x-direction
return extx, exty
@autojit()
def ext_force2(x, y):
extx = object_(zeros(nple, double))
exty = object_(zeros(nple, double))
ext_force1(x, y, extx, exty)
return extx, exty
def test_forces():
extx = zeros(nple, double)
exty = zeros(nple, double)
x, y = randn(nple), randn(nple)
x1, y1 = ext_force1(x, y, extx, exty)
x2, y2 = ext_force2(x, y)
assert np.allclose(x1, x2), x2 - x1
assert np.allclose(y1, y2), y2 - y1
if __name__ == '__main__':
test_forces()
########NEW FILE########
__FILENAME__ = test_forloop
#! /usr/bin/env python
# ______________________________________________________________________
'''test_forloop
Test the Numba compiler on a simple for loop over an iterable object.
'''
# ______________________________________________________________________
from numba import *
from numba.testing import test_support
import numpy
import unittest
try:
import __builtin__ as builtins
except ImportError:
import builtins
# ______________________________________________________________________
def for_loop_fn_0 (iterable):
acc = 0.
for value in iterable:
acc += value
return acc
# ______________________________________________________________________
def for_loop_fn_1 (start, stop, inc):
acc = 0
for value in range(start, stop, inc):
acc += value
return acc
# ______________________________________________________________________
def for_loop_fn_2 (stop):
acc = 0
for value_0 in range(stop):
for value_1 in range(stop):
acc += value_0 * value_1
return acc
# ______________________________________________________________________
def for_loop_fn_3 (stop):
acc = 0
for i in range(stop):
for j in range(stop):
for k in range(stop):
for l in range(stop):
acc += 1
return acc
# ______________________________________________________________________
def for_loop_w_guard_0 (test_input):
'''Test case based on issue #25. See:
https://github.com/numba/numba/issues/25'''
acc = 0.0
for i in range(5):
if i == test_input:
acc += 100.0
return acc
# ______________________________________________________________________
def for_loop_w_guard_1 (test_input):
'''Test case based on issue #25. See:
https://github.com/numba/numba/issues/25'''
acc = 0.0
for i in range(5):
if i == test_input:
acc += 100.0
else:
acc += i
return acc
# ______________________________________________________________________
def for_loop_fn_4(i, u, p, U):
'''Test case for issue #48. See:
https://github.com/numba/numba/issues/48'''
s = 0
t = 0
for j in range(-p, p+2):
if U[i+j] == u:
t = t + 1
if u == U[i+j]:
s = s + 1
if t != s:
s = -1
return s
# ______________________________________________________________________
class TestForLoop(unittest.TestCase):
@test_support.skip_unless(hasattr(builtins, '__noskip__'),
"Requires implementation of iteration "
"over arrays.")
def test_compiled_for_loop_fn_0(self):
test_data = numpy.array([1, 2, 3], dtype = 'l')
compiled_for_loop_fn = jit(restype=f4,
argtypes = [i8[:]],backend='ast')(for_loop_fn_0)
result = compiled_for_loop_fn(test_data)
self.assertEqual(result, 6)
self.assertEqual(result, for_loop_fn_0(test_data))
def test_compiled_for_loop_fn_1(self):
compiled_for_loop_fn = jit(argtypes = [i4, i4, i4],
restype = i4, backend='ast')(for_loop_fn_1)
result = compiled_for_loop_fn(1, 4, 1)
self.assertEqual(result, 6)
self.assertEqual(result, for_loop_fn_1(1, 4, 1))
def test_compiled_for_loop_fn_2(self):
compiled_for_loop_fn = jit(argtypes = [i4],
restype = i4, backend='ast')(for_loop_fn_2)
result = compiled_for_loop_fn(4)
self.assertEqual(result, 36)
self.assertEqual(result, for_loop_fn_2(4))
def test_compiled_for_loop_fn_3(self):
compiled_for_loop_fn = jit(argtypes = [i4],
restype = i4, backend='ast')(for_loop_fn_3)
result = compiled_for_loop_fn(3)
self.assertEqual(result, for_loop_fn_3(3))
self.assertEqual(result, 81)
def test_compiled_for_loop_w_guard_0(self):
compiled_for_loop_w_guard = autojit(backend='ast')(for_loop_w_guard_0)
self.assertEqual(compiled_for_loop_w_guard(5.),
for_loop_w_guard_0(5.))
self.assertEqual(compiled_for_loop_w_guard(4.),
for_loop_w_guard_0(4.))
def test_compiled_for_loop_w_guard_1(self):
compiled_for_loop_w_guard = autojit(backend='ast')(for_loop_w_guard_1)
self.assertEqual(compiled_for_loop_w_guard(5.),
for_loop_w_guard_1(5.))
self.assertEqual(compiled_for_loop_w_guard(4.),
for_loop_w_guard_1(4.))
def test_compiled_for_loop_fn_4(self):
compiled = jit('i4(i4,f8,i4,f8[:])')(for_loop_fn_4)
args0 = 5, 1.0, 2, numpy.ones(10)
self.assertEqual(compiled(*args0), for_loop_fn_4(*args0))
args1 = 5, 1.0, 2, numpy.zeros(10)
self.assertEqual(compiled(*args1), for_loop_fn_4(*args1))
# ______________________________________________________________________
if __name__ == "__main__":
# compiled_for_loop_fn = jit(argtypes = [i4, i4, i4],
# restype = i4, backend='ast', nopython=True)(for_loop_fn_1)
# result = compiled_for_loop_fn(1, 4, 1)
# compiled_for_loop_fn = jit(argtypes = [i4],
# restype = i4, backend='ast')(for_loop_fn_3)
# result = compiled_for_loop_fn(3)
# compiled = jit('i4(i4,f8,i4,f8[:])')(for_loop_fn_4)
# args0 = 5, 1.0, 2, numpy.ones(10)
# print compiled(*args0)
# print for_loop_fn_4(*args0)
unittest.main()
# ______________________________________________________________________
# End of test_forloop.py
########NEW FILE########
__FILENAME__ = test_for_in_range
# Adapted from cython/tests/run/for_in_range.pyx
from numba.testing.test_support import *
@autojit_py3doc(warn=False)
def test_modify():
"""
>>> test_modify()
0
1
2
3
4
<BLANKLINE>
(4, 0)
"""
n = 5
for i in range(n):
print(i)
n = 0
print('')
return i,n
@autojit_py3doc(warn=False)
def test_negindex():
"""
>>> test_negindex()
6
5
4
3
2
(2, 0)
"""
n = 5
for i in range(n+1, 1, -1):
print(i)
n = 0
return i,n
@autojit_py3doc(warn=False)
def test_negindex_inferred():
"""
>>> test_negindex_inferred()
5
4
3
2
(2, 0)
"""
n = 5
for i in range(n, 1, -1):
print(i)
n = 0
return i,n
@autojit_py3doc(warn=False)
def test_fix():
"""
>>> test_fix()
0
1
2
3
4
<BLANKLINE>
4
"""
for i in range(5):
print(i)
print('')
return i
@autojit_py3doc(warn=False)
def test_break():
"""
>>> test_break()
0
1
2
<BLANKLINE>
(2, 0)
"""
n = 5
for i in range(n):
print(i)
n = 0
if i == 2:
break
else:
print("FAILED!")
print('')
return i, n
@autojit_py3doc
def test_else_clause1():
"""
>>> test_else_clause1()
0
1
2
"""
for i in range(10):
if i > 2:
break
print(i)
else:
print("else clause")
@autojit_py3doc
def test_else_clause2():
"""
>>> test_else_clause2()
0
1
2
else clause
"""
for i in range(10):
if i > 2:
continue
print(i)
else:
print("else clause")
@autojit_py3doc
def test_else_clause3():
"""
>>> test_else_clause3()
0
1
2
else clause
"""
for i in range(3):
if i > 2 and i < 2:
continue
print(i)
else:
print("else clause")
@autojit_py3doc(warn=False)
def test_else_clause4():
"""
>>> test_else_clause4()
inner 0
i 0
else clause 1 0 9
i 1
else clause 2 0 9
i 2
else clause 3 0 9
i 3
else clause 4 0 9
i 4
else clause 5 0 9
i 5
else clause 6 0 9
i 6
else clause 7 0 9
i 7
else clause 8 0 9
i 8
else clause 9 0 9
i 9
else clause
"""
for i in range(10):
for j in range(10):
for k in range(10):
if i == j and j == k:
print("inner " + str(i))
break
else:
continue
else:
print("else clause " + str(i) + ' ' + str(j) + ' ' + str(k))
break
else:
print("else clause " + str(i) + ' ' + str(j))
print("i " + str(i))
else:
print("else clause")
@autojit_py3doc
def test_return():
"""
>>> test_return()
0
1
2
(2, 0)
"""
n = 5
for i in range(n):
print(i)
n = 0
if i == 2:
return i,n
print('')
return "FAILED!"
@autojit_py3doc
def test_return2():
"""
>>> test_return2()
0
1
2
2
"""
n = 5
for i in range(n):
print(i)
n = 0
for j in range(n):
return 0
else:
if i < 2:
continue
elif i == 2:
for j in range(i):
return i
print("FAILED!")
print("FAILED!")
print("FAILED!")
return -1
#print test_negindex()
#test_else_clause2()
#test_else_clause3()
#test_else_clause4()
#test_return2()
testmod()
########NEW FILE########
__FILENAME__ = test_getattr
#! /usr/bin/env python
# ______________________________________________________________________
'''
Duplicated tests from test_ast_getattr.py, here using explicit jit signatures.
'''
from numba.llvm_types import _plat_bits
from numba.decorators import jit
import numpy
import unittest
# ______________________________________________________________________
def get_ndarray_ndim(ndarr):
return ndarr.ndim
def get_ndarray_shape(ndarr):
return ndarr.shape
def get_ndarray_data(ndarr):
return ndarr.data
def get_ndarray_2_shape_unpack_0(ndarr):
dim0, _ = ndarr.shape
return dim0
def get_ndarray_2_shape_unpack_1(ndarr):
_, dim1 = ndarr.shape
return dim1
# ______________________________________________________________________
class TestGetattr(unittest.TestCase):
def test_getattr_ndim_1(self):
test_data1 = numpy.array([1., 2., 3.])
compiled_fn1 = jit('i(double[:])')(get_ndarray_ndim)
self.assertEqual(compiled_fn1(test_data1), 1)
def test_getattr_ndim_2(self):
test_data2 = numpy.array([[1., 2., 3.], [4., 5., 6.]])
compiled_fn2 = jit('i(double[:,:])')(get_ndarray_ndim)
self.assertEqual(compiled_fn2(test_data2), 2)
def test_getattr_shape_1(self):
test_data = numpy.array([1., 2., 3.])
compiled_fn = jit('i%d*(f8[:])' % (_plat_bits // 8))(get_ndarray_shape)
result = compiled_fn(test_data)
self.assertEqual(result[0], 3)
def test_getattr_shape_2(self):
test_data2 = numpy.array([[1., 2., 3.], [4., 5., 6.]])
        compiled_fn2 = jit('i%d*(f8[:,:])' % (_plat_bits // 8))(get_ndarray_shape)
result = compiled_fn2(test_data2)
self.assertEqual(result[0], 2)
self.assertEqual(result[1], 3)
def test_getattr_shape_2_unpack(self):
compiler_fn = jit('i%d(d[:,:])' % (_plat_bits // 8))
dim0_fn, dim1_fn = (compiler_fn(fn)
for fn in (get_ndarray_2_shape_unpack_0,
get_ndarray_2_shape_unpack_1))
test_data2 = numpy.array([[1., 2., 3.], [4., 5., 6.]])
self.assertEqual(dim0_fn(test_data2), 2)
self.assertEqual(dim1_fn(test_data2), 3)
def test_getattr_data_1(self):
test_data = numpy.array([1., 2., 3.])
compiled_fn = jit('d*(d[:])')(get_ndarray_data)
result = compiled_fn(test_data)
self.assertEqual(result[0], 1.)
self.assertEqual(result[1], 2.)
self.assertEqual(result[2], 3.)
def test_getattr_data_2(self):
test_data = numpy.array([[1., 2., 3.], [4., 5., 6.]])
compiled_fn = jit('d*(d[:,:])')(get_ndarray_data)
result = compiled_fn(test_data)
self.assertEqual(result[0], 1.)
self.assertEqual(result[1], 2.)
self.assertEqual(result[2], 3.)
self.assertEqual(result[3], 4.)
self.assertEqual(result[4], 5.)
self.assertEqual(result[5], 6.)
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_getattr.py
########NEW FILE########
__FILENAME__ = test_globals_builtins
import unittest
from numba import *
some_global = "hello"
@autojit(backend='ast')
def access_global():
return some_global
@autojit(backend='ast')
def call_abs(num):
return abs(num)
class TestConversion(unittest.TestCase):
def test_globals(self):
result = access_global()
assert result == some_global, result
def test_builtins(self):
result = call_abs(-10)
assert result == 10, result
if __name__ == "__main__":
# TestConversion('test_globals').debug()
unittest.main()
########NEW FILE########
__FILENAME__ = test_if
#! /usr/bin/env python
# ______________________________________________________________________
'''test_if
Test phi node (or similar) generation for CFG joins beyond
if-then-else statements.
'''
# ______________________________________________________________________
from __future__ import print_function
import sys
from numba import *
import unittest
# ______________________________________________________________________
def if_fn_1(arg):
if arg > 0.:
result = 22.
else:
result = 42.
return result
def if_fn_2(i, j):
n = 5
m = 5
if j >= 1 and j < n - 1 and i >= 1 and i < m - 1:
return i + j
return 0xcafe
def if_fn_3(i, j):
n = 5
m = 5
if j >= 1:
if j < n - 1:
if i >= 1:
if i < m - 1:
return i + j
return 0xbeef
def if_fn_4(i, j):
if i < 0 or j < 0:
return i + j
return 0xdead
def if_fn_5(i, j):
if i < 0:
return i + j
if j < 0:
return i + j
return 0xdefaced
def if_fn_6(i, j):
if i < j:
return #
i += j
def if_fn_7(i):
if i:
return i + 1
return i
def if_fn_8(i, j):
if i > j:
return 1
return 0
def if_fn_9(i, j, k):
if i or (j and k):
return 1
return 0
@autojit
def if_bool(b):
if b:
return 1
else:
return 2
@autojit
def if_bool_constant_true():
if True:
return 1
else:
return 2
@autojit
def if_bool_constant_false():
if False:
return 1
else:
return 2
# ______________________________________________________________________
class TestIf(unittest.TestCase):
def test_if_fn_1(self):
if_fn_1c = jit(restype=f4, argtypes=[f4], backend='ast')(if_fn_1)
oracle = if_fn_1
self.assertEqual(if_fn_1c(-1.), if_fn_1(-1.))
self.assertEqual(if_fn_1c(1.), if_fn_1(1.))
def test_if_fn_2(self):
if_fn_2c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_2)
oracle = if_fn_2
for i in range(6):
for j in range(6):
self.assertEqual(if_fn_2c(i, j), oracle(i, j))
def test_if_fn_3(self):
if_fn_3c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_3)
oracle = if_fn_3
for i in range(6):
for j in range(6):
self.assertEqual(if_fn_3c(i, j), oracle(i, j))
def test_if_fn_4(self):
try:
if sys.version_info[:2] < (2, 7):
raise ImportError
from meta.decompiler import decompile_func
except ImportError:
print("Skipping if test, meta not available", file=sys.stderr)
else:
import ast, inspect
if_fn_4c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_4)
oracle = if_fn_4
for i in range(-3, 3):
for j in range(-3, 3):
self.assertEqual(if_fn_4c(i, j), oracle(i, j))
def test_if_fn_5(self):
if_fn_5c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_5)
oracle = if_fn_5
for i in range(-3, 3):
for j in range(-3, 3):
self.assertEqual(if_fn_5c(i, j), oracle(i, j))
def test_if_fn_6(self):
if_fn_6c = jit(restype=void, argtypes=[i4, i4], backend='ast')(if_fn_6)
def test_if_fn_7(self):
# if_fn_7c = jit(restype=i4, argtypes=[i4], backend='ast')(if_fn_7)
if_fn_7c = autojit(if_fn_7)
oracle = if_fn_7
for i in range(-3, 3):
self.assertEqual(if_fn_7c(i), oracle(i))
self.assertEqual(if_fn_7c(float(i)), oracle(float(i)))
# self.assertEqual(if_fn_7c(i+1j), oracle(i+1j))
def test_if_fn_8(self):
if_fn_5c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_8)
oracle = if_fn_8
for i in range(-3, 3):
for j in range(-3, 3):
self.assertEqual(if_fn_5c(i, j), oracle(i, j))
def test_if_fn_9(self):
if_fn_5c = jit(restype=i4, argtypes=[i4, i4, i4], backend='ast')(
if_fn_9)
oracle = if_fn_9
for i in range(-2, 2):
for j in range(-2, 2):
for k in range(-2, 2):
self.assertEqual(if_fn_5c(i, j, k), oracle(i, j, k))
def test_if_bool(self):
self.assertEqual(if_bool(True), 1)
self.assertEqual(if_bool(False), 2)
self.assertEqual(if_bool_constant_true(), 1)
self.assertEqual(if_bool_constant_false(), 2)
# ______________________________________________________________________
if __name__ == "__main__":
# if_fn_1c = jit(restype=f4, argtypes=[f4], backend='ast')(if_fn_1)
# if_fn_4c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_4)
# if_fn_5c = jit(restype=i4, argtypes=[i4, i4], backend='ast')(if_fn_5)
# if_fn_6c = jit(restype=void, argtypes=[i4, i4], backend='ast')(if_fn_6)
# if_fn_7c = jit(restype=i4, argtypes=[i4], backend='ast')(if_fn_7)
# print if_fn_7c(-2), if_fn_7(-2)
# print if_bool_constant_true()
unittest.main()
# ______________________________________________________________________
# End of test_if.py
########NEW FILE########
__FILENAME__ = test_ifexp
from itertools import product
from numba import autojit
def _make_test(f):
def test_ifexp():
f_ = autojit(f)
for args in product(range(3), range(3)):
assert f_(*args)==f(*args)
test_ifexp.__name__ = f.__name__
return test_ifexp
@_make_test
def test_as_return_value(a,b):
return a if a>b else b
@_make_test
def test_assign_and_return(a,b):
c = a if a>b else b
return c
@_make_test
def test_in_expression(a,b):
c = 5 + (a if a>b else b)/2.0
return c
@_make_test
def test_expr_as_then_clause(a,b):
return (a+1) if a>b else b
@_make_test
def test_expr_as_else_clause(a,b):
return a if a>b else (b+1)
@autojit
def _f1(a,b):
return a if a>b else b
def test_type_promotion():
assert isinstance(_f1(1, 1), (int, long))
assert isinstance(_f1(1.0, 1), float)
assert isinstance(_f1(1, 1.0), float)
if __name__ == '__main__':
test_type_promotion()
########NEW FILE########
__FILENAME__ = test_indexing
#! /usr/bin/env python
# ______________________________________________________________________
'''test_indexing
Unit tests for checking Numba's indexing into Numpy arrays.
'''
# ______________________________________________________________________
from numba import double, int_
from numba.decorators import jit
import numpy
import unittest
# ______________________________________________________________________
def get_index_fn_0 (inarr):
return inarr[1,2,3]
def set_index_fn_0 (ioarr):
ioarr[1,2,3] = 0.
def set_index_fn_1 (min_x, max_x, min_y, out_arr):
    '''Thinly veiled (and simplified) version of the Mandelbrot driver.
    Fills out_arr with the (x, y) coordinate grid, much like indexing
    arange(min_x, max_x + epsilon, delta) with mgrid[:width, :height][0]
    (and the corresponding y values); see the vectorized NumPy sketch
    after this function.'''
width = out_arr.shape[0]
height = out_arr.shape[1]
delta = (max_x - min_x) / width
for x in range(width):
x_val = x * delta + min_x
for y in range(height):
y_val = y * delta + min_y
out_arr[x,y,0] = x_val
out_arr[x,y,1] = y_val
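# Illustrative sketch (not part of the original test suite): a vectorized
# NumPy equivalent of set_index_fn_1 above, usable as an independent oracle.
def set_index_fn_1_numpy(min_x, max_x, min_y, out_arr):
    width = out_arr.shape[0]
    height = out_arr.shape[1]
    delta = (max_x - min_x) / width
    out_arr[:, :, 0] = (numpy.arange(width) * delta + min_x)[:, None]
    out_arr[:, :, 1] = (numpy.arange(height) * delta + min_y)[None, :]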
def set_index_fn_2(arr):
width = arr.shape[0]
height = arr.shape[1]
for x in range(width):
for y in range(height):
arr[x, y] = x*width+y
def get_shape_fn_0 (arr):
width = arr.shape[0]
return width
def get_shape_fn_1 (arr):
height = arr.shape[1]
return height
def get_shape_fn_2 (arr):
height = arr.shape[2]
return height
# ______________________________________________________________________
class TestIndexing (unittest.TestCase):
def test_get_index_fn_0 (self):
arr = numpy.ones((4,4,4), dtype=numpy.double)
arr[1,2,3] = 0.
compiled_fn = jit(restype=double,
argtypes=[double[:, :, ::1]])(get_index_fn_0)
self.assertEqual(compiled_fn(arr), 0.)
def test_set_index_fn_0 (self):
arr = numpy.ones((4,4,4))
compiled_fn = jit(argtypes=[double[:,:,::1]])(set_index_fn_0)
self.assertEqual(arr[1,2,3], 1.)
compiled_fn(arr)
self.assertEqual(arr[1,2,3], 0.)
def test_set_index_fn_1 (self):
control_arr = numpy.zeros((50, 50, 2), dtype=numpy.double)
test_arr = numpy.zeros_like(control_arr)
set_index_fn_1(-1., 1., -1., control_arr)
argtypes = double, double, double, double[:,:,:]
compiled_fn = jit(argtypes=argtypes)(set_index_fn_1)
compiled_fn(-1., 1., -1., test_arr)
self.assertTrue((numpy.abs(control_arr - test_arr) < 1e9).all())
def test_get_shape_fn_0(self):
arr = numpy.zeros((5,6,7), dtype=numpy.double)
compiled_fn = jit(restype=int_,
argtypes=[double[:, :, ::1]])(get_shape_fn_0)
self.assertEqual(compiled_fn(arr), 5)
def test_get_shape_fn_1(self):
arr = numpy.zeros((5,6,7), dtype=numpy.double)
compiled_fn = jit(restype=int_,
argtypes=[double[:, :, ::1]])(get_shape_fn_1)
self.assertEqual(compiled_fn(arr), 6)
def test_get_shape_fn_2(self):
arr = numpy.zeros((5,6,7), dtype=numpy.double)
compiled_fn = jit(restype=int_,
argtypes=[double[:, :, ::1]])(get_shape_fn_2)
self.assertEqual(compiled_fn(arr), 7)
def test_set_index_fn_2 (self):
control_arr = numpy.zeros((10, 10), dtype=numpy.double)
test_arr = numpy.zeros_like(control_arr)
set_index_fn_2(control_arr)
argtypes = double[:, :],
compiled_fn = jit(argtypes=argtypes)(set_index_fn_2)
compiled_fn(test_arr)
self.assertTrue((numpy.abs(control_arr - test_arr) < 1e9).all())
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_indexing.py
########NEW FILE########
__FILENAME__ = test_intrinsic
from numba import *
from numba import declare_intrinsic, declare_instruction
def test_intrinsics():
intrin = declare_instruction(int32(int32, int32), 'srem')
assert intrin(5, 3) == 2
if __name__ == "__main__":
test_intrinsics()
########NEW FILE########
__FILENAME__ = test_issues
#! /usr/bin/env python
# ______________________________________________________________________
from numba import int32
from numba.decorators import jit
import unittest
import __builtin__
# ______________________________________________________________________
def int_pow_fn (val, exp):
return val ** exp
# ______________________________________________________________________
def _int_pow (val, exp):
x = 1
temp = val
w = exp
while w > 0:
if (w & 1) != 0:
x = x * temp
# TODO: Overflow check on x
w >>= 1
if w == 0: break
temp = temp * temp
# TODO: Overflow check on temp
return x
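# Illustrative check (not part of the original tests): _int_pow implements
# exponentiation by squaring, so for small non-negative inputs it should agree
# with Python's built-in ** operator.
def _check_int_pow():
    for val in range(1, 6):
        for exp in range(8):
            assert _int_pow(val, exp) == val ** exp, (val, exp)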
# ______________________________________________________________________
def bad_return_fn (arg0, arg1):
arg0 + arg1
# ______________________________________________________________________
class TestIssues (unittest.TestCase):
def test_int_pow_fn (self):
compiled_fn = jit(argtypes = (int32, int32), restype = int32)(
int_pow_fn)
self.assertEqual(compiled_fn(2, 3), 8)
self.assertEqual(compiled_fn(3, 4), int_pow_fn(3, 4))
def test_bad_return_fn (self):
jit(argtypes = (int32, int32), restype = int32)(bad_return_fn)(0, 0)
# ______________________________________________________________________
if __name__ == '__main__':
unittest.main()
# ______________________________________________________________________
# End of test_issues.py
########NEW FILE########
__FILENAME__ = test_listcomp
# Based on cython/tests/run/listcomp.pyx
from numba import *
from numba.testing.test_support import testmod, autojit_py3doc
@autojit_py3doc
def smoketest():
"""
>>> smoketest()
([0, 4, 8], 4)
"""
x = -10 # 'abc'
result = [x*2 for x in range(5) if x % 2 == 0]
# assert x != -10 # 'abc'
return result, x
@autojit_py3doc(warnstyle="simple")
def list_genexp():
"""
>>> list_genexp()
Traceback (most recent call last):
...
NumbaError: ...: Generator comprehensions are not yet supported
"""
x = -10 # 'abc'
result = list(x*2 for x in range(5) if x % 2 == 0)
# assert x == 'abc'
return result, x
@autojit_py3doc(locals={"x": int_})
def int_runvar():
"""
>>> int_runvar()
[0, 4, 8]
"""
print([x*2 for x in range(5) if x % 2 == 0])
#@jit
#class A(object):
# @object_()
# def __repr__(self):
# return u"A"
#@autojit
#def typed():
# """
# >>> typed()
# [A, A, A]
# """
# cdef A obj
# print [obj for obj in [A(), A(), A()]]
#@autojit
#def iterdict():
# """
# >>> iterdict()
# [1, 2, 3]
# """
# d = dict(a=1,b=2,c=3)
# l = [d[key] for key in d]
# l.sort()
# print l
@autojit_py3doc
def nested_result():
"""
>>> nested_result()
[[], [-1], [-1, 0], [-1, 0, 1]]
"""
result = [[a-1 for a in range(b)] for b in range(4)]
return result
# TODO: object and string iteration
#@autojit_py3doc
#def listcomp_as_condition(sequence):
# """
# >>> listcomp_as_condition(['a', 'b', '+'])
# True
# >>> listcomp_as_condition('ab+')
# True
# >>> listcomp_as_condition('abc')
# False
# """
# if [1 for c in sequence if c in '+-*/<=>!%&|([^~,']:
# return True
# return False
#@autojit_py3doc
#def sorted_listcomp(sequence):
# """
# >>> sorted_listcomp([3,2,4])
# [3, 4, 5]
# """
# return sorted([ n+1 for n in sequence ])
@autojit_py3doc
def listcomp_const_condition_false():
"""
>>> listcomp_const_condition_false()
[]
"""
return [x*2 for x in range(3) if 0]
@autojit_py3doc
def listcomp_const_condition_true():
"""
>>> listcomp_const_condition_true()
[0, 2, 4]
"""
return [x*2 for x in range(3) if 1]
if __name__ == '__main__':
# print test_pointer_arithmetic()
# a = np.array([1, 2, 3, 4], dtype=np.float32)
# print test_pointer_indexing(a.ctypes.data, float32.pointer())
pass
#int_runvar()
#smoketest()
#list_genexp()
#test_listcomp()
# smoketest()
testmod()
########NEW FILE########
__FILENAME__ = test_llarray
import ctypes
import unittest
from collections import namedtuple
from functools import partial
import numba
from numba import *
from numba import ndarray_helpers
import llvm.core as lc
# ______________________________________________________________________
ArrayType = numba.struct([('data', double.pointer()),
('shape', int64.pointer()),
('strides', int64.pointer())])
Int32 = lc.Type.int(32)
const = partial(lc.Constant.int, Int32)
zero = const(0)
one = const(1)
two = const(2)
def ptr_at(builder, ptr, idx):
return builder.gep(ptr, [const(idx)])
def load_at(builder, ptr, idx):
return builder.load(ptr_at(builder, ptr, idx))
def store_at(builder, ptr, idx, val):
builder.store(val, ptr_at(builder, ptr, idx))
class MyArray(object):
"""
Internal array class for a double(:, 10) array.
"""
def __init__(self, array_ptr, builder):
self.array_ptr = array_ptr
self.builder = builder
self.nd = 2
@classmethod
def from_type(cls, llvm_dtype):
return ArrayType.pointer().to_llvm()
@property
def data(self):
dptr = self.builder.gep(self.array_ptr, [zero, zero])
return self.builder.load(dptr)
@property
def shape_ptr(self):
result = self.builder.gep(self.array_ptr, [zero, one])
result = self.builder.load(result)
return result
@property
def strides_ptr(self):
result = self.builder.gep(self.array_ptr, [zero, two])
return self.builder.load(result)
@property
def shape(self):
return self.preload(self.shape_ptr, self.nd)
@property
def strides(self):
return self.preload(self.strides_ptr, self.nd)
@property
def ndim(self):
return const(self.nd)
@property
def itemsize(self):
raise NotImplementedError
def preload(self, ptr, count=None):
assert count is not None
return [load_at(self.builder, ptr, i) for i in range(count)]
def getptr(self, *indices):
const = partial(lc.Constant.int, indices[0].type)
offset = self.builder.add(
self.builder.mul(indices[0], const(10)), indices[1])
data_ptr_ty = lc.Type.pointer(lc.Type.double())
dptr_plus_offset = self.builder.gep(self.data, [offset])
return self.builder.bitcast(dptr_plus_offset, data_ptr_ty)
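# MyArray.getptr hard-codes a row length of 10 doubles: the flat element
# offset for index (i, j) is i * 10 + j, matching the double(:, 10) layout
# this helper class assumes (see the strides created in make_array below).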
ndarray_helpers.Array.register(MyArray)
# ______________________________________________________________________
# Test functions
CtypesArray = ArrayType.to_ctypes()
@jit(void(double[:, :]), array=MyArray, wrap=False, nopython=True)
def use_array(A):
"""simple test function"""
for i in range(A.shape[0]):
for j in range(A.shape[1]):
A[i, j] = i * A.shape[1] + j
@jit(object_(double[:, :]), array=MyArray, wrap=False)
def get_attributes(A):
return A.shape[0], A.shape[1], A.strides[0], A.strides[1]
# ______________________________________________________________________
# Ctypes functions
c_use_array = numba.addressof(use_array)
c_get_attributes = numba.addressof(get_attributes)
c_use_array.argtypes = [ctypes.POINTER(CtypesArray)]
c_get_attributes.argtypes = [ctypes.POINTER(CtypesArray)]
# ______________________________________________________________________
# Utils
Array = namedtuple('Array', ['handle', 'array', 'data', 'shape', 'strides'])
def make_array():
"""Make a double[*, 10] ctypes-allocated array"""
empty = lambda c_type, args: ctypes.cast(
(c_type * len(args))(*args), ctypes.POINTER(c_type))
data = empty(ctypes.c_double, [0] * 50)
shape = empty(ctypes.c_int64, [5, 10])
strides = empty(ctypes.c_int64, [10 * 8, 8]) # doubles!
array = CtypesArray(data, shape, strides)
return Array(ctypes.pointer(array), array, data, shape, strides)
# ______________________________________________________________________
# Tests...
class TestArray(unittest.TestCase):
def test_indexing(self):
arr = make_array()
c_use_array(arr.handle)
for i in range(50):
assert arr.data[i] == float(i), (arr.data[i], i)
def test_attributes(self):
arr = make_array()
result = c_get_attributes(arr.handle)
assert result == (5, 10, 80, 8), result
if __name__ == "__main__":
# TestArray('test_attributes').debug()
# TestArray('test_indexing').debug()
unittest.main()
########NEW FILE########
__FILENAME__ = test_locals_override
import os
from numba import *
from numba import error
@autojit(backend='ast', locals=dict(value=double))
def locals_override(obj):
value = obj.method()
with nopython:
return value * value
class Class(object):
def method(self):
return 20.0
def test_locals_override():
assert locals_override(Class()) == 400.0
if __name__ == "__main__":
test_locals_override()
########NEW FILE########
__FILENAME__ = test_mandelbrot
"""
>>> image = np.zeros((50, 75), dtype=np.uint8)
>>> numpy_image = image.copy()
>>> image = create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
>>> numpy_image = create_fractal.py_func(-2.0, 1.0, -1.0, 1.0, numpy_image, 20)
>>> assert np.allclose(image, numpy_image)
"""
from numba import *
import numpy as np
@autojit(nopython=True)
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return 255
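# For example, a point inside the set never escapes, so mandel(0.0, 0.0, 20)
# returns 255, while a point far outside escapes on the first iteration:
# mandel(2.0, 2.0, 20) returns 0 because |2+2j|**2 = 8 >= 4.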
@autojit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
with nopython:
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image
if __name__ == "__main__":
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_mandelbrot_2
#! /usr/bin/env python
'''test_mandelbrot_2
Test the Numba compiler on several variants of Mandelbrot set membership
computations.
'''
from numba import *
import unittest
import numpy as np
from numba.testing import test_support
def mandel_1(real_coord, imag_coord, max_iters):
    '''Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
Inspired by code at http://wiki.cython.org/examples/mandelbrot
'''
# Ideally we'd want to use a for loop, but we'll need to be able
# to detect and desugar for loops over range/xrange/arange first.
i = 0
z_real = 0.
z_imag = 0.
while i < max_iters:
z_real_n = z_real * z_real - z_imag * z_imag + real_coord
z_imag = 2. * z_real * z_imag + imag_coord
z_real = z_real_n
if (z_real * z_real + z_imag * z_imag) >= 4:
return i
i += 1
return -1
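# The loop body of mandel_1 is the complex iteration z = z*z + c written out
# in real arithmetic: for z = a + b*i, z*z = (a*a - b*b) + (2*a*b)*i, hence
# z_real_n = a*a - b*b + real_coord and the new z_imag = 2*a*b + imag_coord.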
mandel_1c = jit('i4(f8,f8,i4)')(mandel_1)
def mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, image):
nb_colors = len(colors)
width = image.shape[0]
height = image.shape[1]
pixel_size = (max_x - min_x) / width
for x in range(width):
real = min_x + x * pixel_size
for y in range(height):
imag = min_y + y * pixel_size
# For the following to actually compile, mandel_1 must
# have already been compiled.
color = mandel_1(real, imag, nb_iterations)
# Would prefer the following, just to show off:
# image[x, y, :] = colors[color % nb_colors]
# But that'd require Numba to handle slicing (it doesn't
            # at the time this version was written), and it wouldn't
# have the type information about the shape.
            col_index = color % nb_colors # Oh, for want of CSE...
image[x, y, 0] = colors[col_index, 0]
image[x, y, 1] = colors[col_index, 1]
image[x, y, 2] = colors[col_index, 2]
mandel_driver_1c = jit('void(f8,f8,f8,i4,u1[:,:],u1[:,:,:])')(
mandel_driver_1)
def make_palette():
'''Shamefully stolen from
http://wiki.cython.org/examples/mandelbrot, though we did correct
their spelling mistakes (*smirk*).'''
colors = []
for i in range(0, 25):
colors.append( (i*10, i*8, 50 + i*8), )
for i in range(25, 5, -1):
colors.append( (50 + i*8, 150+i*2, i*10), )
for i in range(10, 2, -1):
colors.append( (0, i*15, 48), )
return np.array(colors, dtype=np.uint8)
def mandel_2(x, max_iterations):
z = complex(0)
for i in range(max_iterations):
z = z**2 + x
if abs(z) >= 2:
return i
return -1
mandel_2c = jit(i4(c16,i4))(mandel_2)
def mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, image):
nb_colors = len(colors)
width = image.shape[0]
height = image.shape[1]
pixel_size = (max_x - min_x) / width
dy = pixel_size * 1j
for x in range(width):
coord = complex(min_x + x * pixel_size, min_y)
for y in range(height):
color = mandel_2(coord, nb_iterations)
image[x,y,:] = colors[color % nb_colors,:]
coord += dy
mandel_driver_2c = jit(void(f8,f8,f8,i4,u1[:,:],u1[:,:,:]))(mandel_driver_2)
def benchmark(dx = 500, dy = 500):
import time
min_x = -1.5
max_x = 0
min_y = -1.5
colors = make_palette()
nb_iterations = colors.shape[0]
img0 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, img0)
dt0 = time.time() - start
img1 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_1c(min_x, max_x, min_y, nb_iterations, colors, img1)
dt1 = time.time() - start
img2 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, img2)
dt2 = time.time() - start
img3 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125
start = time.time()
mandel_driver_2c(min_x, max_x, min_y, nb_iterations, colors, img3)
dt3 = time.time() - start
return (dt0, dt1, dt2, dt3), (img0, img1, img2, img3)
class TestMandelbrot(unittest.TestCase):
def test_mandel_1_sanity(self):
self.assertEqual(mandel_1c(0., 0., 20), -1)
def test_mandel_1(self):
vals = np.arange(-1., 1.000001, 0.1)
for real in vals:
for imag in vals:
self.assertEqual(mandel_1(real, imag, 20),
mandel_1c(real, imag, 20))
def test_mandel_driver_1(self):
palette = make_palette()
control_image = np.zeros((50, 50, 3), dtype = np.uint8)
mandel_driver_1(-1., 1., -1., len(palette), palette, control_image)
test_image = np.zeros_like(control_image)
self.assertTrue((control_image - test_image == control_image).all())
mandel_driver_1c(-1., 1., -1., len(palette), palette, test_image)
image_diff = control_image - test_image
self.assertTrue((image_diff == 0).all())
def test_mandel_driver_2(self):
palette = make_palette()
control_image = np.zeros((50, 50, 3), dtype = np.uint8)
mandel_driver_2(-1., 1., -1., len(palette), palette, control_image)
test_image = np.zeros_like(control_image)
self.assertTrue((control_image - test_image == control_image).all())
mandel_driver_2c(-1., 1., -1., len(palette), palette, test_image)
image_diff = control_image - test_image
self.assertTrue((image_diff == 0).all())
if __name__ == "__main__":
test_support.main()
########NEW FILE########
__FILENAME__ = test_modulo
#! /usr/bin/env python
# ______________________________________________________________________
from numba import uint32, int16
from numba.decorators import jit, autojit
import unittest
import __builtin__
# ______________________________________________________________________
def modulo (a, b):
return a % b
# ______________________________________________________________________
class TestModulo (unittest.TestCase):
#Test for issue #143
def test_modulo_uint32 (self):
compiled_modulo = jit(argtypes = (uint32, uint32),
restype = uint32)(modulo)
self.assertEqual(modulo(0, 0x80000000),
compiled_modulo(0, 0x80000000))
#Test for issue #151
def test_modulo_int16 (self):
compiled_modulo = jit(argtypes = (int16, int16),
restype = int16)(modulo)
self.assertEqual(modulo(-3584, -512),
compiled_modulo(-3584, -512))
# ______________________________________________________________________
if __name__ == '__main__':
unittest.main()
# ______________________________________________________________________
# End of test_modulo.py
########NEW FILE########
__FILENAME__ = test_multiarray_api
#! /usr/bin/env python
# ______________________________________________________________________
'''test_multiarray_api
Test the code generation utility class numba.multiarray_api.MultiarrayAPI.
'''
# ______________________________________________________________________
import ctypes
import llvm.core as lc
import llvm.ee as le
from numba.llvm_types import _int32, _intp, _intp_star, _void_star, _numpy_array, _head_len
import numba.multiarray_api as ma
import numpy as np
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
# ______________________________________________________________________
_pyobj_to_pyobj = ctypes.CFUNCTYPE(ctypes.py_object, ctypes.py_object)
_void_star = lc.Type.pointer(lc.Type.int(8))
_make_const_int = lambda X: lc.Constant.int(_int32, X)
def _numpy_array_element(ndarray_ptr, idx, builder):
ptr_to_element = builder.gep(ndarray_ptr,
map(_make_const_int, [0, _head_len + idx]))
return builder.load(ptr_to_element)
def _get_pyarray_getptr(module):
'''
For reference:
void *
PyArray_GetPtr(PyArrayObject *obj, npy_intp* ind)
{
int n = PyArray_NDIM(obj);
npy_intp *strides = PyArray_STRIDES(obj);
char *dptr = PyArray_DATA(obj);
while (n--) {
dptr += (*strides++) * (*ind++);
}
return (void *)dptr;
}
'''
function_type = lc.Type.function(_void_star, [_numpy_array, _intp_star])
function = module.get_or_insert_function(function_type, 'PyArray_GetPtr_inline')
    if function.basic_block_count != 0:
# Already implemented in the module
return function
# set linkage and attributes
function.add_attribute(lc.ATTR_ALWAYS_INLINE) # force inline
# function.linkage = lc.LINKAGE_INTERNAL
print(function)
# implement the function
bb_entry = function.append_basic_block('entry')
bb_while_cond = function.append_basic_block('while.cond')
bb_while_body = function.append_basic_block('while.body')
bb_ret = function.append_basic_block('return')
# initialize
builder = lc.Builder.new(bb_entry)
ndarray_element = lambda X: _numpy_array_element(ndarray_ptr, X, builder)
ndarray_ptr = function.args[0]
dptr, nd, strides = map(ndarray_element, [0, 1, 3])
dptr.name = 'dptr'
nd.name = 'nd'
strides.name = 'strides'
builder.branch(bb_while_cond) # branch to while cond
# while (n--)
builder.position_at_end(bb_while_cond)
nd_phi = builder.phi(nd.type, name='nd_phi')
strides_phi = builder.phi(strides.type, name='strides_phi')
ind_phi = builder.phi(function.args[1].type, name='ind_phi')
dptr_phi = builder.phi(dptr.type, name='dptr_phi')
nd_phi.add_incoming(nd, bb_entry)
nd_minus_one = builder.sub(nd_phi, _make_const_int(1), name='nd_minus_one')
nd_phi.add_incoming(nd_minus_one, bb_while_body)
pred = builder.icmp(lc.ICMP_NE, nd_phi, _make_const_int(0))
strides_phi.add_incoming(strides, bb_entry)
ind_phi.add_incoming(function.args[1], bb_entry)
dptr_phi.add_incoming(dptr, bb_entry)
builder.cbranch(pred, bb_while_body, bb_ret)
# dptr += (*strides++) * (*ind++);
builder.position_at_end(bb_while_body)
strides_next = builder.gep(strides_phi, [_make_const_int(1)], name='strides_next')
strides_phi.add_incoming(strides_next, bb_while_body)
ind_next = builder.gep(ind_phi, [_make_const_int(1)], name='ind_next')
ind_phi.add_incoming(ind_next, bb_while_body)
stride_value = builder.load(strides_phi)
ind_value = builder.load(builder.bitcast(ind_phi, strides_phi.type))
dptr_next = builder.gep(dptr_phi, [builder.mul(stride_value, ind_value)], name='dptr_next')
dptr_phi.add_incoming(dptr_next, bb_while_body)
builder.branch(bb_while_cond)
# return (void *) dptr;
builder.position_at_end(bb_ret)
builder.ret(dptr_phi)
# check generated code
function.verify()
return function
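# The phi nodes above carry the loop variables (nd, strides, ind, dptr)
# between the entry block and the while body: that is how the pointer
# increments and the n-- countdown of the C reference code are expressed in
# LLVM's SSA form.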
# ______________________________________________________________________
# For reference:
# typedef struct {
# PyObject_HEAD // indices (skipping the head)
# char *data; // 0
# int nd; // 1
# int *dimensions, *strides; // 2, 3
# PyObject *base; // 4
# PyArray_Descr *descr; // 5
# int flags; // 6
# } PyArrayObject;
class TestMultiarrayAPI(unittest.TestCase):
def test_call_PyArray_Zeros(self):
ma_obj = ma.MultiarrayAPI()
module = lc.Module.new('test_module')
ma_obj.set_PyArray_API(module)
test_fn = module.add_function(lc.Type.function(_numpy_array,
[_numpy_array]),
'test_fn')
bb = test_fn.append_basic_block('entry')
builder = lc.Builder.new(bb)
pyarray_zeros = ma_obj.load_PyArray_Zeros(module, builder)
arg = test_fn.args[0]
largs = [
builder.load(
builder.gep(arg,
[lc.Constant.int(_int32, 0),
lc.Constant.int(_int32, _head_len + ofs)]))
for ofs in (1, 2, 5)] # nd, dimensions, descr
largs.append(lc.Constant.int(_int32, 0))
ret_void_ptr = builder.call(pyarray_zeros, largs)
builder.ret(builder.bitcast(ret_void_ptr, _numpy_array))
logging.debug(module)
module.verify()
ee = le.EngineBuilder.new(module).mattrs('-avx').create()
test_fn_addr = ee.get_pointer_to_function(test_fn)
py_test_fn = _pyobj_to_pyobj(test_fn_addr)
test_arr = np.array([1.,2.,3.])
result = py_test_fn(test_arr)
self.assertEqual(result.shape, test_arr.shape)
self.assertTrue((result == 0.).all())
def test_call_PyArray_AsCArray(self):
'''
A test to check PyArray_AsCArray for accessing the C-array in ndarray.
This is not the recommended way to access elements in ndarray.
'''
ma_obj = ma.MultiarrayAPI()
module = lc.Module.new('test_module_PyArray_AsCArray')
ma_obj.set_PyArray_API(module)
test_fn = module.add_function(lc.Type.function(lc.Type.double(), #_numpy_array,
[_numpy_array]),
'test_fn')
bb = test_fn.append_basic_block('entry')
builder = lc.Builder.new(bb)
pyarray_ascarray = ma_obj.load_PyArray_AsCArray(module, builder)
pyarray_ascarray_fnty = pyarray_ascarray.type.pointee
arg_pyobj = test_fn.args[0]
# prepare arg 1 PyObject** op
pyobj_ptr = builder.alloca(arg_pyobj.type)
builder.store(arg_pyobj, pyobj_ptr)
arg_pyobj_ptr = builder.bitcast(pyobj_ptr, lc.Type.pointer(_void_star))
# prepare arg 2 void* ptr
data_ptr = builder.alloca(lc.Type.pointer(lc.Type.double()))
arg_data_ptr = builder.bitcast(data_ptr, _void_star)
# prepare arg 3, 4, 5
ndarray_element = lambda X: _numpy_array_element(arg_pyobj, X, builder)
nd, dimensions, descr = map(ndarray_element, [1, 2, 5])
descr_as_void_ptr = builder.bitcast(descr, _void_star)
# call
largs = [arg_pyobj_ptr, arg_data_ptr, dimensions, nd, descr_as_void_ptr]
status = builder.call(pyarray_ascarray, largs)
# check errors?
# builder.ret(status)
data_array = builder.load(data_ptr)
data = []
for i in xrange(3): # The count is fixed for this simple test.
elem_ptr = builder.gep(data_array, map(_make_const_int, [i]))
data.append(builder.load(elem_ptr))
sum_data = builder.fadd(builder.fadd(data[0], data[1]), data[2])
builder.ret(sum_data)
# NOTE: The arg_data_ptr is never freed. This is okay only for test here.
logging.debug(module)
test_fn.verify()
module.verify()
ee = le.EngineBuilder.new(module).mattrs('-avx').create()
test_fn_addr = ee.get_pointer_to_function(test_fn)
c_func_type = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.py_object)
py_test_fn = c_func_type(test_fn_addr)
test_arr = np.array([1.234, 2.345, 3.567])
result = py_test_fn(test_arr)
self.assertEqual(sum(test_arr), result)
def test_call_PyArray_GetPtr(self):
'''
        Using PyArray_GetPtr should be the preferred method for accessing an
        element. The only concern is that we will miss optimization
        opportunities, since LLVM has no information about PyArray_GetPtr.
        Perhaps it is better to put the definition inside the LLVM module.
'''
ma_obj = ma.MultiarrayAPI()
module = lc.Module.new('test_module_PyArray_GetPtr')
ma_obj.set_PyArray_API(module)
test_fn = module.add_function(lc.Type.function(lc.Type.double(),
[_numpy_array, _int32]),
'test_fn')
bb = test_fn.append_basic_block('entry')
builder = lc.Builder.new(bb)
pyarray_getptr = ma_obj.load_PyArray_GetPtr(module, builder)
pyarray_getptr_fnty = pyarray_getptr.type.pointee
# prepare arg 1 PyObject *
arg_pyobj = test_fn.args[0]
npy_intp_ty = pyarray_getptr_fnty.args[1].pointee
# prepare arg 2 npy_intp *
arg_index = builder.alloca(npy_intp_ty)
index_as_npy_intp = builder.sext(test_fn.args[1], npy_intp_ty)
builder.store(index_as_npy_intp, arg_index)
# call
largs = [arg_pyobj, arg_index]
elemptr = builder.call(pyarray_getptr, largs)
# return the loaded element at the index specified
elemptr_as_double_ptr = builder.bitcast(elemptr, lc.Type.pointer(lc.Type.double()))
builder.ret(builder.load(elemptr_as_double_ptr))
logging.debug(module)
test_fn.verify()
module.verify()
ee = le.EngineBuilder.new(module).mattrs('-avx').create()
test_fn_addr = ee.get_pointer_to_function(test_fn)
c_func_type = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.py_object, ctypes.c_int)
py_test_fn = c_func_type(test_fn_addr)
test_arr = np.array([1.234, 2.345, 3.567])
for idx, val in enumerate(test_arr):
result = py_test_fn(test_arr, idx)
self.assertEqual(val, result)
def test_call_PyArray_GetPtr_inline(self):
'''
Let's try implementing PyArray_GetPtr inside LLVM and allow inlining.
'''
module = lc.Module.new('test_module_PyArray_GetPtr_inline')
test_fn = module.add_function(lc.Type.function(lc.Type.double(),
[_numpy_array, _int32]),
'test_fn')
bb = test_fn.append_basic_block('entry')
builder = lc.Builder.new(bb)
pyarray_getptr = _get_pyarray_getptr(module)
pyarray_getptr_fnty = pyarray_getptr.type.pointee
# prepare arg 1 PyObject *
arg_pyobj = test_fn.args[0]
npy_intp_ty = pyarray_getptr_fnty.args[1].pointee
# prepare arg 2 npy_intp *
arg_index = builder.alloca(npy_intp_ty)
index_as_npy_intp = builder.sext(test_fn.args[1], npy_intp_ty)
builder.store(index_as_npy_intp, arg_index)
# call
largs = [arg_pyobj, arg_index]
elemptr = builder.call(pyarray_getptr, largs)
# return the loaded element at the index specified
elemptr_as_double_ptr = builder.bitcast(elemptr, lc.Type.pointer(lc.Type.double()))
builder.ret(builder.load(elemptr_as_double_ptr))
logging.debug(module)
test_fn.verify()
module.verify()
ee = le.EngineBuilder.new(module).mattrs('-avx').create()
test_fn_addr = ee.get_pointer_to_function(test_fn)
c_func_type = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.py_object, ctypes.c_int)
py_test_fn = c_func_type(test_fn_addr)
test_arr = np.array([1.234, 2.345, 3.567])
for idx, val in enumerate(test_arr):
result = py_test_fn(test_arr, idx)
self.assertEqual(val, result)
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_multiarray_api.py
########NEW FILE########
__FILENAME__ = test_nan
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
@jit(bool_(float64))
def isnan(x):
return x != x
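# IEEE 754 makes NaN unordered, so x != x holds only for NaN values; that is
# why isnan(float('nan')) is True below while equal(float('nan')) is False.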
@jit(bool_(float64))
def equal(x):
return x == x
assert isnan(float('nan'))
assert not isnan(10.0)
assert not equal(float('nan'))
########NEW FILE########
__FILENAME__ = test_nosource
import sys
import numba
def test_nosource():
source = '''
@numba.autojit
def foo (): return 99
'''
new_locals = {}
exec(source, globals(), new_locals)
foo = new_locals['foo']
assert foo() == 99
if sys.version_info[:2] < (2, 7):
del test_nosource
elif __name__ == "__main__":
test_nosource()
########NEW FILE########
__FILENAME__ = test_numbafunction
from numba import *
@jit(void())
def func1():
"I am a docstring!"
@autojit
def func2():
"I am a docstring!"
if __name__ == '__main__':
assert func1.__name__ == func1.py_func.__name__
assert func1.__doc__ == "I am a docstring!"
assert func1.__module__ == func1.py_func.__module__
assert func2.__name__ == func2.py_func.__name__
assert func2.__doc__ == "I am a docstring!", func2.__doc__
# This does not yet work for some reason (maybe overridden by PyType_Ready()?)
# assert func2.__module__ == func2.py_func.__module__
########NEW FILE########
__FILENAME__ = test_object_conversion
"""
See also numba.tests.test_overflow.
"""
import ctypes
import unittest
import numpy as np
from numba import *
@autojit(backend='ast')
def convert(obj_var, native_var):
obj_var = native_var
native_var = obj_var
return native_var
@autojit(locals=dict(obj=object_))
def convert_float(obj):
var = float_(obj)
return object_(var)
@autojit(locals=dict(obj=object_))
def convert_numeric(obj, dst_type):
var = dst_type(obj)
return object_(var)
@autojit
def convert_to_pointer(array):
p = array.data
return object_(p)
class TestConversion(unittest.TestCase):
def test_conversion(self):
assert convert(object(), 10.2) == 10.2
assert convert(object(), 10) == 10
assert convert(object(), "foo") == "foo"
obj = object()
assert convert(object(), obj) == obj
assert convert(object(), 10.2 + 5j) == 10.2 + 5j
assert convert_float(10.5) == 10.5
def test_numeric_conversion(self):
types = [
char,
uchar,
short,
ushort,
int_,
uint,
long_,
ulong,
longlong,
ulonglong,
Py_ssize_t,
size_t,
float_,
double,
# longdouble,
complex64,
complex128,
]
value = 2.5
for dst_type in types:
# print dst_type
if dst_type.is_int:
if dst_type.typename == 'char':
expected = b'\x02'
else:
expected = 2
else:
expected = 2.5
result = convert_numeric(value, dst_type)
assert result == expected, (result, expected, dst_type)
def test_pointer_conversion(self):
type = double.pointer()
array = np.arange(10, dtype=np.double)
# p = array.ctypes.data_as(type.to_ctypes())
result = convert_to_pointer(array)
assert ctypes.cast(result, ctypes.c_void_p).value == array.ctypes.data
if __name__ == "__main__":
from numba.testing import test_support
test_support.main()
########NEW FILE########
__FILENAME__ = test_object_counting
"""
>>> test_refcounting()
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> sys.getrefcount(object())
1
>>> sys.getrefcount(fresh_obj())
1
>>> sys.getrefcount(fresh_obj2())
1
>>> sys.getrefcount(fresh_obj3())
1
>>> sys.getrefcount(fresh_obj4())
1
>>> sys.getrefcount(fresh_obj5())
1
>>> sys.getrefcount(fresh_obj6())
1
Test list/dict/tuple literals
>>> sys.getrefcount(fresh_obj7())
1
>>> sys.getrefcount(fresh_obj7()[0])
1
>>> sys.getrefcount(fresh_obj8())
1
>>> sys.getrefcount(fresh_obj8()["value"])
1
>>> sys.getrefcount(fresh_obj9())
1
>>> sys.getrefcount(fresh_obj9()[0])
1
>>> sys.getrefcount(index_count([object()]))
1
>>> class C(object):
... def __init__(self, value):
... self.value = value
... def __del__(self):
... print('deleting...')
...
>>> sys.getrefcount(attr_count(C(object())))
deleting...
1
>>> obj = object()
>>> sys.getrefcount(obj)
2
>>> exc(obj)
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
>>> sys.getrefcount(obj)
2
>>> obj1, obj2 = object(), np.arange(10)
>>> sys.getrefcount(obj1), sys.getrefcount(obj2)
(2, 2)
>>> x, y = count_arguments(obj1, obj2)
>>> assert x is y is obj2
>>> sys.getrefcount(x)
4
>>> def test_count_arguments(f, obj):
... print(sys.getrefcount(obj))
... f(obj)
... print(sys.getrefcount(obj))
...
>>> test_count_arguments(count_arguments2, object())
3
3
>>> test_count_arguments(count_arguments2, np.arange(10))
3
3
>>> test_count_arguments(count_arguments3, object())
3
3
>>> test_count_arguments(count_arguments3, np.arange(10))
3
3
"""
import sys
import ctypes
from numba import *
import numpy as np
from numba.testing import test_support
class Unique(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "Unique(%d)" % self.value
@autojit(backend='ast')
def use_objects(obj_array):
for i in range(10):
var = obj_array[i]
print(var)
def test_refcounting():
L = np.array([Unique(i) for i in range(10)], dtype=np.object)
assert all(sys.getrefcount(obj) == 3 for obj in L)
with test_support.StdoutReplacer() as out:
use_objects(L)
# print out.getvalue()
# This fails in nose
#expected = "\n".join("Unique(%d)" % i for i in range(10)) + '\n'
#print out.getvalue() == expected
print([sys.getrefcount(obj) for obj in L])
@autojit(backend='ast', warn=False)
def fresh_obj():
x = object()
return x
@autojit(backend='ast', warn=False)
def fresh_obj2():
return object()
@autojit(backend='ast', warn=False)
def fresh_obj3():
x = object()
y = x
return y
@autojit(backend='ast', warn=False)
def fresh_obj4():
x = np.ones(1, dtype=np.double)
y = x
return y
@autojit(backend='ast', warn=False)
def fresh_obj5():
return np.ones(1, dtype=np.double)
@autojit(backend='ast', warn=False)
def fresh_obj6():
x = np.ones(1, dtype=np.double)
y = x
return x
@autojit(backend='ast', warn=False)
def fresh_obj7():
x = np.ones(1, dtype=np.double)
return [x]
@autojit(backend='ast', warn=False)
def fresh_obj8():
x = np.ones(1, dtype=np.double)
return {"value": x}
@autojit(backend='ast', warn=False)
def fresh_obj9():
x = np.ones(1, dtype=np.double)
return (x,)
@autojit(backend='ast', warn=False)
def index_count(L):
x = L[0]
return x
@autojit(backend='ast', warn=False)
def attr_count(obj):
x = obj.value
return x
@autojit(backend='ast', warn=False)
def exc(obj):
x = obj
return object()('boom')
@autojit(backend='ast', warn=False)
def count_arguments(x, y):
x = y
y = x
a = x
b = y
return x, y
@autojit(backend='ast', warn=False)
def count_arguments2(obj):
pass
@autojit(backend='ast', warn=False)
def count_arguments3(obj):
x = obj
if __name__ == "__main__":
# print sys.getrefcount(fresh_obj())
# exc(object())
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_object_iteration
import numba
from numba import *
@autojit
def test_object_iteration(obj):
"""
>>> test_object_iteration([1, 2, 3])
1
2
3
"""
for x in obj:
print(x)
if __name__ == '__main__':
# test_object_iteration([1, 2, 3])
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_object_literals
"""
>>> get_list('world')
[1, 'hello', 2.0, 'world']
>>> get_tuple('world')
(1, 'hello', 2.0, 'world')
>>> get_dict('world') == {"hello": 1, 2.0: 'world'}
True
"""
import sys
from numba import *
myglobal = 20
@autojit(backend='ast')
def get_list(x):
return [1, "hello", 2.0, x]
@autojit(backend='ast')
def get_tuple(x):
return (1, "hello", 2.0, x)
@autojit(backend='ast')
def get_dict(x):
return {"hello": 1, 2.0: x}
if __name__ == '__main__':
import numba
from numba.testing.test_support import rewrite_doc
__doc__ = rewrite_doc(__doc__)
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_overflow
"""
>>> native_convert(char, -10)
-10
>>> native_convert(char, 10)
10
>>> native_convert(char, 127)
127
This doesn't work yet; we should get an error here. We don't get one because
autojit detects the int type, which is natively truncated to a char.
TODO::::::::::
>> native_convert(char, 128)
=> need exception!
>>> object_convert(char, 128)
Traceback (most recent call last):
...
OverflowError: value too large to convert to signed char
>>> object_convert(char, -128)
-128
>>> object_convert(char, -129)
Traceback (most recent call last):
...
OverflowError: value too large to convert to signed char
>>> object_convert(char, 2.9)
2
TODO:::::::::::
Test all numeric types for overflows!
TODO:::::::::::
Test typedef types (npy_intp, Py_uintptr_t, etc)
"""
import unittest
from numba import *
@autojit
def native_convert(dst_type, value):
return dst_type(value)
@autojit(locals=dict(obj=object_))
def object_convert(dst_type, obj):
return dst_type(obj)
class TestConversion(unittest.TestCase):
def test_native_conversion(self):
assert native_convert(char, -10) == b'\xf6'
assert native_convert(char, 10) == b'\n'
assert native_convert(char, 127) == b'\x7f'
# TODO: the below should raise an exception
# We don't get one because autojit detects the int type which is
# simply truncated to a char
# native_convert(char, 128)
def test_object_conversion(self):
assert object_convert(char, -128) == b'\x80'
assert object_convert(char, 2.9) == b'\x02'
def test_overflow(self):
self._convert_overflow(128 , char, 'signed char')
self._convert_overflow(-129, char, 'signed char')
self._convert_overflow(2**31, int32, 'signed int')
def _convert_overflow(self, value, type, typename):
self.assertRaises(OverflowError, object_convert, type, value)
# self.assertEqual(captured.exception.args[0],
# "value too large to convert to %s" % typename)
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_print_function
from __future__ import print_function
import sys
import unittest
import StringIO
from numba import *
@autojit(backend='ast')
def print_(value):
print(value)
@autojit(backend='ast', nopython=True)
def print_nopython(value):
print("value", end=" ")
print(value)
@autojit(backend='ast')
def print_to_stream(stream, value):
print(value, file=stream)
@autojit(backend='ast')
def print_no_newline(stream, value):
print(value, end=' ', file=stream)
class TestPrint(unittest.TestCase):
def test_print(self):
out = sys.stdout
sys.stdout = temp_out = StringIO.StringIO()
try:
print_(10)
print_(10.0)
print_("hello!")
finally:
sys.stdout = out
data = temp_out.getvalue()
assert data == "10\n10.0\nhello!\n", repr(data)
def test_print_stream(self):
temp_out = StringIO.StringIO()
print_to_stream(temp_out, 13.2)
data = temp_out.getvalue()
assert data == "13.2\n", repr(data)
def test_print_no_newline(self):
temp_out = StringIO.StringIO()
print_no_newline(temp_out, 14.1)
data = temp_out.getvalue()
assert data == "14.1 ", repr(data)
if __name__ == "__main__":
# The following isn't currently supported. See issue #147
#(https://github.com/numba/numba/issues/147).
#print_nopython(10)
TestPrint('test_print_stream').debug()
unittest.main()
########NEW FILE########
__FILENAME__ = test_raise
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from numba import autojit
# ______________________________________________________________________
# Helpers
class SpecialException(Exception):
pass
# ______________________________________________________________________
@autojit
def raise1():
raise SpecialException
@autojit
def raise2():
raise SpecialException("hello")
# ______________________________________________________________________
class TestRaise(unittest.TestCase):
def _assert_raises(self, func, expected_args):
try:
func()
except SpecialException as e:
assert e.args == tuple(expected_args), (e.args, expected_args)
else:
raise AssertionError("Expected exception")
def test_raise(self):
self._assert_raises(raise1, [])
self._assert_raises(raise2, ["hello"])
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_recursion
from numba import *
import numba as nb
#------------------------------------------------------------------------
# Jit function recursion
#------------------------------------------------------------------------
@jit(int_(int_))
def fac(arg):
if arg == 1:
return 1
else:
return arg * fac(arg - 1)
assert fac(10) == fac.py_func(10)
#------------------------------------------------------------------------
# Autojit recursion
#------------------------------------------------------------------------
# TODO: support recursion for autojit
@autojit
def fac2(arg):
if arg == 1:
return 1
else:
return arg * fac2(arg - 1)
#assert fac2(10) == fac2.py_func(10)
#------------------------------------------------------------------------
# Extension type recursion
#------------------------------------------------------------------------
@jit
class SimpleClass(object):
@void(int_)
def __init__(self, value):
self.value = value
@int_(int_)
def fac(self, value):
if value == 1:
return self.value
else:
return value * self.fac(value - 1)
obj = SimpleClass(1)
assert obj.fac(10) == fac.py_func(10)
# ______________________________________________________________________
@jit
class ToughClass(object):
@void(int_)
def __init__(self, value):
self.value = value
@int_(int_)
def func1(self, value):
return self.func2(value + self.value)
@int_(int_)
def func2(self, value):
return self.func3(value + self.value)
@int_(int_)
def func3(self, value):
if value < 5:
return self.func1(value + self.value)
return value
obj = ToughClass(1)
assert obj.func1(1) == 6
# ______________________________________________________________________
########NEW FILE########
__FILENAME__ = test_redefine
from numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
        self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
        # Test re-compilation
        self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
        self.assertEqual(jfoo(1), 3)
########NEW FILE########
__FILENAME__ = test_reporting
from numba import *
from numba import error
#@autojit
def func():
if x:
print("hello")
else:
print("world")
def compile_func1():
try:
jit(void())(func)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ = """
>>> compile_func1()
exception: (see below)
--------------------- Numba Encountered Errors or Warnings ---------------------
if x:
-------^
Error ...: No global named 'x'
--------------------------------------------------------------------------------
"""
#@autojit
def func2():
print(10[20])
def compile_func2():
try:
jit(void())(func2)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """>>> compile_func2()
exception: (see below)
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error ...: object of type int cannot be indexed
--------------------------------------------------------------------------------
"""
@autojit # this often messes up line numbers
def func_decorated():
print(10[20])
def compile_func3():
try:
func_decorated()
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """
>>> compile_func3()
exception: (see below)
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error ...: object of type int cannot be indexed
--------------------------------------------------------------------------------
"""
def warn_and_error(a, b):
print(a)
1[2]
__doc__ += """
>>> autojit(warn=False)(warn_and_error)(1, 2)
Traceback (most recent call last):
...
NumbaError: (see below)
--------------------- Numba Encountered Errors or Warnings ---------------------
1[2]
----^
Error 68:4: object of type int cannot be indexed
<BLANKLINE>
--------------------------------------------------------------------------------
>>> autojit(warnstyle='simple')(warn_and_error)(1, 2)
Traceback (most recent call last):
...
NumbaError: (see below)
Error ...: object of type int cannot be indexed
Warning ...: Unused argument 'b'
>>> autojit(func_decorated.py_func, warnstyle='simple')()
Traceback (most recent call last):
...
NumbaError: ...: object of type int cannot be indexed
"""
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_rshift
#! /usr/bin/env python
# ______________________________________________________________________
from numba import uint16
from numba.decorators import jit
import unittest
import __builtin__
# ______________________________________________________________________
def rshift (a, b):
return a >> b
# ______________________________________________________________________
class TestRshift (unittest.TestCase):
#Test for issue #152
def test_rshift_uint16 (self):
compiled_rshift = jit(argtypes = (uint16, uint16),
restype = uint16)(rshift)
self.assertEqual(rshift(65535, 2),
compiled_rshift(65535, 2))
# ______________________________________________________________________
if __name__ == '__main__':
unittest.main()
# ______________________________________________________________________
# End of test_rshift.py
########NEW FILE########
__FILENAME__ = test_slicing
import time
import numpy as np
from numba import *
from numba.decorators import autojit
@autojit
def slice_array(a, start, stop, step):
return a[start:stop:step]
@autojit
def time_slicing(a, start, stop, step):
# with nopython:
for i in range(1000000):
a[start:stop:step]
def test_slicing():
a = np.arange(10)
assert np.all(slice_array(a, 1, 7, 2) == a[1:7:2]) # sanity test
for start in range(-5, 15):
for stop in range(-5, 15):
for step in range(-3, 4):
if step == 0:
continue
assert np.all(slice_array(a, start, stop, step) ==
a[start:stop:step])
if __name__ == "__main__":
test_slicing()
########NEW FILE########
__FILENAME__ = test_strings
"""
>>> temp_string_var()
hellohello0
>>> temp_string()
hellohello0
>>> temp_string2()
hellohello0
>>> temp_string3()
hellohello0
hellohello1
hellohello2
>>> eq("foo", "foo")
True
>>> eq("foo", "bar")
False
>>> ne("foo", "foo")
False
>>> ne("foo", "bar")
True
>>> lt("foo", "foo")
False
>>> lt("foo", "bar")
False
>>> lt("bar", "foo")
True
>>> interpolate("%s and %s", "ham", "eggs")
'ham and eggs'
>>> autojit(string_len)("hello")
5
>>> autojit(nopython=True)(string_len)("hello")
5
"""
import sys
from numba import *
def get_string(i=0):
s = "hello"
return s * 2 + str(i)
@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string_var():
s = get_string()
print(s)
@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string():
s = c_string_type(get_string())
print(s)
@autojit(backend='ast')
def temp_string2():
print((c_string_type(get_string())))
@autojit(backend='ast', locals=dict(s=c_string_type))
def temp_string3():
for i in range(3):
s = c_string_type(get_string(i))
print(s)
@autojit(backend='ast')
def test():
return object()
@jit(void())
def string_constant():
print("hello world")
@jit(bool_(c_string_type, c_string_type))
def eq(s1, s2):
return s1 == s2
@jit(bool_(c_string_type, c_string_type))
def ne(s1, s2):
return s1 != s2
@jit(bool_(c_string_type, c_string_type))
def lt(s1, s2):
return s1 < s2
@jit(c_string_type(c_string_type, c_string_type))
def concat(s1, s2):
return s1 + s2
@jit(c_string_type(c_string_type, c_string_type, c_string_type))
def interpolate(s, s1, s2):
return s % (s1, s2)
def string_len(s):
return len(s)
if __name__ == '__main__':
import numba
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_struct
import os
from numba import *
from numba import error
import numpy as np
#------------------------------------------------------------------------
# Structs as locals
#------------------------------------------------------------------------
struct_type = struct_([('a', char.pointer()), ('b', int_)])
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local():
value.a = "foo"
value.b = 10
return value.a, value.b
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local_inplace():
value.a = "foo"
value.b = 10
value.b += 10.0
return value.a, value.b
# TODO: structs from objects
#@autojit
#def struct_as_arg(arg):
# arg.a = "foo"
# return arg.a
#
#@autojit(backend='ast', locals=dict(value=struct_type))
#def call_struct_as_arg():
# return struct_as_arg(value)
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local_copy():
value.a = "foo"
value.b = 10
value2 = value
return value2.a, value2.b
def test_struct_locals():
result = struct_local()
assert result == ("foo", 10), result
result = struct_local_inplace()
assert result == ("foo", 20), result
# result = call_struct_as_arg()
# assert result == "foo", result
result = struct_local_copy()
assert result == ("foo", 10), result
#------------------------------------------------------------------------
# Struct indexing
#------------------------------------------------------------------------
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_indexing_strings():
value['a'] = "foo"
value['b'] = 10
return value['a'], value['b']
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_indexing_ints():
value[0] = "foo"
value[1] = 10
return value[0], value[1]
def test_struct_indexing():
assert struct_indexing_strings() == ("foo", 10)
assert struct_indexing_ints() == ("foo", 10)
#------------------------------------------------------------------------
# Record arrays
#------------------------------------------------------------------------
@autojit(backend='ast')
def record_array(array):
array[0].a = 4
array[0].b = 5.0
def test_record_array():
struct_type = struct_([('a', int32), ('b', double)])
struct_dtype = struct_type.get_dtype()
array = np.empty((1,), dtype=struct_dtype)
record_array(array)
assert array[0]['a'] == 4, array[0]
assert array[0]['b'] == 5.0, array[0]
#------------------------------------------------------------------------
# Object Coercion
#------------------------------------------------------------------------
struct_type = struct_([('a', int_), ('b', double)])
@autojit(backend='ast', locals=dict(value=struct_type))
def coerce_to_obj():
value.a = 10
value.b = 20.2
return object_(value)
def test_coerce_to_obj():
print((coerce_to_obj()))
if __name__ == "__main__":
print((struct_local_copy()))
# print call_struct_as_arg()
test_struct_locals()
test_record_array()
test_coerce_to_obj()
test_struct_indexing()
########NEW FILE########
__FILENAME__ = test_sum
#! /usr/bin/env python
# ______________________________________________________________________
'''test_sum
Test the sum2d() example.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
from numba.testing import test_support
import sys
import unittest
# ______________________________________________________________________
def sum2d(arr):
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
# ______________________________________________________________________
def bad_sum2d(arr):
'''Unit test code for issue #34:
https://github.com/numba/numba/issues/34'''
M, N = arr.shape
result = 0.0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
# ______________________________________________________________________
class TestASTSum2d(test_support.ASTTestCase):
def test_vectorized_sum2d(self):
usum2d = self.jit(argtypes=[double[:,:]],
restype=double)(sum2d)
image = numpy.random.rand(10, 10)
plain_old_result = sum2d(image)
hot_new_result = usum2d(image)
self.assertTrue((abs(plain_old_result - hot_new_result) < 1e-9).all())
def _bad_sum2d(self):
compiled_bad_sum2d = self.jit(argtypes = [double[:,:]],
restype = double)(bad_sum2d)
image = numpy.random.rand(10, 10)
self.assertEqual(bad_sum2d(image), compiled_bad_sum2d(image))
@test_support.checkSkipFlag("Test fails due to problem in Meta.")
def test_bad_sum2d(self):
self._bad_sum2d()
if __name__ == "__main__":
#TestASTSum2d('test_vectorized_sum2d').debug()
unittest.main()
# ______________________________________________________________________
# End of test_sum.py
########NEW FILE########
__FILENAME__ = test_tuple
#! /usr/bin/env python
# ______________________________________________________________________
'''test_tuple
Unit test aimed at testing symbolic execution of the BUILD_TUPLE opcode.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
from numba.testing import test_support
import unittest
# ______________________________________________________________________
def tuple_fn_0 (inarr):
i = 1
j = 2
k = 3
internal_tuple = (i, j, k)
return inarr[internal_tuple]
# return inarr[1,2,3]
# ______________________________________________________________________
class TestASTTuple(test_support.ASTTestCase):
def test_tuple_fn_0 (self):
test_arr = numpy.zeros((4,4,4))
compiled_fn = self.jit(argtypes = [double[:,:,:]])(tuple_fn_0)
self.assertEqual(compiled_fn(test_arr), 0.)
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_tuple.py
########NEW FILE########
__FILENAME__ = test_types
#! /usr/bin/env python
# ______________________________________________________________________
'''
Test type mapping.
'''
# ______________________________________________________________________
import numba
from numba import *
from numba.decorators import jit
from numba.testing import test_support
import unittest
# ______________________________________________________________________
def if1(arg): # stupid nosetests
if arg > 0:
result = 22
else:
result = 42
return result
def if2(arg):
if arg > 0:
result = 22
else:
result = 42
return result
# ______________________________________________________________________
class TestASTIf(test_support.ASTTestCase):
def test_int(self):
func = self.jit(restype=numba.int_,
argtypes=[numba.int_])(if1)
self.assertEqual(func(-1), 42)
self.assertEqual(func(1), 22)
def test_long(self):
func = self.jit(restype=numba.long_,
argtypes=[numba.long_])(if2)
self.assertEqual(func(-1), 42)
self.assertEqual(func(1), 22)
# ______________________________________________________________________
if __name__ == "__main__":
unittest.main()
# ______________________________________________________________________
# End of test_types.py
########NEW FILE########
__FILENAME__ = test_unbound_variables
from numba import *
a = 10
b = 11
c = 12
def jitter():
a = 20
b = 21
c = 22
@jit(object_())
def func():
return a, c
return func
func = jitter()
assert func() == (20, 22)
########NEW FILE########
__FILENAME__ = test_unpacking
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ctypes
import unittest
import numba as nb
from numba import jit, list_, tuple_, object_, int_, sized_pointer, npy_intp
import numpy as np
A = np.empty((5, 6))
shape_t = sized_pointer(npy_intp, 2)
# ______________________________________________________________________
def unpack(x):
x, y = x
return x * y
# ______________________________________________________________________
class Iterable(object):
"""I don't work yet"""
def __iter__(self):
return iter((5, 6))
class Sequence(object):
"""I work"""
def __getitem__(self, idx):
return [5, 6][idx]
# ______________________________________________________________________
class TestUnpacking(unittest.TestCase):
def test_unpacking(self):
lunpack = jit(int_(list_(int_, 2)))(unpack)
tunpack = jit(int_(tuple_(int_, 2)))(unpack)
tounpack = jit(int_(tuple_(object_, 2)))(unpack)
iunpack = jit(int_(object_))(unpack)
sunpack = jit(int_(object_))(unpack)
punpack = jit(int_(shape_t), wrap=False)(unpack)
self.assertEqual(lunpack([5, 6]), 30)
self.assertEqual(tunpack((5, 6)), 30)
self.assertEqual(tounpack((5, 6)), 30)
# self.assertEqual(iunpack(Iterable()), 30)
self.assertEqual(sunpack(Sequence()), 30)
c_punpack = nb.addressof(punpack)
self.assertEqual(c_punpack(A.ctypes.shape), 30)
if __name__ == "__main__":
unittest.main()
########NEW FILE########
__FILENAME__ = test_unsigned_arith
import numpy as np
import unittest
from numba import void, int32, uint32, jit, int64
@jit(void(uint32[:], uint32, uint32))
def prng(X, A, C):
for i in range(X.shape[0]):
for j in range(100):
v = (A * X[i] + C)
X[i] = v & 0xffffffff
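# prng iterates a linear congruential generator X = (A * X + C) mod 2**32 per
# element; the mask keeps the value within the unsigned 32-bit range that the
# uint32[:] signature expects.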
@jit(uint32())
def unsigned_literal():
return abs(0xFFFFFFFF)
@jit(int64())
def unsigned_literal_64():
return 0x100000000
@jit(int64(int32))
def constant_int_add(a):
return 0xffffffff + a
class Test(unittest.TestCase):
def test_prng(self):
N = 100
A = 1664525
C = 1013904223
X0 = np.arange(N, dtype=np.uint32)
X1 = X0.copy()
prng.py_func(X0, A, C)
prng(X1, A, C)
self.assertTrue(np.all(X1 >= 0))
self.assertTrue(np.all(X0 == X1))
def test_unsigned_literal(self):
got = unsigned_literal()
expect = abs(0xFFFFFFFF)
self.assertEqual(expect, got)
def test_unsigned_literal_64(self):
got = unsigned_literal_64()
expect = 0x100000000
self.assertEqual(expect, got)
def test_constant_int_add(self):
got = constant_int_add(1)
expect = 0xffffffff + 1
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = test_while
#! /usr/bin/env python
# ______________________________________________________________________
from numba import *
import numpy
from nose.tools import nottest
from numba.testing import test_support
from numba.utils import debugout
# ______________________________________________________________________
def while_loop_fn_0(max_index, indexable):
i = 0
acc = 0.
while i < max_index:
acc += indexable[i]
i += 1
return acc
# ______________________________________________________________________
def while_loop_fn_1(indexable):
i = 0
acc = 0.
while i < len(indexable):
acc += indexable[i]
i += 1
return acc
# ______________________________________________________________________
def while_loop_fn_2(ndarr):
i = 0
acc = 0.
while i < ndarr.shape[0]:
acc += ndarr[i]
i += 1
return acc
# ______________________________________________________________________
def while_loop_fn_3(count):
i = 0
acc = 1.
while i < count:
acc *= 2
i += 1
return acc
# ______________________________________________________________________
def while_loop_fn_4(start, stop, inc):
'''Intended to parallel desired translation target for
test_forloop.for_loop_fn_1.'''
acc = 0
i = start
while i != stop:
acc += i
i += inc
return acc
# ______________________________________________________________________
def while_loop_fn_5(i_max, j_max):
j = 1.
acc = 0.
while j < j_max:
i = 1.
while i < i_max:
acc += i * j
i += 1.
j += 1.
return acc
# ______________________________________________________________________
def while_loop_fn_6(test_input):
'''While-loop version of for-loop tests for issue #25.
https://github.com/numba/numba/issues/25'''
acc = 0.0
i = 0.0
while i < 5.0:
if i == test_input:
acc += 100.0
else:
acc += i
i += 1.0
return acc
# ______________________________________________________________________
def while_loop_fn_7(test_input):
'''While-loop version of for-loop tests for issue #25.
https://github.com/numba/numba/issues/25'''
acc = 0.0
i = 0.0
while i < 5.0:
tmp = i
acc += i
i += 1.0
if tmp == test_input:
return acc
return acc
# ______________________________________________________________________
def while_loop_fn_8(test_input):
acc = 0.0
i = 0.0
while i < 5.0:
acc += i
if i == test_input:
i += 0.5
continue
elif i > test_input:
break
i += 1.
return acc
# ______________________________________________________________________
class TestASTWhile(test_support.ASTTestCase):
def _do_test(self, function, argtypes, *args, **kws):
_jit = (self.jit(argtypes = argtypes)
if argtypes is not None else self.jit())
compiled_fn = _jit(function)
self.assertEqual(compiled_fn(*args, **kws), function(*args, **kws))
def test_while_loop_fn_0(self):
test_data = numpy.array([1., 2., 3.])
self._do_test(while_loop_fn_0, [long_, double[:]], len(test_data),
test_data)
def test_while_loop_fn_1(self):
self._do_test(while_loop_fn_1, [double[:]], numpy.array([1., 2., 3.]))
def test_while_loop_fn_2(self):
self._do_test(while_loop_fn_2, [double[:]], numpy.array([1., 2., 3.]))
def test_while_loop_fn_3(self):
compiled_fn = self.jit(argtypes = [long_])(while_loop_fn_3)
compiled_result = compiled_fn(3)
self.assertEqual(compiled_result, while_loop_fn_3(3))
self.assertEqual(compiled_result, 8.)
def test_while_loop_fn_4(self):
compiled_fn = self.jit(argtypes = (long_, long_, long_),
restype = long_)(while_loop_fn_4)
compiled_result = compiled_fn(1, 4, 1)
self.assertEqual(compiled_result, while_loop_fn_4(1, 4, 1))
self.assertEqual(compiled_result, 6)
def test_while_loop_fn_5(self):
compiled_fn = self.jit(argtypes = [double, double])(while_loop_fn_5)
compiled_result = compiled_fn(3, 4)
self.assertEqual(compiled_result, while_loop_fn_5(3, 4))
self.assertEqual(compiled_result, 18.)
def test_while_loop_fn_6(self):
compiled_fn = self.jit(restype=double, argtypes=[double])(while_loop_fn_6)
self.assertEqual(while_loop_fn_6(4.), compiled_fn(4.))
self.assertEqual(while_loop_fn_6(5.), compiled_fn(5.))
def test_while_loop_fn_7(self):
compiled_fn = self.jit(restype=double, argtypes=[double])(while_loop_fn_7)
self.assertEqual(while_loop_fn_7(4.), compiled_fn(4.))
self.assertEqual(while_loop_fn_7(5.), compiled_fn(5.))
def test_while_loop_fn_8(self):
compiled_fn = self.jit(restype=double, argtypes=[double])(while_loop_fn_8)
self.assertEqual(while_loop_fn_8(3.), compiled_fn(3.))
self.assertEqual(while_loop_fn_8(4.), compiled_fn(4.))
self.assertEqual(while_loop_fn_8(5.), compiled_fn(5.))
@test_support.checkSkipFlag("Test fails due to problem in Meta.")
def test_while_loop_fn_7(self, *args, **kws):
return super(TestASTWhile, self).test_while_loop_fn_7(*args, **kws)
# ______________________________________________________________________
if __name__ == "__main__":
TestASTWhile("test_while_loop_fn_8").debug()
# autojit(while_loop_fn_2)(numpy.array([1., 2., 3.]))
# jit(argtypes = [long_])(while_loop_fn_3)
# jit(argtypes = (long_, long_, long_),
# restype = long_)(while_loop_fn_4)
# jit(argtypes = [double, double])(while_loop_fn_5)
# jit(restype=double, argtypes=[double])(while_loop_fn_6)
# jit(restype=double, argtypes=[double])(while_loop_fn_7)
# jit(restype=double, argtypes=[double])(while_loop_fn_8)
test_support.main()
# ______________________________________________________________________
# End of test_while.py
########NEW FILE########
__FILENAME__ = test_withpython
import os
import ctypes
from numba import *
from numba import error
from nose.tools import *
@autojit(backend='ast')
def withnopython():
val = 0.0
with nopython:
val += 1.0
return val
@autojit(backend='ast')
def withnopython_nested(obj):
result = 0.0
with nopython:
with python:
obj_result = obj.method()
with nopython:
result += 1.0
return obj_result, result
@autojit(backend='ast', nopython=True)
def nopython(obj):
with python:
return obj.method()
class Class(object):
def method(self):
return 20.0
def test_with_no_python():
assert withnopython() == 1.0
assert withnopython_nested(Class()) == (20.0, 1.0)
assert nopython(Class()) == 20.0
#
### Test errors
#
@autojit(backend='ast')
def withnopython_error(obj):
with nopython:
return obj.method()
@autojit(backend='ast', nopython=True)
def withnopython_error2(obj):
return obj.method()
@raises(error.NumbaError)
def test_errors1():
withnopython_error(Class())
@raises(error.NumbaError)
def test_errors2():
withnopython_error2(Class())
if __name__ == "__main__":
test_with_no_python()
test_errors1()
test_errors2()
########NEW FILE########
__FILENAME__ = threads
"""
Implements threads using llvm cbuilder. Taken from
numbapro/vectorizers/parallel.py
"""
from llvm.core import *
from llvm_cbuilder import *
import llvm_cbuilder.shortnames as C
import sys
class PThreadAPI(CExternal):
'''external declaration of pthread API
'''
pthread_t = C.void_p
pthread_create = Type.function(C.int,
[C.pointer(pthread_t), # thread_t
C.void_p, # thread attr
C.void_p, # function
C.void_p]) # arg
pthread_join = Type.function(C.int, [C.void_p, C.void_p])
class WinThreadAPI(CExternal):
'''external declaration of the Windows threading API
'''
_calling_convention_ = CC_X86_STDCALL
handle_t = C.void_p
# lpStartAddress is an LPTHREAD_START_ROUTINE, with the form
# DWORD ThreadProc (LPVOID lpdwThreadParam )
CreateThread = Type.function(handle_t,
[C.void_p, # lpThreadAttributes (NULL for default)
C.intp, # dwStackSize (0 for default)
C.void_p, # lpStartAddress
C.void_p, # lpParameter
C.int32, # dwCreationFlags (0 for default)
C.pointer(C.int32)]) # lpThreadId (NULL if not required)
# Return is WAIT_OBJECT_0 (0x00000000) to indicate the thread exited,
# or WAIT_ABANDONED, WAIT_TIMEOUT, WAIT_FAILED for other conditions.
WaitForSingleObject = Type.function(C.int32,
[handle_t, # hHandle
C.int32]) # dwMilliseconds (INFINITE == 0xFFFFFFFF means wait forever)
CloseHandle = Type.function(C.int32, [handle_t])
class ParallelUFuncPosixMixin(object):
'''ParallelUFunc mixin that implements _dispatch_worker to use pthread.
'''
def _dispatch_worker(self, worker, contexts, num_thread):
api = PThreadAPI(self)
NULL = self.constant_null(C.void_p)
threads = self.array(api.pthread_t, num_thread, name='threads')
# self.debug("launch threads")
with self.for_range(num_thread) as (loop, i):
status = api.pthread_create(threads[i].reference(), NULL, worker,
contexts[i].reference().cast(C.void_p))
with self.ifelse(status != self.constant_null(status.type)) as ifelse:
with ifelse.then():
# self.debug("Error at pthread_create: ", status)
self.unreachable()
with self.for_range(num_thread) as (loop, i):
status = api.pthread_join(threads[i], NULL)
with self.ifelse(status != self.constant_null(status.type)) as ifelse:
with ifelse.then():
# self.debug("Error at pthread_join: ", status)
self.unreachable()
class ParallelUFuncWindowsMixin(object):
'''ParallelUFunc mixin that implements _dispatch_worker to use Windows threading.
'''
def _dispatch_worker(self, worker, contexts, num_thread):
api = WinThreadAPI(self)
NULL = self.constant_null(C.void_p)
lpdword_NULL = self.constant_null(C.pointer(C.int32))
zero = self.constant(C.int32, 0)
intp_zero = self.constant(C.intp, 0)
INFINITE = self.constant(C.int32, 0xFFFFFFFF)
threads = self.array(api.handle_t, num_thread, name='threads')
# self.debug("launch threads")
# TODO error handling
with self.for_range(num_thread) as (loop, i):
threads[i] = api.CreateThread(NULL, intp_zero, worker,
contexts[i].reference().cast(C.void_p),
zero, lpdword_NULL)
with self.for_range(num_thread) as (loop, i):
api.WaitForSingleObject(threads[i], INFINITE)
api.CloseHandle(threads[i])
if sys.platform == 'win32':
ParallelMixin = ParallelUFuncWindowsMixin
else:
ParallelMixin = ParallelUFuncPosixMixin
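# Note: the ParallelMixin alias above picks the platform-appropriate dispatcher once at
# import time: Windows threads on win32, POSIX threads (pthreads) everywhere else.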
########NEW FILE########
__FILENAME__ = astformat
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast, sys
def prettyprint(node, stream=sys.stdout):
text = ast.dump(node)
depth = 0
last = ''
for i in text:
if i == ' ':
continue # ignore space
indent = ' ' * 4 * depth
if last in ['(', '[', ','] and i not in [')', ']']:
stream.write('\n' + indent)
if i in ['(', '[']:
depth += 1
elif i in [')', ']']:
# if last not in ['(', '[']:
# stream.write('\n' + indent)
depth -= 1
stream.write(i)
last = i
stream.write('\n')
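# Illustrative usage (not part of the original module): pretty-print the AST of a small
# expression using only the stdlib `ast` module imported above.
if __name__ == "__main__":
    prettyprint(ast.parse("a + b * 2"))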
########NEW FILE########
__FILENAME__ = traits
"""
Minimal traits implementation:
@traits
class MyClass(object):
attr = Instance(SomeClass)
my_delegation = Delegate('attr')
"""
import inspect
# from numba.utils import TypedProperty
def traits(cls):
"@traits class decorator"
for name, py_func in vars(cls).items():
if isinstance(py_func, TraitBase):
py_func.set_attr_name(name)
return cls
class TraitBase(object):
"Base class for traits"
def __init__(self, value, doc=None):
self.value = value
self.doc = doc
def set_attr_name(self, name):
self.attr_name = name
class Delegate(TraitBase):
"""
Delegate to some other object.
"""
def __init__(self, value, delegate_attr_name=None, doc=None):
super(Delegate, self).__init__(value, doc=doc)
self.delegate_attr_name = delegate_attr_name
def obj(self, instance):
return getattr(instance, self.value)
@property
def attr(self):
return self.delegate_attr_name or self.attr_name
def __get__(self, instance, owner):
return getattr(self.obj(instance), self.attr)
def __set__(self, instance, value):
return setattr(self.obj(instance), self.attr, value)
def __delete__(self, instance):
delattr(self.obj(instance), self.attr)
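# Illustrative usage sketch (not part of the original module; the class names are made
# up): delegate an attribute lookup from one object to a member object.
if __name__ == "__main__":
    class Engine(object):
        horsepower = 150
    @traits
    class Car(object):
        horsepower = Delegate('engine')
        def __init__(self):
            self.engine = Engine()
    assert Car().horsepower == 150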
########NEW FILE########
__FILENAME__ = transforms
# -*- coding: utf-8 -*-
"""
This module provides a variety of transforms that transform the AST
into a final form ready for code generation.
Below follows an explanation and justification of the design of the main
compilation stages in numba.
We start with a Python AST, compiled from source code or decompiled from
bytecode using meta. We run the following transformations:
1) Type inference
Infer types of all expressions, and fix the types of all local
variables. Local variable types are promoted (for instance float
to double), but cannot change (e.g. string cannot be assigned to
float).
When the type inferencer cannot determine a type, such as when it
calls a Python function or method that is not a Numba function, it
assumes type object. Object variables may be coerced to and from
most native types.
The type inferencer inserts CoercionNode nodes that perform such
coercions, as well as coercions between promotable native types.
It also resolves the return type of many math functions called
in the numpy, math and cmath modules.
Each AST expression node has a Variable that holds the type of
the expression, as well as any meta-data such as constant values
that have been determined.
2) Transform for loops
Provides some transformations of for loops over arrays to loops
over a range. Iteration over range/xrange is resolved at
compilation time.
What I would like to see is the generation of a RangeNode holding
a ast.Compare and an iteration variable incrementing ast.BinOp.
3) Low level specializations (LateSpecializer)
This stage performs low-level specializations. For instance it
resolves coercions to and from object into calls such as
PyFloat_FromDouble, with a fallback to Py_BuildValue/PyArg_ParseTuple.
This specializer also has the responsibility to ensure that new
references are accounted for by refcounting ObjectTempNode nodes.
This node absorbs the references and lets parent nodes borrow the
reference. At function cleanup, it decrefs its value. In loops,
it also decrefs any previous value, if set. Hence, these temporaries
must be initialized to NULL.
An object temporary is specific to one specific sub-expression, and
they are not reused (like in Cython).
It also rewrites object attribute access and method calls into
PyObject_GetAttrString etc.
4) Code generation
Generate LLVM code from the transformed AST.
This should be as minimal as possible, and should *not* contain
blobs of code performing complex operations. Instead, complex
operations should be broken down by AST transformers into
fundamental operations that are already supported by the AST.
This way we maximize code reuse, and make potential future additions
of different code generation backends easier. This can be taken
only so far, since low-level transformations must also tailor to
limitations of the code generation backend, such as intrinsic LLVM
calls or calls into libc. However, code reuse is especially convenient
in the face of type coercions, which LLVM does not provide any
leniency for.
"""
from __future__ import print_function, division, absolute_import
import sys
import ast
import ctypes
import warnings
import logging
from functools import partial
if __debug__:
import pprint
import numba
from numba import *
from numba import error
from .minivect import codegen
from numba import macros, utils, typesystem
from numba import visitors, nodes
from numba import function_util
from numba.typesystem import is_obj, promote_to_native
from numba.type_inference.modules import mathmodule
from numba.nodes import constnodes
from numba.external import utility
from numba.utils import dump
import llvm.core
import numpy as np
logger = logging.getLogger(__name__)
from numba.external import pyapi
# ______________________________________________________________________
def get_funcname(py_func):
if py_func in (abs, np.abs):
return 'abs'
elif py_func is np.round:
return 'round'
return mathmodule.ufunc2math.get(py_func.__name__, py_func.__name__)
def resolve_pow(env, restype, args):
promote = env.crnt.typesystem.promote
if restype.is_numeric:
type = reduce(promote, [double, restype] + [a.type for a in args])
signature = type(*[type] * len(args))
result = nodes.MathCallNode(signature, args, None, name='pow')
else:
result = nodes.call_pyfunc(pow, args)
return nodes.CoercionNode(result, restype)
def math_call(env, name, args, dst_type):
signature = dst_type(*[a.type for a in args])
return nodes.MathCallNode(signature, args, None, name=name)
def math_call2(env, name, call_node):
return math_call(env, name, [call_node.args[0]], call_node.type)
# ______________________________________________________________________
class BuiltinResolver(object):
"""
Perform final low-level transformations such as abs(value) -> fabs(value)
"""
def __init__(self, env):
self.env = env
self.external_call = partial(function_util.external_call,
self.env.context,
self.env.crnt.llvm_module)
def resolve_builtin_call(self, node, func):
"""
Resolve an ast.Call() of a built-in function.
Returns None if no specific transformation is applied.
"""
resolver = getattr(self, '_resolve_' + func.__name__, None)
if resolver is not None:
# Pass in the first argument type
argtype = None
if len(node.args) >= 1:
argtype = node.args[0].variable.type
return resolver(func, node, argtype)
return None
def resolve_builtin_call_or_object(self, node, func):
"""
Resolve an ast.Call() of a built-in function, or call the built-in
through the object layer otherwise.
"""
result = self.resolve_builtin_call(node, func)
if result is None:
result = nodes.call_pyfunc(func, node.args)
return nodes.CoercionNode(result, node.type)
def _resolve_abs(self, func, node, argtype):
if argtype.is_int and not argtype.signed:
# abs() on unsigned integral value
return node.args[0]
elif not node.type.is_numeric:
result = nodes.call_pyfunc(func, node.args)
else:
return math_call2(self.env, 'abs', node)
def _resolve_round(self, func, node, argtype):
return nodes.call_pyfunc(round, node.args)
def _resolve_pow(self, func, node, argtype):
return resolve_pow(self.env, node.type, node.args)
def _resolve_int_number(self, func, node, argtype, dst_type, ext_name):
assert len(node.args) == 2
arg1, arg2 = node.args
if arg1.variable.type.is_string:
return nodes.CoercionNode(
nodes.ObjectTempNode(
self.external_call(ext_name, args=[arg1, nodes.NULL, arg2])),
dst_type=dst_type)
def _resolve_int(self, func, node, argtype, dst_type=int_):
if PY3:
return self._resolve_int_number(func, node, argtype, long_, 'PyLong_FromString')
return self._resolve_int_number(func, node, argtype, int_, 'PyInt_FromString')
def _resolve_long(self, func, node, argtype, dst_type=int_):
return self._resolve_int_number(func, node, argtype, long_, 'PyLong_FromString')
def _resolve_len(self, func, node, argtype):
if argtype.is_string:
call = self.external_call('strlen', node.args)
return call # nodes.CoercionNode(call, Py_ssize_t)
class ResolveCoercions(visitors.NumbaTransformer):
def visit_CoercionNode(self, node):
if not isinstance(node, nodes.CoercionNode):
# CoercionNode.__new__ returns the node to be coerced if it doesn't
# need coercion
return node
node_type = node.node.type
dst_type = node.dst_type
if __debug__ and self.env and self.env.debug_coercions:
logger.debug('coercion: %s --> %s\n%s',
node_type, dst_type, utils.pformat_ast(node))
# TODO: the below is a problem due to implicit string <-> int coercions!
if (node_type.is_string and dst_type.is_numeric and not
(node_type.is_pointer or node_type.is_null)):
if dst_type.typename in ('char', 'uchar'):
raise error.NumbaError(
node, "Conversion from string to (u)char not yet supported")
result = self.str_to_int(dst_type, node)
elif self.nopython and (is_obj(node_type) ^ is_obj(dst_type)):
raise error.NumbaError(node, "Cannot coerce to or from object in "
"nopython context")
elif is_obj(node.dst_type) and not is_obj(node_type):
node = nodes.ObjectTempNode(nodes.CoerceToObject(
node.node, node.dst_type, name=node.name))
result = self.visit(node)
elif is_obj(node_type) and not is_obj(node.dst_type):
node = nodes.CoerceToNative(node.node, node.dst_type,
name=node.name)
result = self.visit(node)
elif node_type.is_null:
if not dst_type.is_pointer:
raise error.NumbaError(node.node,
"NULL must be cast or implicitly "
"coerced to a pointer type")
result = self.visit(nodes.NULL.coerce(dst_type))
elif node_type.is_numeric and dst_type.is_bool:
to_bool = ast.Compare(node.node, [ast.NotEq()],
[nodes.const(0, node_type)])
to_bool = nodes.typednode(to_bool, bool_)
result = self.visit(to_bool)
else:
self.generic_visit(node)
if dst_type == node.node.type:
result = node.node
else:
result = node
if __debug__ and self.env and self.env.debug_coercions:
logger.debug('result = %s', utils.pformat_ast(result))
return result
def str_to_int(self, dst_type, node):
# TODO: int <-> string conversions are explicit, this should not
# TODO: be a coercion
if self.nopython:
node = nodes.CoercionNode(
function_util.external_call(
self.context,
self.llvm_module,
('atol' if dst_type.is_int else 'atof'),
args=[node.node]),
dst_type, name=node.name, )
else:
if dst_type.is_int:
cvtobj = function_util.external_call(
self.context,
self.llvm_module,
'PyInt_FromString' if not PY3 else 'PyLong_FromString',
args=[node.node, nodes.NULL,
nodes.const(10, int_)])
else:
cvtobj = function_util.external_call(
self.context,
self.llvm_module,
'PyFloat_FromString',
args=[node.node,
nodes.const(0, Py_ssize_t)])
node = nodes.CoerceToNative(nodes.ObjectTempNode(cvtobj),
dst_type, name=node.name)
result = self.visit(node)
return result
def convert_int_to_object(self, arg):
funcs = ["__Numba_PyInt_FromLongLong",
"__Numba_PyInt_FromUnsignedLongLong"]
func = funcs[arg.type.signed]
return function_util.utility_call(self.context, self.llvm_module,
func, [arg])
def visit_CoerceToObject(self, node):
new_node = node
node_type = node.node.type
if node_type.is_bool:
new_node = function_util.external_call(self.context,
self.llvm_module,
"PyBool_FromLong",
args=[node.node])
elif node_type.is_numeric and node_type.typename not in ('char', 'uchar'):
cls = None
args = node.node,
if node_type.is_int:
new_node = self.convert_int_to_object(node.node)
elif node_type.is_float:
cls = pyapi.PyFloat_FromDouble
elif node_type.is_complex:
cls = pyapi.PyComplex_FromDoubles
complex_value = nodes.CloneableNode(node.node)
args = [
nodes.ComplexAttributeNode(complex_value, "real"),
nodes.ComplexAttributeNode(complex_value.clone, "imag")
]
elif node_type.is_numpy_datetime:
datetime_value = nodes.CloneableNode(node.node)
args = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
nodes.ConstNode(np.datetime64(), object_),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_numpy_datetime", args=args)
elif node_type.is_datetime:
datetime_value = nodes.CloneableNode(node.node)
args = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_python_datetime", args=args)
elif node_type.is_timedelta:
timedelta_value = nodes.CloneableNode(node.node)
args = [
nodes.TimeDeltaAttributeNode(timedelta_value, 'diff'),
nodes.TimeDeltaAttributeNode(timedelta_value.clone, 'units'),
nodes.ConstNode(np.timedelta64(), object_),
]
new_node = function_util.utility_call(
self.context, self.llvm_module,
"create_numpy_timedelta", args=args)
else:
raise error.NumbaError(
node, "Don't know how to coerce type %r to PyObject" %
node_type)
if cls:
new_node = function_util.external_call(self.context,
self.llvm_module,
cls.__name__,
args=args)
elif node_type.is_pointer and not node_type in (char.pointer(), string_):
# Create ctypes pointer object
ctypes_pointer_type = node_type.to_ctypes()
args = [nodes.CoercionNode(node.node, int64),
nodes.ObjectInjectNode(ctypes_pointer_type, object_)]
new_node = nodes.call_pyfunc(ctypes.cast, args)
self.generic_visit(new_node)
return new_node
def object_to_int(self, node, dst_type):
"""
Return node that converts the given node to the dst_type.
This also performs overflow/underflow checking, and conversion to
a Python int or long if necessary.
PyLong_AsLong and friends do not do this (overflow/underflow checking
is only for longs, and conversion to int|long depends on the Python
version).
"""
dst_type = promote_to_native(dst_type)
assert dst_type in utility.object_to_numeric, (dst_type, utility.object_to_numeric)
utility_func = utility.object_to_numeric[dst_type]
result = function_util.external_call_func(self.context,
self.llvm_module,
utility_func,
args=[node])
return result
def coerce_to_function_pointer(self, node, jit_func_type, func_pointer_type):
jit_func = jit_func_type.jit_func
if jit_func.signature != func_pointer_type.base_type:
raise error.NumbaError(node,
"Cannot coerce jit funcion %s to function of type %s" % (
jit_func, func_pointer_type))
pointer = self.env.llvm_context.get_pointer_to_function(jit_func.lfunc)
new_node = nodes.const(pointer, func_pointer_type)
return new_node
def visit_CoerceToNative(self, node):
"""
Try to perform fast coercion using e.g. PyLong_AsLong(), with a
fallback to PyArg_ParseTuple().
"""
new_node = None
from_type = node.node.type
node_type = node.type
if node_type.is_numeric:
cls = None
if node_type == size_t:
node_type = ulonglong
if node_type.is_int: # and not
new_node = self.object_to_int(node.node, node_type)
elif node_type.is_float:
cls = pyapi.PyFloat_AsDouble
elif node_type.is_complex:
# FIXME: This conversion has to be pretty slow. We
# need to move towards being ABI-savvy enough to just
# call PyComplex_AsCComplex().
cloneable = nodes.CloneableNode(node.node)
new_node = nodes.ComplexNode(
real=function_util.external_call(
self.context, self.llvm_module,
"PyComplex_RealAsDouble", args=[cloneable]),
imag=function_util.external_call(
self.context, self.llvm_module,
"PyComplex_ImagAsDouble", args=[cloneable.clone]))
elif node_type.is_numpy_datetime:
timestamp_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_datetime_to_timestamp", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_datetime_to_units", args=[node.node])
new_node = nodes.DateTimeNode(timestamp_func, units_func)
elif node_type.is_datetime:
timestamp_func = function_util.utility_call(
self.context, self.llvm_module,
"pydatetime2timestamp", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"pydatetime2units", args=[node.node])
new_node = nodes.DateTimeNode(timestamp_func, units_func)
elif node_type.is_numpy_timedelta:
diff_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_timedelta_to_diff", args=[node.node])
units_func = function_util.utility_call(
self.context, self.llvm_module,
"convert_numpy_timedelta_to_units", args=[node.node])
new_node = nodes.DateTimeNode(diff_func, units_func)
elif node_type.is_timedelta:
raise NotImplementedError
else:
raise error.NumbaError(
node, "Don't know how to coerce a Python object to a %r" %
node_type)
if cls:
# TODO: error checking!
new_node = function_util.external_call(self.context,
self.llvm_module,
cls.__name__,
args=[node.node])
elif node_type.is_pointer and not node_type.is_string:
if from_type.is_jit_function and node_type.base_type.is_function:
new_node = self.coerce_to_function_pointer(
node, from_type, node_type)
else:
raise error.NumbaError(node, "Obtaining pointers from objects "
"is not yet supported (%s)" % node_type)
elif node_type.is_void:
raise error.NumbaError(node, "Cannot coerce %s to void" %
(from_type,))
if new_node is None:
# Create a tuple for PyArg_ParseTuple
new_node = node
new_node.node = ast.Tuple(elts=[node.node], ctx=ast.Load())
self.generic_visit(node)
return node
if new_node.type != node.type:
# Fast native coercion. E.g. coercing an object to an int_
# will use PyLong_AsLong, but that will return a long_. We
# need to coerce the long_ to an int_
new_node = nodes.CoercionNode(new_node, node.type)
# Specialize replacement node
new_node = self.visit(new_node)
return new_node
class LateSpecializer(ResolveCoercions,
visitors.NoPythonContextMixin):
def visit_FunctionDef(self, node):
self.builtin_resolver = BuiltinResolver(self.env)
node.decorator_list = self.visitlist(node.decorator_list)
# Make sure to visit the entry block (not part of the CFG) and the
# first actual code block which may have synthetically
# inserted promotions
self.visit_ControlBlock(node.flow.blocks[0])
self.visit_ControlBlock(node.flow.blocks[1])
node.body = self.visitlist(node.body)
ret_type = self.func_signature.return_type
self.verify_context(ret_type)
self.setup_error_return(node, ret_type)
return node
def verify_context(self, ret_type):
if ret_type.is_object or ret_type.is_array:
# This will require some increfs, but allow it if people
# use 'with python' later on. If 'with python' isn't used, a
# return will issue the error
#if self.nopython:
# raise error.NumbaError(
# node, "Function cannot return object in "
# "nopython context")
pass
def setup_error_return(self, node, ret_type):
"""
Set FunctionDef.error_return to the AST statement that returns a
"bad value" that can be used as error indicator.
"""
value = nodes.badval(ret_type)
if value is not None:
value = nodes.CoercionNode(value, dst_type=ret_type).cloneable
error_return = ast.Return(value=value)
if self.nopython and is_obj(self.func_signature.return_type):
error_return = nodes.WithPythonNode(body=[error_return])
error_return = self.visit(error_return)
node.error_return = error_return
def visit_ControlBlock(self, node):
# print node
self.visitchildren(node)
return node
def visit_While(self, node):
self.generic_visit(node)
return node
def check_context(self, node):
if self.nopython:
raise error.NumbaError(node, "Cannot construct object in "
"nopython context")
def _print_nopython(self, value, dest=None):
if dest is not None:
raise error.NumbaError(dest, "No file may be given in nopython mode")
# stdin, stdout, stderr = stdio_util.get_stdio_streams()
# stdout = stdio_util.get_stream_as_node(stdout)
format = codegen.get_printf_specifier(value.type)
if format is None:
raise error.NumbaError(
value, "Printing values of type '%s' is not supported "
"in nopython mode" % (value.type,))
return function_util.external_call(
self.context,
self.llvm_module,
'printf',
args=[nodes.const(format, c_string_type),
value])
def _print(self, value, dest=None):
signature, lfunc = self.context.external_library.declare(
self.llvm_module,
'PyObject_CallMethod')
if dest is None:
dest = nodes.ObjectInjectNode(sys.stdout)
value = function_util.external_call(self.context,
self.llvm_module,
"PyObject_Str",
args=[value])
args = [dest, nodes.ConstNode("write"), nodes.ConstNode("O"), value]
return nodes.NativeCallNode(signature, args, lfunc)
def visit_Print(self, node):
if self.nopython:
printfunc = self._print_nopython
dst_type = string_
else:
printfunc = self._print
dst_type = object_
result = []
if node.values:
print_space = printfunc(nodes.const(" ", dst_type), node.dest)
for value in node.values:
result.append(printfunc(value, node.dest))
result.append(print_space)
if node.nl:
result.pop() # pop last space
if node.nl:
result.append(printfunc(nodes.const("\n", dst_type), node.dest))
return ast.Suite(body=self.visitlist(result))
def visit_Tuple(self, node):
self.check_context(node)
sig, lfunc = self.context.external_library.declare(self.llvm_module,
'PyTuple_Pack')
objs = self.visitlist(nodes.CoercionNode.coerce(node.elts, object_))
n = nodes.ConstNode(len(node.elts), Py_ssize_t)
args = [n] + objs
new_node = nodes.NativeCallNode(sig, args, lfunc, name='tuple')
# TODO: determine element type of node.elts
new_node.type = typesystem.tuple_(object_, size=len(node.elts))
return nodes.ObjectTempNode(new_node)
def visit_List(self, node):
self.check_context(node)
self.generic_visit(node)
return nodes.ObjectTempNode(node)
def visit_Dict(self, node):
self.check_context(node)
self.generic_visit(node)
return nodes.ObjectTempNode(node)
def visit_ObjectCallNode(self, node):
# self.generic_visit(node)
assert node.function
if self.nopython:
meth_name = ' (%r)' % node.name if node.name else ''
raise error.NumbaError(node, "Cannot use object call in "
"nopython context" + meth_name)
node.function = self.visit(node.function)
node.args_tuple = self.visit(node.args_tuple)
node.kwargs_dict = self.visit(node.kwargs_dict)
return nodes.ObjectTempNode(node)
def visit_Call(self, node):
func_type = node.func.type
if self.query(node, "is_math") and node.type.is_numeric:
assert node.func.type.is_known_value
name = get_funcname(node.func.type.value)
result = math_call(self.env, name, node.args, node.type)
elif func_type.is_builtin:
result = self.builtin_resolver.resolve_builtin_call_or_object(
node, func_type.func)
else:
result = nodes.call_obj(node)
return self.visit(result)
def _c_string_slice(self, node):
ret_val = node
logger.debug(node.slice)
node_slice = node.slice
if isinstance(node_slice, nodes.ObjectInjectNode):
node_slice = node.slice.object
lower, upper, step = (
value if value is None else nodes.const(value, size_t)
for value in (node_slice.start, node_slice.stop,
node_slice.step))
else:
lower, upper, step = (node_slice.lower, node_slice.upper,
node_slice.step)
if step is None:
node_value = self.visit(node.value)
if lower is None:
lower = nodes.const(0, size_t)
if upper is None:
ret_val = nodes.LLMacroNode(
macros.c_string_slice_1.__signature__,
macros.c_string_slice_1, self.visit(node.value),
self.visit(lower))
else:
ret_val = nodes.LLMacroNode(
macros.c_string_slice_2.__signature__,
macros.c_string_slice_2, self.visit(node.value),
self.visit(lower), self.visit(upper))
logger.debug(ret_val)
else:
raise NotImplementedError('String slices where step != None.')
return ret_val
def visit_Subscript(self, node):
if isinstance(node.value, nodes.ArrayAttributeNode):
if node.value.is_read_only and isinstance(node.ctx, ast.Store):
raise error.NumbaError("Attempt to load read-only attribute")
# Short-circuit visiting a Slice child if this is a nopython
# string slice.
if (self.nopython and node.value.type.is_string and
node.type.is_string):
return self.visit(self._c_string_slice(node))
# logging.debug(ast.dump(node))
# TODO: do this in the respective cases below when needed
self.generic_visit(node)
node_type = node.value.type
if ((node_type.is_object and not node_type.is_array) or
(node_type.is_array and node.slice.type.is_object)):
# Array or object slicing
if isinstance(node.ctx, ast.Load):
result = function_util.external_call(self.context,
self.llvm_module,
'PyObject_GetItem',
args=[node.value,
node.slice])
node = nodes.CoercionNode(result, dst_type=node.type)
node = self.visit(node)
else:
# This is handled in visit_Assign
pass
elif (node.value.type.is_array and node.type.is_numpy_datetime and
node.slice.type.is_int):
# JNB: ugly hack to make array of datetimes look like array of
# int64, since numba datetime type doesn't match numpy datetime type.
node.value.type = array_(int64, node.value.type.ndim,
node.value.type.is_c_contig,
node.value.type.is_f_contig,
node.value.type.inner_contig)
node.value.variable.type = node.value.type
data_node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
units_node = function_util.utility_call(
self.context, self.llvm_module,
"get_units_num",
args=[nodes.ConstNode(node_type.dtype.units_char, string_)])
node = nodes.DateTimeNode(data_node, units_node)
elif (node.value.type.is_array and node.type.is_numpy_timedelta and
node.slice.type.is_int):
# JNB: ugly hack to make array of timedeltas look like array of
# int64, since numba timedelta type doesn't match numpy timedelta type.
node.value.type = array_(int64, node.value.type.ndim,
node.value.type.is_c_contig,
node.value.type.is_f_contig,
node.value.type.inner_contig)
node.value.variable.type = node.value.type
data_node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
units_node = function_util.utility_call(
self.context, self.llvm_module,
"get_units_num",
args=[nodes.ConstNode(node_type.dtype.units_char, string_)])
node = nodes.TimeDeltaNode(data_node, units_node)
elif (node.value.type.is_array and not node.type.is_array and
node.slice.type.is_int):
# Array index with integer indices
node = nodes.DataPointerNode(node.value, node.slice, node.ctx)
elif node.value.type.is_string and node.type.is_string:
node.value = nodes.CoercionNode(node.value, dst_type = object_)
node.type = object_
node = nodes.CoercionNode(nodes.ObjectTempNode(node),
dst_type = c_string_type)
node = self.visit(node)
return node
def visit_ExtSlice(self, node):
if node.type.is_object:
return self.visit(ast.Tuple(elts=node.dims, ctx=ast.Load()))
else:
if node.type.is_float:
self.warn(node, "Using a float for indexing")
self.generic_visit(node)
return node
def visit_Index(self, node):
return self.visit(node.value)
def allocate_struct_on_stack(self, assmnt_node, target):
# Allocate struct on stack
temp = nodes.TempNode(target.type)
assmnt_node.targets[0] = temp.store()
assmnt_node.value = self.visit(assmnt_node.value)
# Expose LLVM value through SSA (patch the Variable or the
# LHS). We need to store the pointer to the struct (the alloca)
ssa_assmnt = ast.Assign(targets=[target], value=temp.store())
return ast.Suite(body=[assmnt_node, ssa_assmnt])
def visit_Assign(self, node):
target = node.targets[0]
target_is_subscript = (len(node.targets) == 1 and
isinstance(target, ast.Subscript))
if target_is_subscript and is_obj(target.type):
# Slice assignment / index assignment w/ objects
# TODO: discount array indexing with dtype object
target = self.visit(target)
obj = target.value
key = target.slice
value = self.visit(node.value)
call = function_util.external_call(self.context,
self.llvm_module,
'PyObject_SetItem',
args=[obj, key, value])
return self.visit(call)
elif target.type.is_struct and nodes.is_name(target):
node = self.allocate_struct_on_stack(node, target)
return node
self.generic_visit(node)
return node
def visit_Slice(self, node):
"""
Rewrite slice objects. Do this late in the pipeline so that other
code can still recognize the code structure.
"""
slice_values = [node.lower, node.upper, node.step]
if self.nopython:
raise error.NumbaError(node, "Cannot slice in nopython context")
if node.variable.is_constant:
return self.visit(nodes.ObjectInjectNode(node.variable.constant_value))
bounds = []
for node in slice_values:
if node is None:
bounds.append(nodes.NULL_obj)
else:
bounds.append(node)
new_slice = function_util.external_call(self.context,
self.llvm_module,
'PySlice_New',
args=bounds,
temp_name='slice')
return self.visit(new_slice)
# return nodes.ObjectTempNode(new_slice)
def visit_Attribute(self, node):
if (self.nopython and not node.value.type.is_module and
not node.value.type.is_complex and
not node.value.type.is_datetime and
not node.value.type.is_timedelta):
raise error.NumbaError(
node, "Cannot access Python attribute in nopython context (%s)" % node.attr)
if node.value.type.is_complex:
value = self.visit(node.value)
return nodes.ComplexAttributeNode(value, node.attr)
elif node.value.type.is_numpy_datetime:
value = self.visit(node.value)
if node.attr in ['year', 'month', 'day', 'hour', 'min', 'sec']:
func_dict = {'year' : 'extract_datetime_year',
'month' : 'extract_datetime_month',
'day' : 'extract_datetime_day',
'hour' : 'extract_datetime_hour',
'min' : 'extract_datetime_min',
'sec' : 'extract_datetime_sec',}
value = nodes.CloneableNode(value)
timestamp_node = nodes.DateTimeAttributeNode(value,
'timestamp')
unit_node = nodes.DateTimeAttributeNode(value.clone, 'units')
new_node = function_util.utility_call(
self.context, self.llvm_module,
func_dict[node.attr],
args=[timestamp_node, unit_node])
return new_node
else:
return nodes.DateTimeAttributeNode(value, node.attr)
elif node.value.type.is_datetime:
value = self.visit(node.value)
return nodes.DateTimeAttributeNode(value, node.attr)
elif node.value.type.is_timedelta:
value = self.visit(node.value)
return nodes.TimeDeltaAttributeNode(value, node.attr)
elif node.type.is_numpy_attribute:
return nodes.ObjectInjectNode(node.type.value)
elif node.type.is_numpy_dtype:
dtype_type = node.type.dtype
return nodes.ObjectInjectNode(dtype_type.get_dtype())
elif is_obj(node.value.type):
if node.value.type.is_module:
# Resolve module attributes as constants
if node.type.is_module_attribute:
new_node = nodes.ObjectInjectNode(node.type.value)
else:
new_node = nodes.ConstNode(getattr(node.value.type.module,
node.attr))
else:
new_node = function_util.external_call(
self.context,
self.llvm_module,
'PyObject_GetAttrString',
args=[node.value,
nodes.ConstNode(node.attr)])
return self.visit(new_node)
self.generic_visit(node)
return node
def visit_ArrayNewNode(self, node):
if self.nopython:
raise error.NumbaError(
node, "Cannot yet allocate new array in nopython context")
PyArray_Type = nodes.ObjectInjectNode(np.ndarray)
descr = nodes.ObjectInjectNode(node.type.dtype.get_dtype()).cloneable
ndim = nodes.const(node.type.ndim, int_)
flags = nodes.const(0, int_)
args = [PyArray_Type, descr.clone, ndim,
node.shape, node.strides, node.data, flags]
incref_descr = nodes.IncrefNode(descr)
incref_base = None
setbase = None
if node.base is None:
args.append(nodes.NULL_obj)
else:
base = nodes.CloneableNode(node.base)
incref_base = nodes.IncrefNode(base)
args.append(base.clone)
array = nodes.PyArray_NewFromDescr(args)
array = nodes.ObjectTempNode(array).cloneable
body = [incref_descr, incref_base, array, setbase]
if node.base is not None:
body.append(nodes.PyArray_SetBaseObject([array.clone, base.clone]))
# TODO: PyArray_UpdateFlags()
result = nodes.ExpressionNode(filter(None, body), array.clone)
return self.visit(result)
def visit_ArrayNewEmptyNode(self, node):
if self.nopython:
raise error.NumbaError(
node, "Cannot yet allocate new empty array in nopython context")
ndim = nodes.const(node.type.ndim, int_)
dtype = nodes.const(node.type.dtype.get_dtype(), object_).cloneable
is_fortran = nodes.const(node.is_fortran, int_)
result = nodes.PyArray_Empty([ndim, node.shape, dtype, is_fortran])
result = nodes.ObjectTempNode(result)
incref_descr = nodes.IncrefNode(dtype)
return self.visit(nodes.ExpressionNode([incref_descr], result))
def visit_Name(self, node):
if node.variable.is_constant:
obj = node.variable.constant_value
return self.visit(nodes.const(obj, node.type))
return node
def visit_Return(self, node):
return_type = self.func_signature.return_type
if node.value is not None:
node.value = self.visit(nodes.CoercionNode(node.value, return_type))
return node
def visit_For(self, node):
self.generic_visit(node)
return node
def _object_binop(self, node, api_name):
return self.visit(
function_util.external_call(self.context,
self.llvm_module,
api_name,
args=[node.left,
node.right]))
def _object_Add(self, node):
return self._object_binop(node, 'PyNumber_Add')
def _object_Sub(self, node):
return self._object_binop(node, 'PyNumber_Subtract')
def _object_Mult(self, node):
return self._object_binop(node, 'PyNumber_Multiply')
def _object_Div(self, node):
if PY3:
return self._object_binop(node, 'PyNumber_TrueDivide')
else:
return self._object_binop(node, 'PyNumber_Divide')
def _object_Mod(self, node):
return self._object_binop(node, 'PyNumber_Remainder')
def _object_Pow(self, node):
args = [node.left,
node.right,
nodes.ObjectInjectNode(None)]
return self.visit(function_util.external_call(self.context,
self.llvm_module,
'PyNumber_Power',
args=args),
llvm_module=self.llvm_module)
def _object_LShift(self, node):
return self._object_binop(node, 'PyNumber_Lshift')
def _object_RShift(self, node):
return self._object_binop(node, 'PyNumber_Rshift')
def _object_BitOr(self, node):
return self._object_binop(node, 'PyNumber_Or')
def _object_BitXor(self, node):
return self._object_binop(node, 'PyNumber_Xor')
def _object_BitAnd(self, node):
return self._object_binop(node, 'PyNumber_And')
def _object_FloorDiv(self, node):
return self._object_binop(node, 'PyNumber_FloorDivide')
def visit_BinOp(self, node):
if isinstance(node.op, ast.Pow):
return self.visit(resolve_pow(self.env, node.type, [node.left,
node.right]))
self.generic_visit(node)
if is_obj(node.left.type) or is_obj(node.right.type):
op_name = type(node.op).__name__
op_method = getattr(self, '_object_%s' % op_name, None)
if op_method:
node = op_method(node)
else:
raise error.NumbaError(
node, 'Unsupported binary operation for object: %s' %
op_name)
elif node.left.type.is_datetime and node.right.type.is_datetime:
if isinstance(node.op, ast.Sub):
datetime_value = nodes.CloneableNode(node.left)
units1_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
datetime_value = nodes.CloneableNode(node.right)
units2_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
unit_node = function_util.utility_call(
self.context, self.llvm_module,
"get_target_unit_for_datetime_datetime",
args=[units1_node, units2_node])
datetime_value = nodes.CloneableNode(node.left)
args1 = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
datetime_value = nodes.CloneableNode(node.right)
args2 = [
nodes.DateTimeAttributeNode(datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(datetime_value.clone, 'units'),
]
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"sub_datetime_datetime", args=args1+args2+[unit_node])
node = nodes.TimeDeltaNode(diff_node, unit_node)
else:
raise NotImplementedError
elif (node.left.type.is_datetime and
node.right.type.is_timedelta) or \
(node.left.type.is_timedelta and
node.right.type.is_datetime):
if isinstance(node.op, ast.Add) or isinstance(node.op, ast.Sub):
datetime_value = nodes.CloneableNode(node.left)
if node.left.type.is_datetime:
units1_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
else:
units1_node = nodes.TimeDeltaAttributeNode(
datetime_value, 'units')
datetime_value = nodes.CloneableNode(node.right)
if node.right.type.is_datetime:
units2_node = nodes.DateTimeAttributeNode(
datetime_value, 'units')
else:
units2_node = nodes.TimeDeltaAttributeNode(
datetime_value, 'units')
unit_node = function_util.utility_call(
self.context, self.llvm_module,
"get_target_unit_for_datetime_timedelta",
args=[units1_node, units2_node])
datetime_value = nodes.CloneableNode(node.left)
if node.left.type.is_datetime:
args1 = [
nodes.DateTimeAttributeNode(
datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(
datetime_value.clone, 'units'),]
else:
args1 = [
nodes.TimeDeltaAttributeNode(
datetime_value, 'diff'),
nodes.TimeDeltaAttributeNode(
datetime_value.clone, 'units'),]
datetime_value = nodes.CloneableNode(node.right)
if node.right.type.is_datetime:
args2 = [
nodes.DateTimeAttributeNode(
datetime_value, 'timestamp'),
nodes.DateTimeAttributeNode(
datetime_value.clone, 'units'),]
else:
args2 = [
nodes.TimeDeltaAttributeNode(
datetime_value, 'diff'),
nodes.TimeDeltaAttributeNode(
datetime_value.clone, 'units'),]
if isinstance(node.op, ast.Add):
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"add_datetime_timedelta",
args=args1+args2+[unit_node])
elif isinstance(node.op, ast.Sub):
diff_node = function_util.utility_call(
self.context, self.llvm_module,
"sub_datetime_timedelta",
args=args1+args2+[unit_node])
node = nodes.DateTimeNode(diff_node, unit_node)
else:
raise NotImplementedError
elif node.left.type.is_string and node.right.type.is_string:
node.left = nodes.CoercionNode(node.left, object_)
node.right = nodes.CoercionNode(node.right, object_)
return nodes.CoercionNode(self.visit_BinOp(node), c_string_type)
return node
def _object_unaryop(self, node, api_name):
return self.visit(
function_util.external_call(self.context,
self.llvm_module,
api_name,
args=[node.operand]))
def _object_Invert(self, node):
return self._object_unaryop(node, 'PyNumber_Invert')
def _object_Not(self, node):
callnode = function_util.external_call(self.function_cache,
self.llvm_module,
'PyObject_IsTrue',
args=[node.operand])
cmpnode = ast.Compare(callnode, [nodes.Eq()], [nodes.ConstNode(0)])
return self.visit(nodes.IfExp(cmpnode,
nodes.ObjectInjectNode(True),
nodes.ObjectInjectNode(False)))
def _object_UAdd(self, node):
return self._object_unaryop(node, 'PyNumber_Positive')
def _object_USub(self, node):
return self._object_unaryop(node, 'PyNumber_Negative')
def visit_UnaryOp(self, node):
self.generic_visit(node)
if is_obj(node.type):
op_name = type(node.op).__name__
op_method = getattr(self, '_object_%s' % op_name, None)
if op_method:
node = op_method(node)
else:
raise error.NumbaError(
node, 'Unsupported unary operation for objects: %s' %
op_name)
return node
def visit_ConstNode(self, node):
constant = node.pyval
if node.type.is_known_value:
node.type = object_ # TODO: Get rid of known_value
if node.type.is_complex:
real = nodes.ConstNode(constant.real, node.type.base_type)
imag = nodes.ConstNode(constant.imag, node.type.base_type)
node = nodes.ComplexNode(real, imag)
elif node.type.is_numpy_datetime:
datetime_str = nodes.ConstNode('', c_string_type)
node = nodes.NumpyDateTimeNode(datetime_str)
elif node.type.is_datetime:
# JNB: not sure what to do here for datetime value
timestamp = nodes.ConstNode(0, int64)
units = nodes.ConstNode(0, int32)
node = nodes.DateTimeNode(timestamp, units)
elif node.type.is_timedelta:
diff = nodes.ConstNode(0, int64)
units = nodes.ConstNode(0, int32)
node = nodes.TimeDeltaNode(diff, units)
elif node.type.is_pointer and not node.type.is_string:
addr_int = constnodes.get_pointer_address(constant, node.type)
node = nodes.ptrfromint(addr_int, node.type)
elif node.type.is_object and not nodes.is_null_constant(constant):
node = nodes.ObjectInjectNode(constant, node.type)
return node
#------------------------------------------------------------------------
# User nodes
#------------------------------------------------------------------------
def visit_UserNode(self, node):
return node.specialize(self)
########NEW FILE########
__FILENAME__ = typedefs
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
from numba import *
from numba.typesystem import defaults
_trace_refs_ = hasattr(sys, 'getobjects')
def define(u):
void_star = u.pointer(u.void)
intp_star = u.pointer(u.npy_intp)
if _trace_refs_:
pyobject_head_extra_fields = [
('ob_next', void_star),
('ob_prev', void_star),
]
else:
pyobject_head_extra_fields = []
pyobject_head_fields = pyobject_head_extra_fields + [
('ob_refcnt', u.Py_ssize_t),
('ob_type', void_star),
]
PyObject_HEAD = u.struct_(pyobject_head_fields, 'PyObject_HEAD')
PyArray = u.struct_(pyobject_head_fields + [
("data", void_star),
("nd", u.int32),
("dimensions", intp_star),
("strides", intp_star),
("base", void_star),
("descr", void_star),
("flags", u.int32),
("weakreflist", void_star),
])
PyCFunctionObject = u.struct_([
('head', PyObject_HEAD),
('m_ml', void_star),
('m_self', u.object_),
('m_module', u.object_),
])
# TODO: Parse C and Cython header files...
NumbaFunctionObject = u.struct_([
('pycfunction', PyCFunctionObject),
('flags', u.int_),
('func_dict', u.object_),
('func_weakreflist', u.object_),
('func_name', u.object_),
('func_doc', u.object_),
('func_code', u.object_),
('func_closure', u.object_),
])
return locals()
globals().update(define(defaults.numba_typesystem))
########NEW FILE########
__FILENAME__ = closuretypes
# -*- coding: utf-8 -*-
"""
Types for closures and inner functions.
"""
from __future__ import print_function, division, absolute_import
from numba.typesystem import NumbaType
from numba.exttypes.types.extensiontype import ExtensionType
class ClosureType(NumbaType):
"""
Type of closures and inner functions.
"""
typename = "closure"
argnames = ["signature", ("closure", None)]
flags = ["object"]
def add_scope_arg(self, scope_type):
self.signature = self.signature.add_arg(0, scope_type)
def __repr__(self):
return "<closure(%s)>" % self.signature
class ClosureScopeType(ExtensionType):
"""
Type of the enclosing scope for closures. This is always passed in as
first argument to the function.
"""
typename = "closure_scope"
is_final = True
def __init__(self, py_class, parent_scope):
super(ClosureScopeType, self).__init__(py_class)
self.parent_scope = parent_scope
self.unmangled_symtab = None
if self.parent_scope is None:
self.scope_prefix = ""
else:
self.scope_prefix = self.parent_scope.scope_prefix + "0"
########NEW FILE########
__FILENAME__ = constants
# -*- coding: utf-8 -*-
"""
Default rules for the typing of constants.
"""
from __future__ import print_function, division, absolute_import
import math
import types
import ctypes
from functools import partial
import numba.typesystem
from numba.typesystem import itypesystem, numpy_support
from numba import numbawrapper
from numba.support.ctypes_support import is_ctypes, from_ctypes_value
from numba.support import cffi_support
import numpy as np
import datetime
#------------------------------------------------------------------------
# Class -> Type
#------------------------------------------------------------------------
def get_typing_defaults(u):
"""
Get a simple table mapping Python classes to types.
:param u: The type universe
"""
typing_defaults = {
float: u.double,
bool: u.bool_,
complex: u.complex128,
str: u.string_,
#datetime.datetime: u.datetime,
np.datetime64: u.datetime(),
np.timedelta64: u.timedelta(),
}
return typing_defaults
#------------------------------------------------------------------------
# Class -> pyval -> Type
#------------------------------------------------------------------------
def get_default_typing_rules(u, typeof, promote):
"""
Get a table mapping Python classes to handlers (value -> type)
:param u: The type universe
"""
table = {}
def register(*classes):
def dec(func):
for cls in classes:
table[cls] = lambda u, value: func(value)
return func
return dec
@register(int, long)
def type_int(value):
if abs(value) < 1:
bits = 0
else:
bits = math.ceil(math.log(abs(value), 2))
if bits < 32:
return u.int_
elif bits < 64:
return u.int64
else:
raise ValueError("Cannot represent %s as int32 or int64" % value)
@register(np.ndarray)
def type_ndarray(value):
if isinstance(value, np.ndarray):
dtype = numpy_support.map_dtype(value.dtype)
return u.array(dtype, value.ndim)
#is_c_contig=value.flags['C_CONTIGUOUS'],
#is_f_contig=value.flags['F_CONTIGUOUS'])
@register(tuple, list, dict)
def type_container(value):
assert isinstance(value, (tuple, list, dict))
if isinstance(value, dict):
key_type = type_container(value.keys())
value_type = type_container(value.values())
return u.dict_(key_type, value_type, size=len(value))
if isinstance(value, tuple):
container_type = u.tuple_
else:
container_type = u.list_
if 0 < len(value) < 30:
# Figure out base type if the container is not too large
# base_type = reduce(promote, (typeof(child) for child in value))
ty = typeof(value[0])
if all(typeof(child) == ty for child in value):
base_type = ty
else:
base_type = u.object_
else:
base_type = u.object_
return container_type(base_type, size=len(value))
register(np.dtype)(lambda value: u.numpy_dtype(numpy_support.map_dtype(value)))
register(types.ModuleType)(lambda value: u.module(value))
register(itypesystem.Type)(lambda value: u.meta(value))
return table
def get_constant_typer(universe, typeof, promote):
"""
Get a function mapping values to types, which returns None if unsuccessful.
"""
typetable = get_typing_defaults(universe)
handler_table = get_default_typing_rules(universe, typeof, promote)
return itypesystem.ConstantTyper(universe, typetable, handler_table).typeof
#------------------------------------------------------------------------
# Constant matching ({ pyval -> bool : pyval -> Type })
#------------------------------------------------------------------------
# TODO: Make this a well-defined (easily overridable) matching table
# E.g. { "numpy" : { is_numpy : get_type } }
def is_dtype_constructor(value):
return isinstance(value, type) and issubclass(value, np.generic)
def is_numpy_scalar(value):
return isinstance(value, np.generic)
def is_registered(value):
from numba.type_inference import module_type_inference
return module_type_inference.is_registered(value)
def from_ctypes(value, u):
result = from_ctypes_value(value)
if result.is_function:
pointer = ctypes.cast(value, ctypes.c_void_p).value
return u.pointer_to_function(value, pointer, result)
else:
return result
def from_cffi(value, u):
signature = cffi_support.get_signature(value)
pointer = cffi_support.get_pointer(value)
return u.pointer_to_function(value, pointer, signature)
def from_typefunc(value, u):
from numba.type_inference import module_type_inference
result = module_type_inference.module_attribute_type(value)
if result is not None:
return result
else:
return u.known_value(value)
is_numba_exttype = lambda value: hasattr(type(value), '__numba_ext_type')
is_NULL = lambda value: value is numba.NULL
is_autojit_func = lambda value: isinstance(
value, numbawrapper.NumbaSpecializingWrapper)
def get_default_match_table(u):
"""
Get a matcher table: { (type -> bool) : (value -> type) }
"""
table = {
is_NULL:
lambda value: numba.typesystem.null,
is_dtype_constructor:
lambda value: numba.typesystem.from_numpy_dtype(np.dtype(value)),
is_numpy_scalar:
lambda value: numpy_support.map_dtype(value.dtype),
is_ctypes:
lambda value: from_ctypes(value, u),
cffi_support.is_cffi_func:
lambda value: from_cffi(value, u),
is_numba_exttype:
lambda value: getattr(type(value), '__numba_ext_type'),
numbawrapper.is_numba_wrapper:
lambda value: u.jit_function(value),
is_autojit_func:
lambda value: u.autojit_function(value),
is_registered:
lambda value: from_typefunc(value, u),
}
return table
def find_match(matchtable, value):
for matcher, typefunc in matchtable.iteritems():
if matcher(value):
result = typefunc(value)
assert result is not None
return result
return None
#------------------------------------------------------------------------
# Typeof
#------------------------------------------------------------------------
def object_typer(universe, value):
return universe.object_
def find_first(callables, value):
for callable in callables:
result = callable(value)
if result is not None:
return result
assert False, (callables, value)
def get_default_typeof(universe, promote):
typeof1 = get_constant_typer(universe, lambda value: typeof(value), promote)
typeof2 = partial(find_match, get_default_match_table(universe))
typeof3 = partial(object_typer, universe)
typeof = partial(find_first, [typeof1, typeof2, typeof3])
return typeof
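# The composed typeof above tries the constant-typing table first, then the matcher
# table, and finally falls back to typing the value as object_.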
########NEW FILE########
__FILENAME__ = ctypestypes
# -*- coding: utf-8 -*-
"""
ctypes type universe.
"""
from __future__ import print_function, division, absolute_import
import ctypes
from numba.typesystem.itypesystem import consing, tyname
from numba.typesystem import universe
from numba.typesystem import numbatypes as ts
domain_name = "ctypes"
# ______________________________________________________________________
nb2ctypes = {
ts.float32: ctypes.c_float,
ts.float64: ctypes.c_double,
# ts.float128: ctypes.c_longdouble,
ts.object_: ctypes.py_object,
ts.void: None,
ts.string_: ctypes.c_char_p,
}
def cint(name):
ty = getattr(ts, name)
cname = "c_int" if ty.signed else "c_uint"
nb2ctypes[ty] = getattr(ctypes, cname + str(ty.itemsize * 8))
for name in map(tyname, universe.int_typenames):
cint(name)
globals().update((tyname(ty.typename), cty)
for ty, cty in nb2ctypes.iteritems())
# float_, double, longdouble = float32, float64, float128
float_, double = float32, float64
ctypes_map = dict((cty, ty) for ty, cty in nb2ctypes.iteritems())
if ctypes.c_double == ctypes.c_longdouble:
# Ctypes and numpy can disagree on longdouble. If ctypes assumes
# double == longdouble, make sure we don't assume longdouble, and then
# later use the numpy representation
ctypes_map[ctypes.c_double] = ts.float64
# ______________________________________________________________________
@consing
def struct_(fields, name=None, readonly=False, packed=False):
class Struct(ctypes.Structure):
_fields_ = fields
if packed:
_pack_ = 1
return Struct
@consing
def function(rettype, argtypes, name=None, is_vararg=False):
assert not is_vararg
return ctypes.CFUNCTYPE(rettype, *argtypes)
@consing
def pointer(base_type):
if base_type in (ctypes.c_char, ctypes.c_byte):
return string_
return ctypes.POINTER(base_type)
carray = consing(lambda base_type, size: base_type * size)
########NEW FILE########
__FILENAME__ = defaults
# -*- coding: utf-8 -*-
"""
Type defaults
"""
from __future__ import print_function, division, absolute_import
from . itypesystem import TypeConverter, TypeSystem
from . import promotion, constants, lowering
from . import numbatypes as numba_domain
from . import llvmtypes as llvm_domain
from . import ctypestypes as ctypes_domain
#------------------------------------------------------------------------
# Defaults initialization
#------------------------------------------------------------------------
def compose(f, g):
return lambda x: f(g(x))
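# compose(f, g) builds the function x -> f(g(x)); it is used below to chain a lowering
# pass with a domain conversion (e.g. lower to numba types, then convert to llvm types).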
# ______________________________________________________________________
# Converters
def lowerer(table):
return lowering.create_type_lowerer(table, numba_domain, numba_domain)
# Lowerers
lower = lowerer(lowering.default_numba_lowering_table).convert
ctypes_lower = lowerer(lowering.ctypes_lowering_table).convert
# Converters
to_llvm_converter = TypeConverter(numba_domain, llvm_domain)
to_ctypes_converter = TypeConverter(numba_domain, ctypes_domain)
# ...
to_llvm = to_llvm_converter.convert
to_ctypes = to_ctypes_converter.convert
converters = {
"llvm": compose(to_llvm, lower),
"ctypes": compose(to_ctypes, ctypes_lower),
}
# ______________________________________________________________________
# Typesystems
promote = promotion.get_default_promoter(numba_domain)
typeof = constants.get_default_typeof(numba_domain, promote)
numba_typesystem = TypeSystem(numba_domain, promote, typeof, converters)
llvm_typesystem = TypeSystem(llvm_domain, typeof=compose(to_llvm, typeof))
ctypes_typesystem = TypeSystem(ctypes_domain, typeof=compose(to_ctypes, typeof))
########NEW FILE########
__FILENAME__ = itypesystem
# -*- coding: utf-8 -*-
"""
Interface for our typesystems.
Some requirements:
* The typesystem must allow us to switch between low-level representations.
For instance, we may want to represent an array as a NumPy ndarray, a
Py_buffer, or some other representation.
* The sizes of atom types (e.g. int) must be easily customizable. This allows
an interpreter to switch sizes to simulate different platforms.
* Type representations and sizes, must be overridable without
reimplementing the type. E.g. an atom type can be sliced to create
an array type, which should be separate from its low-level representation.
* Types should map easily between type domains of the same level, e.g.
between the low-level numba and llvm types.
Ideally this forms an isomorphism, which is precluded by ctypes and
numpy type systems::
>>> ctypes.c_longlong
<class 'ctypes.c_long'>
* No type system but our own should be entrenched in any part of the
codebase, including the code generator.
* Sets of type constructors (type universes) must be composable.
For instance, we may want to extend a low-level type systems of ints
and pointers with objects to yield a type universe supporting
both constructs.
* Universes must be re-usable across type-systems. Types of universes
represent abstract type concepts, and the type-systems give meaning
and representation to values of those types.
* Types must be immutable and consed, i.e.
ts.pointer(base) is ts.pointer(base) must always be True
* The use of a certain typesystem must suggest at which level the
corresponding terms operate.
* Conversion code should be written with minimal effort:
- unit types should map immediately between domains of the same level
- parametric types should naturally re-construct in domains of the same level
* Converting a type to a lower-level domain constitutes a one-way
conversion. This should, where possible, constitute a lowering in the
same domain followed by a conversion. E.g.:
def numba_complex_to_llvm(type):
return to_llvm(lower_complex(type))
* Type constructors must be substitutable. E.g. an external user may
want to construct a universe where type construction is logged, or
where special type checking is performed, disallowing certain compositions.
"""
from __future__ import print_function, division, absolute_import
import ctypes
import struct as struct_
import weakref
import keyword
from functools import partial
import numba
from numba.utils import is_builtin
reserved = set(['bool', 'int', 'long', 'float', 'complex',
'string', 'struct', 'array']).__contains__
def tyname(name):
return name + "_" if reserved(name) else name
__all__ = [
"TypeSystem", "Type", "ConstantTyper", "Conser", "TypeConser",
"get_conser", "consing",
]
native_pointer_size = struct_.calcsize('@P')
if struct_.pack('i', 1)[0] == '\1':
nbo = '<' # little endian
else:
nbo = '>' # big endian
if numba.PY3:
map = lambda f, xs, map=map: list(map(f, xs))
class TypeSystem(object):
def __init__(self, universe, promote=None, typeof=None, converters=None):
self.universe = universe
# Find the least general type that subsumes both given types
# t1 -> t2 -> t3
self.promote = promote
# Assign types to Python constants (arbitrary python values)
self.typeof = typeof
self.from_python = typeof # TODO: Remove
# Convert between type domains
self.converters = converters or {}
def convert(self, codomain_name, type):
convert = self.converters[codomain_name]
return convert(type)
def __getattr__(self, attr):
return getattr(self.universe, attr)
def __repr__(self):
return "TypeSystem(%s, %s, %s, %s)" % (self.universe.domain_name,
self.promote, self.typeof,
self.converters)
#------------------------------------------------------------------------
# Typing of Constants
#------------------------------------------------------------------------
class ConstantTyper(object):
def __init__(self, universe, typetable, handler_table):
"Initialize to the given type universe"
self.universe = universe
self.typetable = typetable # type(constant) -> type
self.handler_table = handler_table # type(constant) -> handler
def typeof(self, value):
"""
Get a concrete type given a python value.
Return None if this ConstantTyper cannot type the constant
"""
if type(value) in self.typetable:
return self.typetable[type(value)]
elif type(value) in self.handler_table:
return self.handler_table[type(value)](self.universe, value)
else:
for cls in self.handler_table:
if isinstance(value, cls):
return self.handler_table[cls](self.universe, value)
return None
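# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a ConstantTyper
# resolves a Python value through the direct type table first, then through
# the handler table. The universe argument is unused by the lookup itself, so
# None suffices here; the result strings are made up for the demonstration.
if __name__ == "__main__":
    ct = ConstantTyper(None,
                       {int: "int type"},
                       {float: lambda universe, value: "float type"})
    assert ct.typeof(3) == "int type"        # direct table hit
    assert ct.typeof(3.0) == "float type"    # handler hit
    assert ct.typeof("hi") is None           # untypeable constant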
#------------------------------------------------------------------------
# Type Conversion between type domains
#------------------------------------------------------------------------
def get_ctor(codomain, kind):
name = tyname(kind)
if not hasattr(codomain, name):
raise AttributeError(
"Codomain '%s' has no attribute '%s'" % (codomain, name))
return getattr(codomain, name)
def convert_unit(domain, codomain, type):
return get_ctor(codomain, type.typename)
def convert_para(domain, codomain, type, coparams):
return get_ctor(codomain, type.kind)(*coparams) # Construct type in codomain
# ______________________________________________________________________
class TypeConverter(object):
"""
Map types between type universes.
"""
def __init__(self, domain, codomain,
convert_unit=convert_unit, convert_para=convert_para):
self.domain, self.codomain = domain, codomain
self.convert_unit = partial(convert_unit, domain, codomain)
self.convert_para = partial(convert_para, domain, codomain)
self.partypes = weakref.WeakKeyDictionary()
def convert(self, type):
"Return an LLVM type for the given type."
if isinstance(type, (tuple, list)):
return tuple(map(self.convert, type))
elif not isinstance(type, Type):
return type
elif type.is_unit:
return self.convert_unit(type)
else:
return self.convert_parametrized(type)
def convert_parametrized(self, type):
# if type in self.partypes: # TODO: Check for type mutability
# return self.partypes[type]
# Construct parametrized type in codomain
result = self.convert_para(type, map(self.convert, type.params))
# self.partypes[type] = result
return result
def __repr__(self):
return "TypeConverter(%s -> %s)" % (self.domain.domain_name,
self.codomain.domain_name)
#------------------------------------------------------------------------
# Type Classes
#------------------------------------------------------------------------
def add_flags(obj, flags):
for flag in flags:
setattr(obj, "is_" + flag, True)
class Type(object):
"""
Base of all types.
"""
metadata = None
def __init__(self, kind, *params, **kwds):
self.kind = kind # Type kind
# don't call this 'args' since we already use that in function
self.params = list(params)
self.is_unit = kwds.get("is_unit", False)
if self.is_unit:
self.typename = params[0]
else:
self.typename = kind
# Immutable metadata
self.metadata = kwds.get("metadata", frozenset())
self._metadata = self.metadata and dict(self.metadata)
@classmethod
def unit(cls, kind, name, flags=(), **kwds):
"""
Nullary type constructor creating the most elementary of types.
Does not compose any other type.
"""
type = cls(kind, name, is_unit=True,
metadata=frozenset(kwds.items()))
add_flags(type, flags)
type.flags = flags
return type
@classmethod
def default_args(cls, args, kwargs):
"Add defaults to a given args tuple for type construction"
return args
def __repr__(self):
if self.is_unit:
return self.params[0].rstrip("_")
else:
return "%s(%s)" % (self.kind, ", ".join(map(str, self.params)))
def __getattr__(self, attr):
if attr.startswith("is_"):
return self.kind == attr[3:]
elif self.metadata and attr in self._metadata:
return self._metadata[attr]
raise AttributeError(attr)
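# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): unit types convert
# between domains purely by (renamed) attribute lookup on the codomain, as
# convert_unit/get_ctor do above. FakeCodomain is a hypothetical stand-in for
# a codomain module such as llvmtypes or ctypestypes.
if __name__ == "__main__":
    class FakeCodomain(object):
        int_ = "lowered int"   # tyname("int") == "int_" because "int" is reserved
    demo_type = Type.unit("int", "int")
    assert convert_unit(None, FakeCodomain, demo_type) == "lowered int"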
#------------------------------------------------------------------------
# Type Memoization
#------------------------------------------------------------------------
class Conser(object):
"""
Conser: constructs new objects only when not already available.
Objects are weakly referenced so that memory consumption stays bounded.
This allows the objects to be compared by and hashed on identity.
"""
__slots__ = ("constructor", "_entries")
def __init__(self, constructor):
self._entries = weakref.WeakValueDictionary()
self.constructor = constructor
def get(self, *args):
args = tuple(tuple(arg) if isinstance(arg, list) else arg
for arg in args)
try:
result = self._entries.get(args)
if result is None:
result = self.constructor(*args)
self._entries[args] = result
except:
result = self.constructor(*args)
return result
class TypeConser(object):
def __init__(self, type):
assert isinstance(type, type), type
assert issubclass(type, Type), type.__mro__
self.type = type
self.conser = Conser(type)
def get(self, *args, **kwargs):
# Add defaults to the arguments to ensure consing correctness
args = self.type.default_args(args, kwargs)
return self.conser.get(*args)
def get_conser(ctor):
if isinstance(ctor, type) and issubclass(ctor, Type):
return TypeConser(ctor) # Use a conser that tracks defaults
else:
return Conser(ctor)
def consing(ctor):
return get_conser(ctor).get
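# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `consing` memoizes a
# constructor so that equal argument tuples yield the identical object, which
# is what lets types be compared and hashed by identity. `Pair` is a
# hypothetical class used only for this demonstration.
if __name__ == "__main__":
    class Pair(object):
        def __init__(self, a, b):
            self.a, self.b = a, b

    make_pair = consing(Pair)
    p1, p2 = make_pair(1, 2), make_pair(1, 2)
    assert p1 is p2                      # consed: same args, same object
    assert make_pair(1, 3) is not p1     # different args, different object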
########NEW FILE########
__FILENAME__ = kinds
# -*- coding: utf-8 -*-
"""
Kinds for numba types.
"""
from __future__ import print_function, division, absolute_import
#------------------------------------------------------------------------
# Type Kinds
#------------------------------------------------------------------------
# Low level kinds
KIND_VOID = "void"
KIND_INT = "int"
KIND_FLOAT = "float"
KIND_COMPLEX = "complex"
KIND_FUNCTION = "function"
KIND_ARRAY = "array"
KIND_POINTER = "pointer"
KIND_NULL = "null"
KIND_CARRAY = "carray"
KIND_STRUCT = "struct"
# High-level Numba kinds
KIND_BOOL = "bool"
KIND_OBJECT = "object"
KIND_EXTTYPE = "exttype"
KIND_NONE = "none"
########NEW FILE########
__FILENAME__ = llvmtypes
import llvm.core
from numba.typesystem.itypesystem import consing, tyname
from numba.typesystem import universe
# from llvmmath.ltypes import l_longdouble
domain_name = "llvm"
#------------------------------------------------------------------------
# Helpers
#------------------------------------------------------------------------
def get_target_triple():
target_machine = llvm.ee.TargetMachine.new()
is_ppc = target_machine.triple.startswith("ppc")
is_x86 = target_machine.triple.startswith("x86")
return is_ppc, is_x86
def lbool():
return llvm.core.Type.int(1)
def lint(name, itemsize):
if name == "bool":
return lbool()
return llvm.core.Type.int(itemsize * 8)
def lfloat(name, itemsize):
if itemsize == 4:
return llvm.core.Type.float()
elif itemsize == 8:
return llvm.core.Type.double()
else:
assert False, "long double is not supported"
# return l_longdouble
size = universe.default_type_sizes.__getitem__
unittypes = {}
for typename in universe.int_typenames:
unittypes[typename] = lint(typename, size(typename))
for typename in universe.float_typenames:
unittypes[typename] = lfloat(typename, size(typename))
unittypes["void"] = llvm.core.Type.void()
globals().update((tyname(name), ty) for name, ty in unittypes.iteritems())
#------------------------------------------------------------------------
# Exposed types
#------------------------------------------------------------------------
# @consing # llvm types don't hash in python 3 in llvmpy 0.11.2
def struct_(fields, name=None, readonly=False, packed=False):
if packed:
struct = llvm.core.Type.packed_struct
else:
struct = llvm.core.Type.struct
return struct([field_type for field_name, field_type in fields])
# @consing
def pointer(base_type):
if base_type.kind == llvm.core.TYPE_VOID:
base_type = llvm.core.Type.int(8)
return llvm.core.Type.pointer(base_type)
def sized_pointer(base_type, size):
return pointer(base_type)
# @consing
def function(rettype, argtypes, name=None, is_vararg=False):
return llvm.core.Type.function(rettype, argtypes, is_vararg)
def array_(dtype, ndim, *args):
from numba import environment, ndarray_helpers
# TODO: this is gross, we need to pass in 'env'
env = environment.NumbaEnvironment.get_environment()
if env.crnt:
return env.crnt.array.from_type(dtype)
return ndarray_helpers.NumpyArray.from_type(dtype)
carray = llvm.core.Type.array
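# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the unit-type table
# built above maps numba type names directly to llvmpy types, and pointers to
# void are represented as byte pointers. Printed forms depend on llvmpy.
if __name__ == "__main__":
    print(unittypes["int32"])             # i32
    print(unittypes["float64"])           # double
    print(pointer(unittypes["void"]))     # i8* -- void* is lowered to a byte pointer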
########NEW FILE########
__FILENAME__ = lowering
# -*- coding: utf-8 -*-
"""
Type lowering from a higher-level domain to a lower-level domain.
"""
from __future__ import print_function, division, absolute_import
from numba.typesystem import itypesystem
def find_matches(table, flags):
"Find a lowering function from the flags of the type"
matches = []
for flag in flags:
if flag in table:
matches.append(flag)
if len(matches) > 1:
raise ValueError("Multiple matching flags: %s" % flags)
elif matches:
return matches[0]
else:
return None
def find_func(table, kind, flags, default=None):
"Get a function form the table by resolving any indirections"
if kind in table:
flag = kind
else:
flag = find_matches(table, flags)
if flag is None:
return default
while flag in table:
if isinstance(table[flag], basestring):
flag = table[flag]
else:
return table[flag]
return default
def create_type_lowerer(table, domain, codomain):
"""
Create a type lowerer from a domain to a codomain given a lowering table.
"""
def convert_unit(domain, codomain, type):
func = find_func(table, type.typename, type.flags)
if func:
return func(domain, codomain, type, ())
else:
return itypesystem.convert_unit(domain, codomain, type)
def convert_para(domain, codomain, type, params):
ctor = find_func(table, type.kind, type.flags, itypesystem.convert_para)
# print("lowering...", type, ctor)
return ctor(domain, codomain, type, params)
return itypesystem.TypeConverter(domain, codomain, convert_unit, convert_para)
#------------------------------------------------------------------------
# Lowering functions
#------------------------------------------------------------------------
# ______________________________________________________________________
# unit types
def lower_object(domain, codomain, type, params):
from numba import typedefs # hurr
if type.is_array:
return codomain.array_(*params)
return codomain.pointer(typedefs.PyObject_HEAD)
def lower_string(domain, codomain, type, params):
return codomain.pointer(codomain.char)
# ______________________________________________________________________
# parametrized types
def lower_function(domain, codomain, type, params):
restype, args, name, is_vararg = params
newargs = []
for arg in args:
if arg.is_struct or arg.is_function:
arg = codomain.pointer(arg)
newargs.append(arg)
if restype.is_struct:
newargs.append(codomain.pointer(restype))
restype = codomain.void
result = codomain.function(restype, newargs, name, is_vararg)
# print("lowered", type, result, params)
return result
def lower_extmethod(domain, codomain, type, params):
return lower_function(domain, codomain, type, params[:4])
def lower_complex(domain, codomain, type, params):
base_type, = params
return codomain.struct_([('real', base_type), ('imag', base_type)])
def lower_datetime(domain, codomain, type, params):
timestamp, units = params[0:2]
return codomain.struct_([('timestamp', timestamp), ('units', units)])
def lower_to_pointer(domain, codomain, type, params):
return codomain.pointer(params[0])
def lower_timedelta(domain, codomain, type, params):
diff, units = params[0:2]
return codomain.struct_([('diff', diff), ('units', units)])
#------------------------------------------------------------------------
# Default Lowering Table
#------------------------------------------------------------------------
default_numba_lowering_table = {
"object": lower_object,
# parametrized types
"function": lower_function,
"complex": lower_complex,
"datetime": lower_datetime,
"timedelta": lower_timedelta,
# "array": lower_array,
"string": lower_string,
# "carray": lower_to_pointer,
"sized_pointer": lower_to_pointer,
"reference": lower_to_pointer,
"extmethod": lower_extmethod,
"known_pointer": lower_to_pointer,
}
ctypes_lowering_table = {
"object": lambda dom, cod, type, params: cod.object_,
"complex": lower_complex,
"array": "object",
# "string": lambda dom, cod, type, params: ctypes.c_char_p,
"sized_pointer": lower_to_pointer,
"reference": lower_to_pointer,
"extmethod": lower_extmethod,
}
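# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): entries in a lowering
# table may be strings that redirect to another entry; `find_func` follows
# such indirections until it reaches a callable. The demo table below is
# hypothetical and mirrors the "array" -> "object" redirection used above.
if __name__ == "__main__":
    demo_table = {"array": "object",
                  "object": lambda dom, cod, type, params: "lowered object"}
    func = find_func(demo_table, "array", ())
    assert func(None, None, None, ()) == "lowered object"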
########NEW FILE########
__FILENAME__ = numbatypes
# -*- coding: utf-8 -*-
"""
Shorthands for type constructing, promotions, etc.
"""
from __future__ import print_function, division, absolute_import
import __future__
import inspect
from numba.typesystem import types, universe
from numba.typesystem.types import *
__all__ = [] # set below
integral = []
unsigned_integral = []
floating = []
complextypes = []
numeric = []
native_integral = []
datetimetypes = []
domain_name = "numba"
ranking = ["bool", "int", "float", "complex", "object"]
def rank(type):
return ranking.index(type.kind)
#------------------------------------------------------------------------
# All unit types
#------------------------------------------------------------------------
def unit(*args, **kwargs):
ty = types.unit(*args, **kwargs)
if ty.is_int:
ty.signed = ty.typename in universe.signed
if ty.is_int or ty.is_float:
ty.itemsize = universe.default_type_sizes[ty.typename]
# Add types to categories numeric, integral, floating, etc...
if ty.is_int:
integral.append(ty)
if not ty.signed:
unsigned_integral.append(ty)
if universe.is_native_int(ty.typename):
native_integral.append(ty)
elif ty.is_float:
floating.append(ty)
if ty.is_numeric:
numeric.append(ty)
return ty
# Numeric types
char = unit("int", "char", flags=["numeric"])
uchar = unit("int", "uchar", flags=["numeric"])
short = unit("int", "short", flags=["numeric"])
ushort = unit("int", "ushort", flags=["numeric"])
int_ = unit("int", "int", flags=["numeric"])
uint = unit("int", "uint", flags=["numeric"])
long_ = unit("int", "long", flags=["numeric"])
ulong = unit("int", "ulong", flags=["numeric"])
longlong = unit("int", "longlong", flags=["numeric"])
ulonglong = unit("int", "ulonglong", flags=["numeric"])
int8 = unit("int", "int8", flags=["numeric"])
int16 = unit("int", "int16", flags=["numeric"])
int32 = unit("int", "int32", flags=["numeric"])
int64 = unit("int", "int64", flags=["numeric"])
uint8 = unit("int", "uint8", flags=["numeric"])
uint16 = unit("int", "uint16", flags=["numeric"])
uint32 = unit("int", "uint32", flags=["numeric"])
uint64 = unit("int", "uint64", flags=["numeric"])
size_t = unit("int", "size_t", flags=["numeric"])
npy_intp = unit("int", "npy_intp", flags=["numeric"])
Py_ssize_t = unit("int", "Py_ssize_t", flags=["numeric"])
Py_uintptr_t = unit("int", "Py_uintptr_t", flags=["numeric"])
float32 = unit("float", "float32", flags=["numeric"])
float64 = unit("float", "float64", flags=["numeric"])
# float128 = unit("float", "float128", flags=["numeric"])
# float_, double, longdouble = float32, float64, float128
float_, double = float32, float64
complex64 = complex_(float32)
complex128 = complex_(float64)
# complex256 = complex_(float128)
bool_ = unit("bool", "bool", flags=["int", "numeric"])
null = unit("null", "null", flags=["pointer"])
void = unit("void", "void")
obj_type = lambda name: unit(name, name, flags=["object"])
# Add some unit types... (objects)
object_ = obj_type("object")
unicode_ = obj_type("unicode")
none = obj_type("none")
ellipsis = obj_type("ellipsis")
slice_ = obj_type("slice")
newaxis = obj_type("newaxis")
range_ = obj_type("range")
string_ = unit("string", "string", flags=[#"object",
"c_string"])
c_string_type = string_
complextypes.extend([complex64, complex128]) #, complex256])
tuple_of_obj = tuple_(object_, -1)
list_of_obj = list_(object_, -1)
dict_of_obj = dict_(object_, object_, -1)
def datetime(units=None, numpy=True):
if units not in ['Y', 'M', 'D', 'h', 'm', 's']:
units = None
datetime_type = datetime_(int64, int32, units)
datetime_type.is_numpy_datetime = numpy
return datetime_type
def timedelta(units=None, numpy=True):
if units not in ['Y', 'M', 'D', 'h', 'm', 's']:
units = None
timedelta_type = timedelta_(int64, int32, units)
timedelta_type.is_numpy_timedelta = numpy
return timedelta_type
# ______________________________________________________________________
O = object_
b1 = bool_
i1 = int8
i2 = int16
i4 = int32
i8 = int64
u1 = uint8
u2 = uint16
u4 = uint32
u8 = uint64
f4 = float32
f8 = float64
# f16 = float128
c8 = complex64
c16 = complex128
# c32 = complex256
for name, value in list(globals().iteritems()): # TODO: Do this better
if (not isinstance(value, __future__ ._Feature) and not
inspect.ismodule(value) and not name.startswith("_")):
__all__.append(name)
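# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the unit constructors
# above attach size/sign information and register each type in the numeric
# category lists, and datetime() falls back to unit-less for unknown units.
if __name__ == "__main__":
    assert int32.is_int and int32.is_numeric and int32.itemsize == 4
    assert uint8 in unsigned_integral and float64 in floating
    assert datetime('D').is_numpy_datetime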
########NEW FILE########
__FILENAME__ = numpy_support
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import math
from numba.typesystem import *
from numba.typesystem.itypesystem import nbo
import numpy as np

from numba.minivect import minierror   # used by map_dtype for unmappable dtypes
def map_dtype(dtype):
"""
Map a NumPy dtype to a minitype.
>>> map_dtype(np.dtype(np.int32))
int32
>>> map_dtype(np.dtype(np.int64))
int64
>>> map_dtype(np.dtype(np.object))
PyObject *
>>> map_dtype(np.dtype(np.float64))
float64
>>> map_dtype(np.dtype(np.complex128))
complex128
"""
if dtype.byteorder not in ('=', nbo, '|') and dtype.kind in ('iufbc'):
raise minierror.UnmappableTypeError(
"Only native byteorder is supported", dtype)
item_idx = int(math.log(dtype.itemsize, 2))
if dtype.kind == 'i':
return [int8, int16, int32, int64][item_idx]
elif dtype.kind == 'u':
return [uint8, uint16, uint32, uint64][item_idx]
elif dtype.kind == 'f':
if dtype.itemsize == 2:
pass # half floats not supported yet
elif dtype.itemsize == 4:
return float32
elif dtype.itemsize == 8:
return float64
elif dtype.itemsize == 16:
raise TypeError("long double is not support")
# return float128
elif dtype.kind == 'b':
return int8
elif dtype.kind == 'c':
if dtype.itemsize == 8:
return complex64
elif dtype.itemsize == 16:
return complex128
elif dtype.itemsize == 32:
raise TypeError("long double is not support")
# return complex256
elif dtype.kind == 'V':
fields = [(name, map_dtype(dtype.fields[name][0]))
for name in dtype.names]
is_aligned = dtype.alignment != 1
return struct_(fields, packed=not getattr(dtype, 'isalignedstruct',
is_aligned))
elif dtype.kind == 'O':
return object_
elif dtype.kind == 'M':
# Get datetime units from 2nd to last character in dtype string
# Example dtype string: '<M8[D]', where D is datetime units
return datetime(units=dtype.str[-2])
elif dtype.kind == 'm':
# Get timedelta units from 2nd to last character in dtype string
# Example dtype string: '<m8[D]', where D is timedelta units
return timedelta(units=dtype.str[-2])
typemap = {
int8 : np.int8,
int16 : np.int16,
int32 : np.int32,
int64 : np.int64,
uint8 : np.uint8,
uint16 : np.uint16,
uint32 : np.uint32,
uint64 : np.uint64,
float_ : np.float32,
double : np.float64,
# longdouble: np.longdouble,
short : np.dtype('h'),
int_ : np.dtype('i'),
long_ : np.dtype('l'),
longlong : np.longlong,
ushort : np.dtype('H'),
uint : np.dtype('I'),
ulong : np.dtype('L'),
ulonglong: np.ulonglong,
complex64: np.complex64,
complex128: np.complex128,
# complex256: getattr(np, 'complex256', None),
bool_ : np.bool,
object_ : np.object,
}
typemap = dict((k, np.dtype(v)) for k, v in typemap.iteritems())
def to_dtype(type):
if type.is_struct:
fields = [(field_name, to_dtype(field_type))
for field_name, field_type in type.fields]
return np.dtype(fields, align=not type.packed)
elif type.is_array and type.ndim == 1:
return to_dtype(type.dtype)
elif type in typemap:
return typemap[type]
elif type.is_int:
name = 'int' if type.signed else 'uint'
return np.dtype(getattr(np, name + str(type.itemsize * 8)))
elif type.is_numpy_datetime:
return np.dtype('M8[{0}]'.format(type.units_char))
elif type.is_numpy_timedelta:
return np.dtype('m8[{0}]'.format(type.units_char))
else:
raise ValueError("Cannot convert '%s' to numpy type" % (type,))
########NEW FILE########
__FILENAME__ = promotion
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import error
from numba.typesystem import numpy_support
from numba.typesystem import integral, floating, complextypes, unsigned_integral
from numba.typesystem.kinds import *
import numpy as np
#------------------------------------------------------------------------
# Promotion on kind
#------------------------------------------------------------------------
table = {
(KIND_POINTER, KIND_INT): KIND_POINTER,
(KIND_INT, KIND_POINTER): KIND_POINTER,
(KIND_POINTER, KIND_NULL): KIND_POINTER,
(KIND_NULL, KIND_POINTER): KIND_POINTER,
(KIND_OBJECT, KIND_OBJECT): KIND_OBJECT,
(KIND_BOOL, KIND_BOOL): KIND_BOOL,
}
def promote_from_table(table, u, promote, type1, type2):
result = table.get((type1.kind, type2.kind))
if result is not None:
return { type1.kind: type1, type2.kind: type2}[result]
return None
#------------------------------------------------------------------------
# Numeric promotion
#------------------------------------------------------------------------
def find_type_of_size(size, typelist):
for type in typelist:
if type.itemsize == size:
return type
assert False, "Type of size %d not found: %s" % (size, typelist)
def promote_int(u, promote, type1, type2):
"""
Promote two integer types. We have to remain portable at this point, e.g.
promote_int(Py_ssize_t, int) should be Py_ssize_t, and not int64 or int32.
"""
t1, t2 = type1.get_dtype(), type2.get_dtype()
return numpy_support.map_dtype(np.result_type(t1, t2))
# size = max(type1.itemsize, type2.itemsize)
# if type1.signed and type2.signed:
# return find_type_of_size(size, integral)
#
# if type1.signed:
# unsigned = type1
# other = type2
# else:
# unsigned = type2
# other = type1
#
# if (type1.itemsize == size == type2.itemsize or not other.signed or
# other.itemsize <= size):
# return find_type_of_size(size, unsigned_integral)
# else:
# return find_type_of_size(size, integral)
def promote_numeric(u, promote, type1, type2):
"Promote two numeric types"
ranks = ["bool", "int", "float", "complex"]
type = max([type1, type2], key=lambda type: ranks.index(type.kind))
size = max(type1.itemsize, type2.itemsize)
if type.is_complex:
return find_type_of_size(size, complextypes)
elif type.is_float:
return find_type_of_size(size, floating)
else:
assert type.is_int
return promote_int(u, promote, type1, type2)
#------------------------------------------------------------------------
# Array promotion
#------------------------------------------------------------------------
def promote_arrays(u, promote, type1, type2):
"Promote two array types in an expression to a new array type"
equal_ndim = type1.ndim == type2.ndim
return u.array(promote(type1.dtype, type2.dtype),
ndim=max((type1.ndim, type2.ndim)),
is_c_contig=(equal_ndim and type1.is_c_contig and
type2.is_c_contig),
is_f_contig=(equal_ndim and type1.is_f_contig and
type2.is_f_contig))
def promote_array_and_other(u, promote, type1, type2):
if type1.is_array:
array_type = type1
other_type = type2
else:
array_type = type2
other_type = type1
if other_type.is_object and not array_type.dtype.is_object:
# Make sure that (double[:], object_) -> object_
return u.object_
dtype = promote(array_type.dtype, other_type)
return u.array(dtype, array_type.ndim)
#------------------------------------------------------------------------
# Default type promotion
#------------------------------------------------------------------------
class DefaultPromoter(object):
def __init__(self, universe, promotion_table):
self.universe = universe
self.promotion_table = promotion_table
def promote(self, type1, type2):
"Promote two arbitrary types"
if type1 == type2:
return type1
u = self.universe
args = u, self.promote, type1, type2
result = promote_from_table(self.promotion_table, *args)
if result is not None:
return result
elif type1.is_numeric and type2.is_numeric:
return promote_numeric(*args)
elif type1.is_array and type2.is_array:
return promote_arrays(*args)
elif type1.is_array or type2.is_array:
return promote_array_and_other(*args)
elif (type1, type2) in [(u.string_, u.char.pointer()),
(u.char.pointer(), u.string_)]:
return u.string_
elif type1.is_object or type2.is_object:
return u.object_
else:
raise error.UnpromotableTypeError((type1, type2))
def have_properties(type1, type2, property1, property2):
"""
Return whether the two types satisfy the two properties:
>>> have_properties(int32, int32.pointer(), "is_pointer", "is_int")
True
"""
type1_p1 = getattr(type1, property1)
type1_p2 = getattr(type1, property2)
type2_p1 = getattr(type2, property1)
type2_p2 = getattr(type2, property2)
if (type1_p1 and type2_p2) or (type1_p2 and type2_p1):
if type1_p1:
return type1
else:
return type2
else:
return None
#------------------------------------------------------------------------
# Promote
#------------------------------------------------------------------------
def get_default_promoter(universe):
return DefaultPromoter(universe, table).promote
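# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): integer promotion
# above delegates to NumPy's result_type, so mixed-signedness pairs widen to
# a type that can hold both ranges.
if __name__ == "__main__":
    assert np.result_type(np.int32, np.int64) == np.int64
    assert np.result_type(np.int32, np.uint32) == np.int64
    assert np.result_type(np.uint64, np.int8) == np.float64  # no integer holds both ranges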
########NEW FILE########
__FILENAME__ = ssatypes
# -*- coding: utf-8 -*-
"""
This module provides deferred types used for type dependences that
haven't been processed yet, or circular dependences.
1) Types participating in statements that are deferred until later:
x = 0 # x_0
for i in range(10):
# x_1 = phi(x_0, x_2)
print x
x = i # x_2
Here x_2 is not resolved yet when we encounter the print statement.
2) Types participating in type graph cycles:
x = 0 # x_0
for i in range(10):
# x_1 = phi(x_0, x_2)
print x
x = x + i # x_2
Here we have:
type(x_1) = promote(type(x_0) = int, type(x_2))
type(x_2) = promote(type(x_1), type(i) = Py_ssize_t)
This is a simple cycle that will constitute the following graph:
    x_0 (int)          i_0 (Py_ssize_t)
        \                  /
         x_1 <---------- x_2
            \____________/
"""
from __future__ import print_function, division, absolute_import
from functools import partial
from numba import oset
from numba.minivect import minierror
from numba.typesystem import *
class UninitializedType(NumbaType):
is_uninitialized = True
subtypes = ['base_type']
def __init__(self, base_type, **kwds):
super(UninitializedType, self).__init__(**kwds)
self.base_type = base_type
def to_llvm(self, context):
ltype = self.base_type.to_llvm(context)
return ltype
def __repr__(self):
return "<uninitialized>"
class UnresolvedType(NumbaType):
"""
The directed type graph works as follows:
1) if type x depends on type y, then y is a parent of x.
2) we construct a condensation graph by contracting strongly connected
components to single nodes
3) we resolve types in topological order
-> types in SCCs are handled specially
"""
is_unresolved = True
rank = 1
def __init__(self, variable, **kwds):
super(UnresolvedType, self).__init__(**kwds)
self.variable = variable
self.assertions = []
self.parents = oset.OrderedSet()
self.children = oset.OrderedSet()
def add_children(self, children):
for child in children:
if child.is_unresolved:
self.children.add(child)
child.parents.add(self)
def add_parents(self, parents):
for parent in parents:
if parent.is_unresolved:
self.parents.add(parent)
parent.children.add(self)
def __hash__(self):
return hash(self.variable)
def __eq__(self, other):
return (isinstance(other, UnresolvedType) and
self.variable == other.variable and
self.is_deferred == other.is_deferred and
self.is_promotion == other.is_promotion and
self.is_unanalyzable == other.is_unanalyzable)
def simplify(self):
return not (self.resolve() is self)
def make_assertion(self, assertion_attr, node, msg):
def assertion(result_type):
if not getattr(result_type, assertion_attr):
raise error.NumbaError(node, msg)
self.assertions.append(assertion)
def process_assertions(self, result_type):
for assertion in self.assertions:
assertion(result_type)
del self.assertions[:]
def resolve(self):
if not self.variable.type:
self.variable.type = self
result = self.variable.type
if not result.is_unresolved:
self.process_assertions(result)
return result
class PromotionType(UnresolvedType):
is_promotion = True
resolved_type = None
count = 0 # for debugging
def __init__(self, variable, promote, types, assignment=False, **kwds):
super(PromotionType, self).__init__(variable, **kwds)
self.promote = promote
self.types = oset.OrderedSet(types)
self.assignment = assignment
variable.type = self
self.add_parents(type for type in types if type.is_unresolved)
self.count = PromotionType.count
PromotionType.count += 1
@property
def t(self):
# for debugging only
return list(self.types)
def add_type(self, seen, type, types):
if type not in seen:
if type.is_unresolved:
seen.add(type)
new_type = type.resolve()
if new_type is not type:
seen.add(new_type)
self.add_type(seen, new_type, types)
type = new_type
else:
types.add(type)
else:
types.add(type)
return type
def dfs(self, types, seen):
for type in self.types:
if type not in seen:
seen.add(type)
type = resolve_type_chain(type)
seen.add(type)
if type.is_promotion:
type.dfs(types, seen)
elif not type.is_uninitialized:
types.add(type)
def find_types(self, seen):
types = oset.OrderedSet([self])
seen.add(self)
seen.add(self.variable.deferred_type)
self.dfs(types, seen)
types.remove(self)
return types
def find_simple(self, seen):
types = oset.OrderedSet()
for type in self.types:
if type.is_promotion:
types.add(type.types)
else:
types.add(type)
return types
def get_partial_types(self, unresolved_types):
for unresolved_type in unresolved_types:
if (unresolved_type.is_reanalyse_circular and
unresolved_type.resolved_type):
unresolved_types.append(unresolved_type)
def _simplify(self, seen=None):
"""
Simplify a promotion type tree:
promote(int_, float_)
-> float_
promote(deferred(x), promote(float_, double), int_, promote(<self>))
-> promote(deferred(x), double)
promote(deferred(x), deferred(y))
-> promote(deferred(x), deferred(y))
"""
if seen is None:
seen = set()
# Find all types in the type graph and eliminate nested promotion types
types = self.find_types(seen)
# types = self.find_simple(seen)
resolved_types = [type for type in types if not type.is_unresolved]
unresolved_types = [type for type in types if type.is_unresolved]
self.get_partial_types(unresolved_types)
self.variable.type = self
if not resolved_types:
# Everything is deferred
self.resolved_type = None
return False
else:
# Simplify as much as possible
if self.assignment:
result_type, unresolved_types = promote_for_assignment(
self.promote, resolved_types, unresolved_types,
self.variable.name)
else:
result_type = promote_for_arithmetic(self.promote, resolved_types)
self.resolved_type = result_type
if len(resolved_types) == len(types) or not unresolved_types:
self.variable.type = result_type
return True
else:
old_types = self.types
self.types = oset.OrderedSet([result_type] + unresolved_types)
return old_types != self.types
def simplify(self, seen=None):
try:
return self._simplify(seen)
except minierror.UnpromotableTypeError as e:
if self.variable.name:
name = "variable %s" % self.variable.name
else:
name = "subexpression"
types = sorted(e.args[0], key=str)
types = tuple(types)
raise error.NumbaError("Cannot promote types %s for %s" % (types, name))
@classmethod
def promote(cls, *types):
var = Variable(None)
type = PromotionType(var, types)
type.resolve()
return type.variable.type
repr_seen = None
repr_count = 0
def __repr__(self):
if not self.repr_seen:
self.repr_seen = set()
self.repr_seen.add(self)
self.repr_count += 1
types = []
for type in self.types:
if type not in self.repr_seen:
types.append(type)
self.repr_seen.add(type)
else:
types.append("...")
result = "promote%d(%s)" % (self.count, ", ".join(map(str, types)))
self.repr_count -= 1
if not self.repr_count:
self.repr_seen = None
return result
class DeferredType(UnresolvedType):
"""
We don't know what the type is at the point we need a type, so we create
a deferred type.
Depends on: self.variable.type
Example:
def func():
for i in range(10):
# type(x) = phi(undef, deferred(x_1)) = phi(deferred(x_1))
if i > 1:
print x # type is deferred(x_1)
x = ... # resolve deferred(x_1) to type(...)
"""
is_deferred = True
updated = False
def update(self):
assert self.variable.type is not self
self.updated = True
type = self.variable.type
if not type.is_unresolved:
# Type is a scalar or otherwise resolved type tree, and doesn't
# need to participate in the graph
return
for parent in self.parents:
if self in parent.children:
parent.children.remove(self)
parent.children.add(type)
for child in self.children:
if self in child.parents:
child.parents.remove(self)
child.parents.add(type)
type.parents.update(self.parents)
type.children.update(self.children)
# def resolve(self):
# result_type = super(DeferredType, self).resolve()
# if result_type is not self and result_type.is_unresolved:
# result_type = result_type.resolve()
# self.variable.type = result_type
# return result_type
def __repr__(self):
if self.variable.type is self:
return "<deferred(%s)>" % (self.variable.unmangled_name,)
return "<deferred(%s)>" % self.variable.type
# def to_llvm(self, context):
# assert self.resolved_type, self
# return self.resolved_type.to_llvm(context)
class ReanalyzeCircularType(UnresolvedType):
"""
This is useful when a variable's type depends circularly on itself, e.g.
s = "hello"
for i in range(5):
s = s[1:]
The type of 's' depends on the result of the slice, and on the input to
the loop. But to determine the output, we need to assume the input,
and unify the output with the input, and see the result for a subsequent
slice. e.g.
a = np.empty((10, 10, 10))
for i in range(3):
a = a[0]
Here the type would change on each iteration. Arrays do not demote to
object, but other types do. The same goes for a call:
f = ...
for i in range(n):
f = f(i)
but also
x = 0
for i in range(n):
x = f(x)
or linked-list traversal
current = ...
while current:
current = current.next
"""
is_reanalyze_circular = True
resolved_type = None
converged = False
def __init__(self, variable, type_inferer, **kwds):
super(ReanalyzeCircularType, self).__init__(variable, **kwds)
self.type_inferer = type_inferer
self.dependences = []
def update(self):
"Update the graph after having updated the dependences"
self.add_parents(node.variable.type
for node in self.dependences
if node.variable.type.is_unresolved)
def _reinfer(self):
result_type = self.retry_infer()
if not result_type.is_unresolved:
self.resolved_type = result_type
self.variable.type = result_type
return result_type is not self
def substitute_and_reinfer(self):
"""
Try substituting resolved parts of promotions and reinfer the types.
"""
from numba import symtab
if not self.variable.type.is_unresolved:
return False
# Find substitutions and save original variables
old_vars = []
for node in self.dependences:
sub_type = self.substitution_candidate(node.variable)
if sub_type:
old_vars.append((node, node.variable))
node.variable = symtab.Variable(sub_type, name='<substitute>')
if old_vars:
# We have some substitutions, retry type inference
result = self._reinfer()
# Reset our original variables!
for node, old_var in old_vars:
node.variable = old_var
return result
# We cannot substitute any promotion candidates, see if we can resolve
# anyhow (this should be a cheap operation anyway if it fails)
new_type = self.retry_infer()
if not new_type.is_unresolved:
self.variable.type = new_type
return True
return False
def substitution_candidate(self, variable):
if variable.type.is_unresolved:
variable.type = variable.type.resolve()
if variable.type.is_promotion:
p = resolve_var(variable)
if p.is_promotion and p.resolved_type:
return p.resolved_type
return None
def simplify(self):
"""
Resolve the reanalyzable statement by setting the already resolved
dependences for the type inference code.
"""
if self.resolved_type is not None:
return False # nothing changed
for dep in self.dependences:
if dep.variable.type.is_unresolved:
dep.variable.type = dep.variable.type.resolve()
assert not dep.variable.type.is_unresolved
return self._reinfer()
def retry_infer(self):
"Retry inferring the type with the new type set"
def substitute_variables(self, substitutions):
"Try to set the new variables and retry type inference"
class DeferredIndexType(ReanalyzeCircularType):
"""
Used when we don't know the type of the variable being indexed.
"""
def __init__(self, variable, type_inferer, index_node, **kwds):
super(DeferredIndexType, self).__init__(variable, type_inferer, **kwds)
self.type_inferer = type_inferer
self.index_node = index_node
def retry_infer(self):
node = self.type_inferer.visit_Subscript(self.index_node,
visitchildren=False)
return node.variable.type
def __repr__(self):
return "<deferred_index(%s, %s)" % (self.index_node,
", ".join(map(str, self.parents)))
class DeferredAttrType(ReanalyzeCircularType):
"""
Used when we don't know the type of the object of which we access an
attribute.
"""
def __init__(self, variable, type_inferer, node, **kwds):
super(DeferredAttrType, self).__init__(variable, type_inferer, **kwds)
self.type_inferer = type_inferer
self.node = node
def retry_infer(self):
node = self.type_inferer.visit_Attribute(self.node,
visitchildren=False)
return node.variable.type
def __repr__(self):
return "<deferred_attr(%s, %s)" % (self.node,
", ".join(map(str, self.parents)))
class DeferredCallType(ReanalyzeCircularType):
"""
Used when we don't know the type of the expression being called, or when
we have an autojitting function and don't know all the argument types.
"""
def __init__(self, variable, type_inferer, call_node, **kwds):
super(DeferredCallType, self).__init__(variable, type_inferer, **kwds)
self.type_inferer = type_inferer
self.call_node = call_node
def retry_infer(self):
node = self.type_inferer.visit_Call(self.call_node),
# visitchildren=False)
return node[0].variable.type
def __repr__(self):
return "<deferred_call(%s, %s)" % (self.call_node,
", ".join(map(str, self.parents)))
def resolve_type_chain(type):
if not type.is_unresolved:
return type
while type.is_unresolved:
old_type = type
type = old_type.resolve()
if type is old_type or not type.is_unresolved:
break
return type
def error_circular(var):
raise error.NumbaError(
var.name_assignment and var.name_assignment.assignment_node,
"Unable to infer type for assignment to %r,"
" insert a cast or initialize the variable." % var.name)
class StronglyConnectedCircularType(UnresolvedType):
"""
Circular type dependence. This can be a strongly connected component
of just promotions, or a mixture of promotions and re-inferable statements.
If we have only re-inferable statements, but no promotions, we have nothing
to feed into the re-inference process, so we issue an error.
"""
is_resolved = False
is_scc = True
def __init__(self, scc, **kwds):
super(StronglyConnectedCircularType, self).__init__(None, **kwds)
self.scc = scc
types = oset.OrderedSet(scc)
for type in scc:
self.add_children(type.children - types)
self.add_parents(type.parents - types)
self.types = scc
self.promotions = oset.OrderedSet(
type for type in scc if type.is_promotion)
self.reanalyzeable = oset.OrderedSet(
type for type in scc if type.is_reanalyze_circular)
def retry_infer_reanalyzable(self):
for reanalyzeable in self.reanalyzeable:
if reanalyzeable.resolve().is_unresolved:
reanalyzeable.substitute_and_reinfer()
def err_no_input(self):
raise error.NumbaError(self.variable and self.variable.assignment_node,
"No input types for this assignment were "
"found, a cast is needed")
def retry_infer(self):
candidates = []
no_input = []
for promotion in self.promotions:
p = resolve_var(promotion.variable)
if p.is_promotion:
if p.resolved_type:
candidates.append(p)
else:
no_input.append(p)
if not candidates:
if no_input:
self.err_no_input()
# All types are resolved, resolve all delayed types
self.retry_infer_reanalyzable()
return
# Re-infer re-analyzable statements until we converge
changed = True
while changed:
self.retry_infer_reanalyzable()
changed = False
for p in list(self.promotions):
if p.resolve() is not p:
self.promotions.remove(p)
else:
changed |= p.simplify()
for promotion in self.promotions:
promotion.variable.type = promotion.resolved_type
def resolve_promotion_cycles(self):
p = self.promotions.pop()
self.promotions.add(p)
p.simplify()
result_type = p.resolve()
if result_type.is_unresolved:
# Note: There are no concrete input types and it is impossible to
# infer anything but 'object'. Usually this indicates an
# invalid program
error_circular(result_type.variable)
for p in self.promotions:
p.variable.type = result_type
def simplify(self):
if self.reanalyzeable:
self.retry_infer()
elif self.promotions:
self.resolve_promotion_cycles()
else:
# All dependencies are resolved, we are done
pass
self.is_resolved = True
def resolve(self):
# We don't have a type, we are only an aggregation of circular types
raise TypeError
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return id(self) == id(other)
def dfs(start_type, stack, seen, graph=None, parents=False):
seen.add(start_type)
if parents:
children = start_type.parents
else:
children = start_type.children
for child_type in children:
if child_type not in seen and child_type.is_unresolved:
if graph is None or child_type in graph:
dfs(child_type, stack, seen, graph, parents=parents)
stack.append(start_type)
class UnanalyzableType(UnresolvedType):
"""
A type that indicates the statement cannot be analyzed without first
analysing its dependencies.
"""
is_unanalyzable = True
def resolve_var(var):
if var.type.is_unresolved:
var.type.simplify()
if var.type.is_unresolved:
var.type = var.type.resolve()
return var.type
def kosaraju_strongly_connected(start_type, strongly_connected, seen):
"""
Find the strongly connected components in the connected graph starting at
start_type.
"""
stack = []
dfs(start_type, stack, set(seen))
seen = set(seen)
graph = oset.OrderedSet(stack)
while stack:
start = stack[-1]
scc = []
dfs(start, scc, seen, graph, parents=True)
if len(scc) > 1:
scc_type = StronglyConnectedCircularType(scc)
for type in scc_type.types:
strongly_connected[type] = scc_type
stack.pop()
else:
strongly_connected[scc[0]] = scc[0]
stack.pop()
#------------------------------------------------------------------------
# Type promotion and validation
#------------------------------------------------------------------------
def _validate_array_types(array_types):
first_array_type = array_types[0]
for array_type in array_types[1:]:
if array_type.ndim != first_array_type.ndim:
raise TypeError(
"Cannot unify arrays with distinct dimensionality: "
"%d and %d" % (first_array_type.ndim, array_type.ndim))
elif array_type.dtype != first_array_type.dtype:
raise TypeError("Cannot unify arrays with distinct dtypes: "
"%s and %s" % (first_array_type.dtype,
array_type.dtype))
def promote_for_arithmetic(promote, types, assignment=False):
result_type = types[0]
for type in types[1:]:
result_type = promote(result_type, type, assignment)
return result_type
def promote_arrays(array_types, non_array_types, types,
unresolved_types, var_name):
"""
This promotes arrays for assignments. Arrays must have a single consistent
type in an assignment (phi). Any promotion of delayed types is immediately
resolved.
"""
_validate_array_types(array_types)
# TODO: figure out whether result is C/F/inner contig
result_type = array_types[0].strided
def assert_equal(other_type):
if result_type != other_type:
raise TypeError(
"Arrays must have consistent types in assignment "
"for variable %r: '%s' and '%s'" % (
var_name, result_type, other_type))
if len(array_types) < len(types):
assert_equal(non_array_types[0])
# Add delayed assertion that triggers when the delayed types are resolved
for unresolved_type in unresolved_types:
unresolved_type.assertions.append(assert_equal)
return result_type, []
def promote_for_assignment(promote, types, unresolved_types, var_name):
"""
Promote a list of types for assignment (e.g. in a phi node).
- if there are any objects, the result will always be an object
- if there is an array, all types must be of that array type
(minus any contiguity constraints)
"""
obj_types = [type for type in types if type == object_ or type.is_array]
if obj_types:
array_types = [obj_type for obj_type in obj_types if obj_type.is_array]
non_array_types = [type for type in types if not type.is_array]
if array_types:
return promote_arrays(array_types, non_array_types, types,
unresolved_types, var_name)
else:
# resolved_types = obj_types
return object_, []
partial_result_type = promote_for_arithmetic(promote, types,
assignment=True)
return partial_result_type, unresolved_types
def promote(typesystem, type1, type2, assignment=False):
promote_ = partial(promote, typesystem)
if type1.is_unresolved or type2.is_unresolved:
if type1.is_unresolved:
type1 = type1.resolve()
if type2.is_unresolved:
type2 = type2.resolve()
if type1.is_unresolved or type2.is_unresolved:
# The Variable is really only important for ast.Name, fabricate
# one
from numba import symtab
var = symtab.Variable(None)
return PromotionType(var, promote_, [type1, type2])
else:
return typesystem.promote(type1, type2)
return typesystem.promote(type1, type2)
########NEW FILE########
__FILENAME__ = tbaa
# -*- coding: utf-8 -*-
"""
Some types to aid in type-based alias analysis. See numba/metadata.py.
"""
from __future__ import print_function, division, absolute_import
from numba.typesystem.types import NumbaType
from numba.typesystem import object_, npy_intp
class TBAAType(NumbaType):
is_tbaa = True
typename = "tbaa_type"
argnames = ["name", "root"]
numpy_array = TBAAType("numpy array", object_)
numpy_shape = TBAAType("numpy shape", npy_intp.pointer())
numpy_strides = TBAAType("numpy strides", npy_intp.pointer())
numpy_ndim = TBAAType("numpy flags", npy_intp.pointer())
numpy_dtype = TBAAType("numpy dtype", object_)
numpy_base = TBAAType("numpy base", object_)
numpy_flags = TBAAType("numpy flags", npy_intp.pointer())
########NEW FILE########
__FILENAME__ = templatetypes
# -*- coding: utf-8 -*-
"""
Autojit template types.
"""
from __future__ import print_function, division, absolute_import
import numba as nb
from numba import error
from numba.typesystem import Type, NumbaType
# type_attribute => [type_assertions]
VALID_TYPE_ATTRIBUTES = {
"dtype": ["is_array"],
"base_type": ["is_pointer", "is_carray", "is_complex",
"is_list", "is_tuple"],
"args": ["is_function"],
"return_type": ["is_function"],
# "fields": ["is_struct"],
"fielddict": ["is_struct"],
}
class _TemplateType(NumbaType):
def resolve_template(self, template_context):
if self not in template_context:
raise error.InvalidTemplateError("Unknown template type: %s" % self)
return template_context[self]
def __getitem__(self, index):
if isinstance(index, (tuple, slice)):
return super(_TemplateType, self).__getitem__(index)
return TemplateIndexType(self, index)
def __getattr__(self, attr):
if attr in VALID_TYPE_ATTRIBUTES:
return TemplateAttributeType(self, attr)
return super(_TemplateType, self).__getattr__(attr)
def __repr__(self):
return "template(%s)" % self.name
def __str__(self):
return self.name
class template(_TemplateType):
argnames = [("name", None)]
flags = ["object"]
template_count = 0
def __init__(self, name):
super(template, self).__init__(name)
if name is None:
name = "T%d" % self.template_count
template.template_count += 1
self.name = name
class TemplateAttributeType(_TemplateType):
typename = "template_attribute"
argnames = ["template_type", "attribute_name"]
flags = ["object", "template"]
def __init__(self, template_type, attribute_name, **kwds):
super(TemplateAttributeType, self).__init__(template_type, attribute_name)
assert attribute_name in VALID_TYPE_ATTRIBUTES
def resolve_template(self, template_context):
resolved_type = self.template_type.resolve_template(template_context)
assertions = VALID_TYPE_ATTRIBUTES[self.attribute_name]
valid_attribute = any(getattr(resolved_type, a) for a in assertions)
if not valid_attribute:
raise error.InvalidTemplateError(
"%s has no attribute %s" % (self.template_type,
self.attribute_name))
return getattr(resolved_type, self.attribute_name)
def __repr__(self):
return "%r.%s" % (self.template_type, self.attribute_name)
def __str__(self):
return "%s.%s" % (self.template_type, self.attribute_name)
class TemplateIndexType(_TemplateType):
typename = "template_index"
argnames = ["template_type", "index"]
flags = ["object", "template"]
def resolve_template(self, template_context):
attrib = self.template_type.resolve_template(template_context)
assert isinstance(attrib, (list, tuple, dict))
return attrib[self.index]
def __repr__(self):
return "%r[%r]" % (self.template_type, self.index)
def __str__(self):
return "%s[%r]" % (self.template_type, self.index)
def validate_template(concrete_type, template_type):
if not isinstance(template_type, type(concrete_type)):
raise error.InvalidTemplateError(
"Type argument does not match template type: %s and %s" % (
concrete_type, template_type))
if concrete_type.is_array:
if template_type.ndim != concrete_type.ndim:
raise error.InvalidTemplateError(
"Template expects %d dimensions, got %d" % (template_type.ndim,
concrete_type.ndim))
def match_template(template_type, concrete_type, template_context):
"""
This function matches up T in the example below with a concrete type
like double when a double pointer is passed in as argument:
def f(T.pointer() pointer):
scalar = T(...)
We can go two ways with this, e.g.
def f(T.base_type scalar):
pointer = T(...)
Which could work for things like pointers, though not for things like
arrays, since we can't infer the dimensionality.
We mandate that each Template type be resolved through a concrete type,
i.e.:
def f(T scalar):
pointer = T.pointer(...)
template_context:
Dict mapping template types to concrete types:
T1 -> double *
T2 -> float[:]
"""
if template_type.is_template_attribute:
# As noted in the description, we don't handle this
pass
elif template_type.is_template:
if template_type in template_context:
prev_type = template_context[template_type]
if prev_type != concrete_type:
raise error.InvalidTemplateError(
"Inconsistent types found for template: %s and %s" % (
prev_type, concrete_type))
else:
template_context[template_type] = concrete_type
else:
validate_template(concrete_type, template_type)
for t1, t2 in zip(subtype_list(template_type),
subtype_list(concrete_type)):
if not isinstance(t1, (list, tuple)):
t1, t2 = [t1], [t2]
for t1, t2 in zip(t1, t2):
match_template(t1, t2, template_context)
def resolve_template_type(ty, template_context):
"""
After the template context is known, resolve functions on template types
E.g.
T[:] -> array_(dtype=T)
void(T) -> function(args=[T])
Struct { T arg } -> struct(fields={'arg': T})
T * -> pointer(base_type=T)
Any other compound types?
"""
r = lambda t: resolve_template_type(t, template_context)
if ty.is_template:
ty = ty.resolve_template(template_context)
elif ty.is_array:
ty = nb.array(r(ty.dtype), ty.ndim)
elif ty.is_function:
ty = r(ty.return_type)(*map(r, ty.args))
elif ty.is_struct:
S = ty
fields = []
for field_name, field_type in S.fields:
fields.append((field_name, r(field_type)))
ty = nb.struct_(fields, name=S.name, readonly=S.readonly, packed=S.packed)
elif ty.is_pointer:
ty = r(ty.base_type).pointer()
return ty
def is_template_list(types):
return any(is_template(type) for type in types)
def subtype_list(T):
return T.subtypes
def is_template(T):
if isinstance(T, (list, tuple)):
return is_template_list(T)
return T.is_template or is_template_list(subtype_list(T))
def resolve_templates(locals, template_signature, arg_names, arg_types):
"""
Resolve template types given a signature with concrete types.
"""
template_context = {}
locals = locals or {}
# Resolve the template context with the types we have
for i, (arg_name, arg_type) in enumerate(zip(arg_names, arg_types)):
T = template_signature.args[i]
if is_template(T):
# Resolve template type
if arg_name in locals:
# Locals trump inferred argument types
arg_type = locals[arg_name]
match_template(T, arg_type, template_context)
else:
# Concrete type, patch argtypes. This is valid since templates
# are only supported for autojit functions
arg_types[i] = T
# Resolve types of local variables and functions on templates
# (T.dtype, T.pointer(), etc)
for local_name, local_type in locals.iteritems():
locals[local_name] = resolve_template_type(local_type,
template_context)
return_type = resolve_template_type(template_signature.return_type,
template_context)
signature = return_type(*arg_types)
return template_context, signature
########NEW FILE########
__FILENAME__ = test_casting
import sys
import os
import numpy as np
import ctypes
from numba import *
import numba
@autojit(backend='ast')
def cast_int():
value = 1.7
return int32(value)
@autojit(backend='ast')
def cast_complex():
value = 1.2
return complex128(value)
@autojit(backend='ast')
def cast_float():
value = 5
return float_(value)
@autojit(backend='ast')
def cast_object(dst_type):
value = np.arange(10, dtype=np.double)
return dst_type(value)
@autojit(backend='ast')
def cast_as_numba_type_attribute():
value = 4.4
return numba.int32(value)
def cast_in_python():
return int_(10) == 10
def test_casts():
assert cast_int() == 1
assert cast_complex() == 1.2 + 0j
assert cast_float() == 5.0
value = cast_object(double[:])
# print sys.getrefcount(value), value, np.arange(10, dtype=np.double)
assert np.all(value == np.arange(10, dtype=np.double)), value
assert cast_as_numba_type_attribute() == 4
assert cast_in_python()
if __name__ == "__main__":
test_casts()
########NEW FILE########
__FILENAME__ = test_consing
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.typesystem import numba_typesystem as ts
# ______________________________________________________________________
def test_mutability():
ty = ts.function(ts.int_, (ts.float_,))
ty.args
try:
ty.args = [1, 2]
except AttributeError as e:
pass
else:
raise Exception(
"Expected: AttributeError: Cannot set attribute 'args' of type ...")
# ______________________________________________________________________
def test_pointers():
assert ts.pointer(ts.int_) is ts.pointer(ts.int_)
def test_functions():
functype1 = ts.function(ts.int_, (ts.float_,))
functype2 = ts.function(ts.int_, (ts.float_,))
functype3 = ts.function(ts.int_, (ts.float_,), is_vararg=False)
functype4 = ts.function(ts.int_, (ts.float_,), name="hello")
functype5 = ts.function(ts.int_, (ts.float_,), name="hello", is_vararg=False)
functype6 = ts.function(ts.int_, (ts.float_,), name="hello", is_vararg=True)
assert functype1 is functype2
assert functype1 is functype3
assert functype1 is not functype4
assert functype1 is not functype5
assert functype1 is not functype6
assert functype4 is functype5
assert functype4 is not functype6
# def test_struct():
# s1 = ts.struct_([('a', ts.int_), ('b', ts.float_)])
# s2 = ts.struct_([('a', ts.int_), ('b', ts.float_)])
# assert s1 is not s2
def test_arrays():
A = ts.array(ts.double, 1)
B = ts.array(ts.double, 1)
C = ts.array(ts.float_, 1)
D = ts.array(ts.double, 2)
assert A is B
assert A is not C
assert A is not D
def test_complex():
assert ts.complex_(ts.float_) is ts.complex64
assert ts.complex_(ts.double) is ts.complex128
# assert ts.complex_(ts.longdouble) is ts.complex256
if __name__ == "__main__":
test_mutability()
test_pointers()
test_functions()
# test_struct()
test_arrays()
test_complex()
########NEW FILE########
__FILENAME__ = test_conversion
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ctypes
import inspect
from functools import partial
# from numba import llvm_types
from numba.typesystem.itypesystem import tyname
from numba import llvm_types
from numba.typesystem import itypesystem, universe
from numba.typesystem import (numba_typesystem as ts,
llvm_typesystem as lts,
ctypes_typesystem as cts)
typenames = universe.int_typenames + universe.float_typenames + ["void"]
def convert(ts1, ts2, conversion_type, typenames):
for typename in typenames:
t1 = getattr(ts1, tyname(typename))
t2 = getattr(ts2, tyname(typename))
converted = ts1.convert(conversion_type, t1)
assert converted == t2, (str(t1), str(converted), str(t2))
#-------------------------------------------------------------------
# Numba -> LLVM
#-------------------------------------------------------------------
llvmt = partial(ts.convert, "llvm")
def test_llvm_numeric_conversion():
convert(ts, lts, "llvm", typenames)
def test_llvm_pointers():
# Test pointer conversion
for typename in typenames:
ty = getattr(ts, tyname(typename))
lty = getattr(lts, tyname(typename))
assert llvmt(ts.pointer(ty)) == lts.pointer(lty)
p = ts.pointer(ts.pointer(ts.int_))
lp = lts.pointer(lts.pointer(lts.int_))
# See if the conversion works
assert llvmt(p) == lp
# See if the consing works
# assert llvmt(p) is lp
def test_llvm_functions():
functype = ts.function(ts.int_, (ts.float_,))
lfunctype = lts.function(lts.int_, (lts.float_,))
assert llvmt(functype) == lfunctype
def test_llvm_complex():
c1 = llvmt(ts.complex128)
c2 = lts.struct_([('real', lts.double), ('imag', lts.double)])
c3 = lts.struct_([('real', lts.double), ('imag', lts.double)])
assert c1 == c2
# assert c1 is c2
# assert c2 is c3 # enable after upgrading llvmpy to include type hash fix
def test_llvm_object():
assert llvmt(ts.object_) == llvm_types._pyobject_head_struct_p
def test_llvm_array():
assert llvmt(ts.array(ts.double, 1)) == llvm_types._numpy_array
assert llvmt(ts.array(ts.int_, 2)) == llvm_types._numpy_array
assert llvmt(ts.array(ts.object_, 3)) == llvm_types._numpy_array
def test_llvm_range():
assert llvmt(ts.range_) == llvm_types._pyobject_head_struct_p
#-------------------------------------------------------------------
# Numba -> ctypes
#-------------------------------------------------------------------
ct = partial(ts.convert, "ctypes")
def test_ctypes_numeric_conversion():
convert(ts, cts, "ctypes", typenames)
def test_ctypes_pointers(): # TODO: unifiy with test_llvm_pointers
# Test pointer conversion
for typename in typenames:
ty = getattr(ts, tyname(typename))
cty = getattr(cts, tyname(typename))
assert ct(ts.pointer(ty)) == cts.pointer(cty)
p = ts.pointer(ts.pointer(ts.int_))
cp = cts.pointer(cts.pointer(cts.int_))
# See if the conversion works
assert ct(p) == cp
# See if the consing works
# assert ct(p) is cp
def test_ctypes_functions(): # TODO: unifiy with test_llvm_functions
functype = ts.function(ts.int_, (ts.float_,))
cfunctype = cts.function(cts.int_, (cts.float_,))
assert ct(functype) == cfunctype
def test_ctypes_complex():
c1 = ct(ts.complex128)
c2 = cts.struct_([('real', cts.double), ('imag', cts.double)])
c3 = cts.struct_([('real', cts.double), ('imag', cts.double)])
assert c1._fields_ == c2._fields_, (c1._fields_, c2._fields_)
def test_ctypes_object():
assert ct(ts.object_) == ctypes.py_object
def test_ctypes_array():
assert ct(ts.array(ts.double, 1)) == ctypes.py_object
assert ct(ts.array(ts.int_, 2)) == ctypes.py_object
assert ct(ts.array(ts.object_, 3)) == ctypes.py_object
def test_ctypes_string():
assert ct(ts.string_) == ctypes.c_char_p
assert ct(ts.char.pointer()) == ctypes.c_char_p
if __name__ == "__main__":
# print(ct(ts.array(ts.double, 1)))
for name, f in globals().items():
if name.startswith("test_") and inspect.isfunction(f):
f()
########NEW FILE########
__FILENAME__ = test_template_types
from pprint import pprint
import numpy as np
import numba
from numba import *
from numba.testing.test_support import *
from numba.control_flow.tests.test_cfg_type_infer import infer as _infer
from numba import typesystem
T = numba.template()
@autojit_py3doc(T(T[:, :]), warn=False, locals=dict(scalar=T))
def test_simple_template(array):
"""
>>> test_simple_template(np.arange(10, 12, dtype=np.float32))
Traceback (most recent call last):
...
InvalidTemplateError: Template expects 2 dimensions, got 1
>>> test_simple_template(np.arange(10, 12, dtype=np.float32).reshape(2, 1))
10.0
#------------------------------------------------------------------------
# Test type resolving
#------------------------------------------------------------------------
>>> infer(test_simple_template.py_func, float64(float64[:, :]), T(T[:, :]),
... locals=dict(scalar=T))
[('array', float64[:, :]), ('scalar', float64)]
>>> infer(test_simple_template.py_func, float64(float64[:, :]), T(T[:, :]),
... locals=dict(scalar=T.pointer()))
Traceback (most recent call last):
...
UnpromotableTypeError: Cannot promote types float64 * and float64
#------------------------------------------------------------------------
# Test type attributes
#------------------------------------------------------------------------
>>> infer(test_simple_template.py_func, float64(float64[:, :]), T.dtype(T),
... locals=dict(scalar=T.dtype))
[('array', float64[:, :]), ('scalar', float64)]
"""
scalar = array[0, 0]
return scalar
#------------------------------------------------------------------------
# Test type matching
#------------------------------------------------------------------------
T1 = numba.template("T1")
T2 = numba.template("T2")
T3 = numba.template("T3")
T4 = numba.template("T4")
A = T1[:, :]
F = void(T1)
S = numba.struct([('a', T1), ('b', T2.pointer()), ('c', T3[:]), ('d', void(T4))])
P = T2.pointer()
type_context1 = { T1: int_, T2: float_, T3: float64, T4: short, }
type_context2 = { T1: int_[:, :], T2: void(float32),
T3: numba.struct(a=float64, b=float_), T4: short.pointer(), }
def test_type_matching(array, func, struct, pointer):
"""
>>> infer(test_type_matching, template_signature=void(A, F, S, P),
... type_context=type_context1)
[('array', int[:, :]), ('func', void (*)(int)), ('pointer', float32 *), ('struct', struct { int a, float32 * b, float64[:] c, void (*)(short) d })]
"""
func(array[0, 0])
struct.b = pointer
def test_type_attributes(array, func, struct, pointer):
"""
>>> locals = dict(dtype=T1.dtype, arg=T2.args[0], field_a=T3.fielddict['a'],
... field_b=T3.fielddict['b'], scalar=T4.base_type)
>>> pprint(infer(test_type_attributes, template_signature=void(T1, T2, T3, T4),
... type_context=type_context2, locals=locals))
[('array', int[:, :]),
('func', void (*)(float32)),
('pointer', short *),
('struct', struct { float64 a, float32 b }),
('arg', float32),
('dtype', int),
('field_a', float64),
('field_b', float32),
('scalar', short)]
"""
dtype = array[0, 0]
arg = 0
field_a = 0
field_b = 0
scalar = 0
@autojit_py3doc(T(T, float64), locals=None)
def test_template_with_concretes(a, b):
"""
>>> test_template_with_concretes(1, 2)
3
"""
return a + b
@autojit(complex128(T, float64), locals=None)
def test_template_with_concretes2(a, b):
"""
>>> test_template_with_concretes2(1, 2)
(3+0j)
>>> test_template_with_concretes2(1.0, 2.0)
(3+0j)
>>> test_template_with_concretes2(1+0j, 2)
(3+0j)
>>> test_template_with_concretes2(1+0j, 2+0j)
Traceback (most recent call last):
...
TypeError: can't convert complex to float
"""
return a + b
@autojit_py3doc(T2(T1, float64), locals=None)
def test_unknown_template_error(a, b):
"""
>>> test_unknown_template_error(1, 2)
Traceback (most recent call last):
...
InvalidTemplateError: Unknown template type: T2
"""
return a + b
@autojit_py3doc(T(T, T), locals=None)
def test_template_inconsistent_types_error(a, b):
"""
>>> test_template_inconsistent_types_error(1, 2)
3
>>> test_template_inconsistent_types_error(1, 2.0)
Traceback (most recent call last):
...
InvalidTemplateError: Inconsistent types found for template: int and float64
"""
return a + b
#------------------------------------------------------------------------
# Test utilities
#------------------------------------------------------------------------
def infer(func, signature=None, template_signature=None,
locals=None, type_context=None):
if signature is None:
signature = specialize(template_signature, type_context)
if locals is not None:
locals = dict(locals)
sig, symbols = _infer(func, signature,
template_signature=template_signature,
locals=locals, warn=False)
if locals is not None:
local_vars = sorted(locals.iteritems())
else:
local_vars = []
vars = sorted((name, var.type) for name, var in symbols.iteritems())
return vars + local_vars
def specialize(T, context):
return typesystem.resolve_template_type(T, context)
if __name__ == '__main__':
testmod()
########NEW FILE########
__FILENAME__ = test_typeof
import numpy as np
import numba
from numba import *
from numba.testing.test_support import autojit_py3doc
@jit
class Foo(object):
def __init__(self, arg):
self.arg = double(arg)
def test_typeof_pure(arg):
"""
>>> test_typeof_pure(10)
int
>>> test_typeof_pure(10.0)
float64
>>> print(test_typeof_pure(Foo(10)))
<JitExtension Foo({'arg': float64})>
"""
return numba.typeof(arg)
@autojit_py3doc
def test_typeof_numba(a, b):
"""
>>> test_typeof_numba(10, 11.0)
21
>>> test_typeof_numba(11.0, 10)
21.0
>>> test_typeof_numba(np.arange(10), 1)
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
"""
return numba.typeof(a)(a + b)
@autojit
def test_typeof_numba2(arg):
"""
>>> test_typeof_numba2(10)
(10+0j)
"""
x = 1 + 2j
arg = numba.typeof(x)(arg)
return numba.typeof(arg)(arg)
@autojit
def test_typeof_numba3(arg):
"""
>>> print(test_typeof_numba3(10))
int
>>> print(test_typeof_numba3(Foo(10)))
<JitExtension Foo({'arg': float64})>
"""
return numba.typeof(arg)
@autojit
def test_typeof_type(arg):
"""
>>> test_typeof_type(int_)
meta(int)
"""
return numba.typeof(arg)
numba.testing.testmod()
########NEW FILE########
__FILENAME__ = test_type_constructors
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba.typesystem import numba_typesystem as ts
def test_pointers():
p = ts.pointer(ts.pointer(ts.int_))
assert str(p) == "int **", str(p)
def test_arrays():
A = ts.array(ts.double, 1)
B = ts.array(ts.double, 2)
assert str(A) == "float64[:]"
assert str(A[1:]) == "float64"
assert str(B[1:]) == "float64[:]"
assert str(B[-1:10]) == "float64[:]"
assert str(B[0:]) == "float64[:, :]"
assert str(B[0:10]) == "float64[:, :]"
assert str(B[-2:10]) == "float64[:, :]"
def test_functions():
functype = ts.function(ts.int_, (ts.float_,))
assert str(functype) == "int (*)(float32)", functype
functype = ts.function(ts.int_, (ts.float_,), "hello")
assert str(functype) == "int (*hello)(float32)", functype
if __name__ == "__main__":
test_pointers()
test_arrays()
test_functions()
########NEW FILE########
__FILENAME__ = test_type_properties
from numba.typesystem import *
assert int_.is_int
assert int_.is_numeric
assert long_.is_int
assert long_.is_numeric
assert not long_.is_long
assert float_.is_float
assert float_.is_numeric
assert double.is_float
assert double.is_numeric
assert not double.is_double
assert object_.is_object
assert list_(int_, 2).is_list
assert list_(int_, 2).is_object
assert function(void, [double]).is_function
########NEW FILE########
__FILENAME__ = typematch
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import fnmatch
def _typematch(pattern, typerepr):
return fnmatch.fnmatch(typerepr, pattern)
def typematch(pattern, ty):
"""
Match a type pattern to a type.
>>> type = list_(object_, 2)
>>> typematch("list(*, 2)", type)
True
>>> typematch("list(*)", type)
True
>>> typematch("list(*)", type)
True
>>> typematch("tuple(*)", type)
False
>>> typematch("object_", type)
True
"""
return (_typematch(pattern, repr(ty)) or
any(_typematch(pattern, flag) for flag in ty.flags))
if __name__ == '__main__':
import doctest
doctest.testmod()
########NEW FILE########
__FILENAME__ = types
# -*- coding: utf-8 -*-
"""
User-facing numba types.
"""
from __future__ import print_function, division, absolute_import
import numba
import ctypes
from itertools import imap
from functools import partial
from numba import odict
from numba.typesystem.itypesystem import Type, Conser, add_flags, tyname
import numpy as np
#------------------------------------------------------------------------
# Type metaclass
#------------------------------------------------------------------------
numba_type_registry = odict.OrderedDict()
register = numba_type_registry.__setitem__
class Accessor(object):
def __init__(self, idx):
self.idx = idx
def __get__(self, obj, type=None):
return obj.params[self.idx]
def __set__(self, obj, value):
if not obj.mutable:
raise AttributeError("Cannot set attribute '%s' of type '%s'" %
(obj.argnames[self.idx], type(obj)))
obj.params[self.idx] = value
class TypeMetaClass(type):
"Metaclass for numba types, conses immutable types."
def __init__(self, name, bases, dict):
if dict.get('typename') is None and name[0].islower():
self.typename = name.rstrip("_")
if self.typename is not None:
register(self.typename, self)
_update_class(self)
self.conser = Conser(partial(type.__call__, self))
def __call__(self, *args, **kwds):
args = self.default_args(args, kwds)
if not self.mutable:
return self.conser.get(*args)
return type.__call__(self, *args)
#------------------------------------------------------------------------
# Type Decorators
#------------------------------------------------------------------------
def _update_class(cls):
# Build defaults dict { argname : default_value }
if 'defaults' not in vars(cls):
cls.defaults = {}
for i, argname in enumerate(cls.argnames):
if isinstance(argname, (list, tuple)):
name, default = argname
cls.argnames[i] = name
cls.defaults[name] = default
# Create accessors
for i, arg in enumerate(vars(cls).get("argnames", ())):
assert not getattr(cls, arg, False), (cls, arg)
setattr(cls, arg, Accessor(i))
# Process flags
flags = list(cls.flags)
if cls.typename:
flags.append(cls.typename.strip("_"))
add_flags(cls, flags)
def consing(cls):
"""
Cons calls to the constructor.
"""
cls.mutable = False
return cls
def notconsing(cls):
cls.mutable = True
return cls
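# Hypothetical illustration of the two decorators above (not part of the
# original file): a @consing constructor returns the identical instance for
# identical arguments, while a @notconsing constructor builds a fresh object
# on every call. The pointer and struct_ names are defined later in this
# module and resolve when the function is called.
def _example_consing():
    from numba import double
    assert pointer(double) is pointer(double)                        # consed
    assert struct_([('a', double)]) is not struct_([('a', double)])  # not consed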
#------------------------------------------------------------------------
# Type Implementations
#------------------------------------------------------------------------
class _NumbaType(Type):
"""
MonoType with user-facing methods:
call: create a function type
slice: create an array type
conversion: to_llvm/to_ctypes/get_dtype
"""
argnames = []
flags = []
defaults = {}
qualifiers = frozenset()
# ______________________________________________________________________
# Internal
def add(self, attr, value):
"Construct new type with attr=value (e.g. functype.add('args', []))"
assert not self.mutable
params = list(self.params)
params[self.argnames.index(attr)] = value
return type(self)(*params)
@property
def subtypes(self):
subtypes = []
for p in self.params:
if isinstance(p, (Type, list, tuple)):
subtypes.append(p)
return subtypes
# ______________________________________________________________________
# User functionality
def pointer(self):
return pointer(self)
def ref(self):
return reference(self)
def qualify(self, *qualifiers):
return self # TODO: implement
def unqualify(self, *qualifiers):
return self # TODO: implement
# TODO: Remove context argument in favour of typesystem argument
def to_llvm(self, context=None):
from . import defaults
return defaults.numba_typesystem.convert("llvm", self)
def to_ctypes(self):
from . import defaults
return defaults.numba_typesystem.convert("ctypes", self)
def to_numpy(self):
from numba.typesystem import numpy_support
return numpy_support.to_dtype(self)
get_dtype = to_numpy
# ______________________________________________________________________
# Special methods (user functionality)
def __getitem__(self, item):
"""
Support array type creation by slicing, e.g. double[:, :] specifies
a 2D strided array of doubles. The syntax is the same as for
Cython memoryviews.
"""
assert isinstance(item, (tuple, slice)), item
def verify_slice(s):
if s.start or s.stop or s.step not in (None, 1):
raise ValueError(
"Only a step of 1 may be provided to indicate C or "
"Fortran contiguity")
if isinstance(item, tuple):
step_idx = None
for idx, s in enumerate(item):
verify_slice(s)
if s.step and (step_idx or idx not in (0, len(item) - 1)):
raise ValueError(
"Step may only be provided once, and only in the "
"first or last dimension.")
if s.step == 1:
step_idx = idx
return array_(self, len(item),
is_c_contig=step_idx == len(item) - 1,
is_f_contig=step_idx == 0)
else:
verify_slice(item)
return array_(self, 1, is_c_contig=bool(item.step))
def __call__(self, *args):
"""
Return a new function type when called with type arguments.
"""
if len(args) == 1 and not isinstance(args[0], Type):
# Cast in Python space
# TODO: Create proxy object
# TODO: Fully customizable type system (do this in Numba, not
# minivect)
return args[0]
return function(self, args)
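# A small hypothetical usage sketch of the slicing and call syntax defined
# above; it assumes the double and int_ types exported by numba.
def _example_type_sugar():
    from numba import double, int_
    strided_2d = double[:, :]         # array_(double, 2)
    c_contig_1d = double[::1]         # array_(double, 1, is_c_contig=True)
    signature = int_(double, double)  # function(int_, (double, double))
    return strided_2d, c_contig_1d, signature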
@notconsing
class NumbaType(_NumbaType):
"""
Base for numba types.
"""
__metaclass__ = TypeMetaClass
# __slots__ = Type.slots
typename = None
def __init__(self, *args, **kwds):
super(NumbaType, self).__init__(self.typename, *args, **kwds)
assert len(args) == len(self.argnames), (self.typename, args)
for name in kwds:
assert name in self.argnames, (self.typename, kwds, self.argnames)
@classmethod
def default_args(cls, args, kwargs):
names = cls.argnames
if len(args) == len(names):
return args
# Insert defaults in args tuple
args = list(args)
for name in names[len(args):]:
if name in kwargs:
args.append(kwargs[name])
elif name in cls.defaults:
args.append(cls.defaults[name])
else:
raise TypeError("Constructor '%s' requires %d arguments "
"(got %d)" % (cls.typename, len(names), len(args)))
return tuple(args)
#------------------------------------------------------------------------
# Low-level parametrized types
#------------------------------------------------------------------------
def pass_by_ref(type): # TODO: Get rid of this
return type.is_struct or type.is_complex or type.is_datetime or type.is_timedelta
class Function(object):
"""
Function types may be called with Python functions to create a Function
object. This may be used by minivect users for their own purposes, e.g.
@double(double, double)
def myfunc(...):
...
"""
def __init__(self, signature, py_func):
self.signature = signature
self.py_func = py_func
def __call__(self, *args, **kwargs):
"""
Implement this to pass the callable test for classmethod/staticmethod.
E.g.
@classmethod
@void()
def m(self):
...
"""
raise TypeError("Not a callable function")
@consing
class function(NumbaType):
typename = "function"
argnames = ['return_type', 'args', ('name', None), ('is_vararg', False)]
def add_arg(self, i, arg):
args = list(self.args)
args.insert(i, arg)
return self.add('args', args)
# ______________________________________________________________________
@property
def struct_by_reference(self):
rt = self.return_type
byref = lambda t: t.is_struct or t.is_complex or t.is_datetime or t.is_timedelta
return rt and byref(rt) or any(imap(byref, self.args))
@property
def actual_signature(self):
"""
Passing structs by value is not properly supported for different
calling conventions in LLVM, so we take an extra argument
pointing to a caller-allocated struct value.
"""
from numba import typesystem as ts
if self.struct_by_reference:
args = []
for arg in self.args:
if pass_by_ref(arg):
arg = arg.pointer()
args.append(arg)
return_type = self.return_type
if pass_by_ref(self.return_type):
return_type = ts.void
args.append(self.return_type.pointer())
self = function(return_type, args)
return self
@property
def struct_return_type(self):
# Function returns a struct.
return self.return_type.pointer()
# ______________________________________________________________________
def __repr__(self):
args = [str(arg) for arg in self.args]
if self.is_vararg:
args.append("...")
if self.name:
namestr = self.name
else:
namestr = ''
return "%s (*%s)(%s)" % (self.return_type, namestr, ", ".join(args))
def __call__(self, *args):
if len(args) != 1 or isinstance(args[0], Type):
return super(function, self).__call__(*args)
assert self.return_type is not None
assert self.argnames is not None
func, = args
return Function(self, func)
# ______________________________________________________________________
# Pointers
@consing
class pointer(NumbaType):
argnames = ['base_type']
@property
def is_string(self): # HACK
import numba
return self.base_type == numba.char
def __repr__(self):
space = " " * (not self.base_type.is_pointer)
return "%s%s*" % (self.base_type, space)
@consing
class sized_pointer(NumbaType):
"""
A pointer with knowledge of its range.
E.g. an array's 'shape' or 'strides' attribute.
This also allows tuple unpacking.
"""
typename = "sized_pointer"
argnames = ["base_type", "size"]
flags = ["pointer"]
# def __eq__(self, other):
# if other.is_sized_pointer:
# return (self.base_type == other.base_type and
# self.size == other.size)
# return other.is_pointer and self.base_type == other.base_type
#
# def __hash__(self):
# return hash(self.base_type.pointer())
@consing
class carray(NumbaType):
argnames = ["base_type", "size"]
# ______________________________________________________________________
# Structs
@consing
class istruct(NumbaType):
argnames = ["fields", ("name", None), ("readonly", False), ("packed", False)]
@property
def subtypes(self):
return [f[1] for f in self.fields]
@property
def fielddict(self):
return dict(self.fields)
def __repr__(self):
if self.name:
name = self.name + ' '
else:
name = ''
return 'struct %s{ %s }' % (
name, ", ".join(["%s %s" % (field_type, field_name)
for field_name, field_type in self.fields]))
def is_prefix(self, other_struct):
other_fields = other_struct.fields[:len(self.fields)]
return self.fields == other_fields
def offsetof(self, field_name):
"""
Compute the offset of a field. Must be used only after mutation has
finished.
"""
ctype = self.to_ctypes()
return getattr(ctype, field_name).offset
@notconsing
class struct_(istruct):
"""
Create a struct type. Fields may be ordered or unordered. Unordered fields
will be ordered from big types to small types (for better alignment).
"""
mutable = True
def __eq__(self, other):
return other.is_struct and self.fields == other.fields
def __hash__(self):
return hash(tuple(self.fields))
def copy(self):
return type(self)(self.fields, self.name, self.readonly, self.packed)
def add_field(self, name, type):
assert name not in self.fielddict
self.fields.append((name, type))
self.mutated = True
def update_mutated(self):
self.rank = sum([sort_key(field) for field in self.fields])
self.mutated = False
#------------------------------------------------------------------------
# High-level types
#------------------------------------------------------------------------
@consing
class array_(NumbaType):
"""
An array type. array_ may be sliced to obtain a subtype:
>>> double[:, :, ::1][1:]
double[:, ::1]
>>> double[:, :, ::1][:-1]
double[:, :]
>>> double[::1, :, :][:-1]
double[::1, :]
>>> double[::1, :, :][1:]
double[:, :]
"""
argnames = ["dtype", "ndim", "is_c_contig", "is_f_contig", "inner_contig"]
defaults = dict.fromkeys(argnames[-3:], False)
flags = ["object"]
def pointer(self):
raise Exception("You probably want a pointer type to the dtype")
@property
def strided(self):
return array_(self.dtype, self.ndim)
def __repr__(self):
axes = [":"] * self.ndim
if self.is_c_contig and self.ndim > 0:
axes[-1] = "::1"
elif self.is_f_contig and self.ndim > 0:
axes[0] = "::1"
return "%s[%s]" % (self.dtype, ", ".join(axes))
def __getitem__(self, index):
"Slicing an array slices the dimensions"
assert isinstance(index, slice)
assert index.step is None
assert index.start is not None or index.stop is not None
start = 0
stop = self.ndim
if index.start is not None:
start = index.start
if index.stop is not None:
stop = index.stop
ndim = len(range(self.ndim)[start:stop])
if ndim == 0:
return self.dtype
elif ndim > 0:
return type(self)(self.dtype, ndim)
else:
raise IndexError(index, ndim)
@consing
class autojit_function(NumbaType):
"Type for autojit functions"
argnames = ["autojit_func"]
flags = ["object"]
@consing
class jit_function(NumbaType):
"Type for jit functions"
argnames = ["jit_func"]
flags = ["object"]
@consing
class numpy_dtype(NumbaType):
"Type of numpy dtypes"
argnames = ["dtype"]
flags = ["object"]
@consing
class complex_(NumbaType):
argnames = ["base_type"]
flags = ["numeric"]
@property
def itemsize(self):
return self.base_type.itemsize * 2
def __repr__(self):
return "complex%d" % (self.itemsize * 8,)
@consing
class datetime_(NumbaType):
argnames = ["timestamp", "units", "units_char"]
flags = ["numeric"]
is_numpy_datetime = True
@property
def itemsize(self):
return self.timestamp.itemsize + self.units.itemsize
def __repr__(self):
if self.units_char:
return "datetime_" + self.units_char
else:
return "datetime"
@consing
class timedelta_(NumbaType):
argnames = ["diff", "units", "units_char"]
flags = ["numeric"]
is_numpy_timedelta = True
@property
def itemsize(self):
return self.diff.itemsize + self.units.itemsize
def __repr__(self):
if self.units_char:
return "timedelta_" + self.units_char
else:
return "timedelta"
@consing
class meta(NumbaType):
"""
A type instance in user code. e.g. double(value). The Name node will have
a cast-type with dst_type 'double'.
"""
argnames = ["dst_type"]
flags = [
"object",
"cast", # backwards compat
]
#------------------------------------------------------------------------
# Container Types
#------------------------------------------------------------------------
@consing
class ContainerListType(NumbaType):
"""
:param base_type: the element type of the tuple
:param size: set to a value >= 0 is the size is known
:return: a tuple type representation
"""
argnames = ["base_type", "size"]
flags = ["object", "container"]
def is_sized(self):
return self.size >= 0
@consing
class tuple_(ContainerListType):
"tuple(base_type, size)"
@consing
class list_(ContainerListType):
"list(base_type, size)"
@consing
class MapContainerType(NumbaType):
argnames = ["key_type", "value_type", "size"]
flags = ["object"]
@consing
class dict_(MapContainerType):
"dict(key, value, size)"
#------------------------------------------------------------------------
# Types to be removed
#------------------------------------------------------------------------
class numpy_attribute(NumbaType): # TODO: remove
argnames = ["module", "attr"]
flags = ["object", "known_value"]
@property
def value(self):
return getattr(self.module, self.attr)
class module_attribute(NumbaType): # TODO: remove
argnames = ["module", "attr"]
flags = ["object", "known_value"]
@property
def value(self):
return getattr(self.module, self.attr)
@consing
class reference(NumbaType): # TODO: remove ?
"""
A reference to a (primitive or Python) object. This is passed as a
pointer and dereferences automatically.
Currently only supported for structs.
"""
argnames = ["referenced_type"]
@consing
class method(NumbaType): # TODO: remove
"""
Method of something.
base_type: the object type the attribute was accessed on
"""
argnames = ["base_type", "attr_name"]
flags = ["object"]
class pointer_to_function(NumbaType): # TODO: remove
"""
Pointer to a function at a known address represented by some Python
object (e.g. a ctypes or CFFI function).
"""
typename = "pointer_to_function"
argnames = ["obj", "ptr", "signature"]
flags = ["object"]
@consing
class known_value(NumbaType): # TODO: remove
"""
Type which is associated with a known value or well-defined symbolic
expression:
np.add => np.add
np.add.reduce => (np.add, "reduce")
(Remember that unbound methods like np.add.reduce are transient, i.e.
np.add.reduce is not np.add.reduce).
"""
argnames = ["value"]
@consing
class known_pointer(pointer): # TODO: remove
argnames = ["base_type", "address"]
@notconsing
class global_(known_value): # TODO: Remove
"Global type"
@consing
class builtin_(known_value): # TODO: remove
argnames = ["name", "value"]
flags = ["object"]
@property
def func(self):
return self.value
@consing
class module(known_value): # TODO: remove
"""
Represents a type for modules.
Attributes:
is_numpy_module: whether the module is the numpy module
module: in case of numpy, the numpy module or a submodule
"""
flags = ["object"]
# TODO: Get rid of these
is_numpy_module = property(lambda self: self.module is np)
is_numba_module = property(lambda self: self.module is np)
is_math_module = property(lambda self: self.module is np)
@property
def module(self):
return self.value
#------------------------------------------------------------------------
# Convenience functions...
#------------------------------------------------------------------------
unit = _NumbaType.unit
_array = array_
_struct = struct_
def from_numpy_dtype(np_dtype):
"""
:param np_dtype: the NumPy dtype (e.g. np.dtype(np.double))
:return: a dtype type representation
"""
from numba.typesystem import numpy_support
return numpy_dtype(numpy_support.map_dtype(np_dtype))
def array(dtype, ndim, is_c_contig=False, is_f_contig=False, inner_contig=False):
"""
:param dtype: the Numba dtype type (e.g. double)
:param ndim: the array dimensionality (int)
:return: an array type representation
"""
if ndim == 0:
return dtype
return _array(dtype, ndim, is_c_contig, is_f_contig, inner_contig)
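# Hypothetical usage sketch for the two helpers above; assumes numpy and the
# numba double type are importable as below.
def _example_dtype_helpers():
    import numpy as np
    from numba import double
    dt = from_numpy_dtype(np.dtype(np.double))  # numpy_dtype wrapping double
    arr2d = array(double, 2)                    # double[:, :]
    scalar = array(double, 0)                   # ndim == 0 collapses to the dtype
    return dt, arr2d, scalar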
# ______________________________________________________________________
def sort_key(t):
n, ty = t
return ctypes.sizeof(ty.to_ctypes())
def struct_(fields=(), name=None, readonly=False, packed=False, **kwargs):
"Create a mutable struct type"
if fields and kwargs:
raise TypeError("The struct must be either ordered or unordered")
elif kwargs:
import ctypes
fields = sorted(kwargs.iteritems(), key=sort_key, reverse=True)
# fields = sort_types(kwargs)
# fields = list(kwargs.iteritems())
return _struct(fields, name, readonly, packed)
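# Hypothetical sketch of the helper above: fields may be passed in order, or
# as keywords, in which case they are sorted by ctypes size, largest first.
def _example_structs():
    from numba import int_, double, float_
    ordered = struct_([('a', int_), ('b', double)], name='Pair')
    unordered = struct_(a=float_, b=double)   # 'b' (8 bytes) precedes 'a' (4 bytes)
    return ordered, unordered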
########NEW FILE########
__FILENAME__ = typeset
# -*- coding: utf-8 -*-
"""
Defines the typeset class and a number of builtin type sets.
"""
from __future__ import print_function, division, absolute_import
import collections
from functools import reduce
from itertools import starmap
from itertools import izip
from numba import typesystem
from numba.typesystem import types
__all__ = [ 'typeset', 'numeric', 'integral', 'floating', 'complextypes' ]
#----------------------------------------------------------------------------
# Signature matching
#----------------------------------------------------------------------------
def _match_argtype(type1, type2):
return (type1.is_typeset and type2 in type1.types) or type1 == type2
def _build_position_table(signature):
table = collections.defaultdict(list)
for i, argtype in enumerate(signature.args):
if argtype.is_typeset:
table[argtype].append(i)
return table
def get_effective_argtypes(promote, signature, argtypes):
"""
Get promoted argtypes for typeset arguments, e.g.
signature = floating(floating, floating)
argtypes = [float, double]
=>
[double, double]
"""
args = list(argtypes)
position_table = _build_position_table(signature)
promotion_table = {}
for poslist in position_table.values():
if len(poslist) > 1:
# Find all argument types corresponding to a type set
types = [args[i] for i in poslist]
# Promote corresponding argument types
result_type = reduce(promote, types)
# Update promotion table
type_set = signature.args[poslist[-1]]
promotion_table[type_set] = result_type
# Build coherent argument type list
for i in poslist:
args[i] = result_type
return promotion_table, args
def match(promote, signature, argtypes):
"""
See whether a specialization matches the given function signature.
"""
if len(signature.args) == len(argtypes):
promotion_table, args = get_effective_argtypes(
promote, signature, argtypes)
if all(starmap(_match_argtype, izip(signature.args, args))):
restype = signature.return_type
restype = promotion_table.get(restype, restype)
return restype(*args)
return None
#----------------------------------------------------------------------------
# Type sets
#----------------------------------------------------------------------------
class typeset(types.NumbaType):
"""
Holds a set of types that can be used to specify signatures for
type inference.
"""
typename = "typeset"
argnames = ["types", "name"]
defaults = {"name": None}
flags = ["object"]
def __init__(self, types, name):
super(typeset, self).__init__(frozenset(types), name)
self.first_type = types[0]
self._from_argtypes = {}
for type in types:
if type.is_function:
self._from_argtypes[type.args] = type
def find_match(self, promote, argtypes):
argtypes = tuple(argtypes)
if argtypes in self._from_argtypes:
return self._from_argtypes[argtypes]
for type in self.types:
signature = match(promote, type, argtypes)
if signature:
return signature
return None
def __iter__(self):
return iter(self.types)
def __repr__(self):
return "typeset(%s, ...)" % (self.first_type,)
def __hash__(self):
return hash(id(self))
numeric = typeset(typesystem.numeric)
integral = typeset(typesystem.integral)
floating = typeset(typesystem.floating)
complextypes = typeset(typesystem.complextypes)
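# Hypothetical sketch (not in the original file): a typeset of concrete
# signatures can serve as a small overload set; an exact argtype match hits
# the _from_argtypes fast path and needs no promotion function.
def _example_typeset():
    from numba import int_, double
    overloads = typeset([int_(int_, int_), double(double, double)])
    return overloads.find_match(None, (double, double))  # -> the double(double, double) signature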
########NEW FILE########
__FILENAME__ = typeutils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import error
from numba.typesystem import *
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
ts = numba_typesystem
def is_obj(type):
return type.is_object or type.is_array
native_type_dict = {}
for native_type in native_integral:
if native_type not in (Py_ssize_t, npy_intp, Py_uintptr_t, size_t): # TODO: do this better
native_type_dict[(native_type.itemsize, native_type.signed)] = native_type
def promote_to_native(int_type):
return native_type_dict[int_type.itemsize, int_type.signed]
def promote_closest(ts, int_type, candidates):
"""
promote_closest(Py_ssize_t, [int_, long_, longlong]) -> longlong
"""
for candidate in candidates:
promoted = ts.promote(int_type, candidate)
if promoted.itemsize == candidate.itemsize and promoted.signed == candidate.signed:
return candidate
return candidates[-1]
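# Hypothetical sketch of the promotion helpers above. The exact results are
# platform dependent (they follow the itemsize of Py_ssize_t), so no exact
# value is asserted here; int_, long_ and longlong are assumed importable
# from numba.
def _example_promotion():
    from numba import int_, long_, longlong
    closest = promote_closest(ts, Py_ssize_t, [int_, long_, longlong])  # longlong on most 64-bit platforms
    native = promote_to_native(Py_ssize_t)
    return closest, native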
def get_type(ast_node):
"""
:param ast_node: a Numba or Python AST expression node
:return: the type of the expression node
"""
return ast_node.variable.type
def error_index(type):
raise error.NumbaError("Type %s can not be indexed or "
"iterated over" % (type,))
def index_type(type):
"Result of indexing a value of the given type with an integer index"
if type.is_array:
result = array(type.dtype, type.ndim - 1)
elif type.is_container or type.is_pointer or type.is_carray:
result = type.base_type
elif type.is_dict:
result = type.value_type
elif type.is_range:
result = Py_ssize_t
elif type.is_object:
result = object_
else:
error_index(type)
return result
def element_type(type):
"Result type of iterating over something"
if type.is_dict:
return type.key_type
elif type.is_pointer and not type.is_sized_pointer:
error_index(type)
else:
return index_type(type)
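# Hypothetical sketch of the two helpers above; double and int_ are assumed
# importable from numba as below.
def _example_indexing_types():
    from numba import double, int_
    assert index_type(array(double, 2)) == array(double, 1)  # indexing drops a dimension
    assert index_type(int_.pointer()) == int_                # a pointer yields its base type
    assert element_type(array(double, 1)) == double          # iterating a 1D array yields scalars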
def require(ast_nodes, properties):
"Assert that the types of the given nodes meets a certain requirement"
for ast_node in ast_nodes:
if not any(getattr(get_type(ast_node), p) for p in properties):
typenames = ", or ".join(p[3:] for p in properties) # remove 'is_' prefix
raise error.NumbaError(ast_node, "Expected an %s" % (typenames,))
def pyfunc_signature(nargs):
"Signature of a python function with N arguments"
return function(args=(object_,) * nargs, return_type=object_)
########NEW FILE########
__FILENAME__ = universe
# -*- coding: utf-8 -*-
"""
Universes of type constructors for numba.
"""
from __future__ import print_function, division, absolute_import
import struct as struct_module
import ctypes
import numpy as np
names = lambda *names: list(names) #list(map(tyname, names))
int_typenames = names(
'char', 'uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong',
'longlong', 'ulonglong', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'size_t', 'npy_intp', 'Py_ssize_t', 'Py_uintptr_t',
'bool', # hmm
)
signed = frozenset(names(
'char', 'short', 'int', 'long', 'longlong',
'int8', 'int16', 'int32', 'int64',
'Py_ssize_t', 'npy_intp',
))
float_typenames = names(
'float', 'double', 'float32', 'float64', #'longdouble', 'float128',
)
complex_typenames = names(
'complex64', 'complex128', #'complex256',
)
#------------------------------------------------------------------------
# Default type sizes
#------------------------------------------------------------------------
_plat_bits = struct_module.calcsize('@P') * 8
def getsize(ctypes_name, default):
try:
return ctypes.sizeof(getattr(ctypes, ctypes_name))
except (ImportError, AttributeError):  # a missing ctypes type falls back to the default
return default
# Type sizes in bytes
type_sizes = {
"bool": 1,
# Int
"char": 1,
"int8": 1,
"int16": 2,
"int32": 4,
"int64": 8,
# Unsigned int
"uchar": 1,
"uint8": 1,
"uint16": 2,
"uint32": 4,
"uint64": 8,
# Float
# "float16": 2,
"float32": 4,
"float64": 8,
# "float128": 16,
"float": 4,
"double": 8,
# Complex
"complex64": 8,
"complex128": 16,
# "complex256": 32,
}
ctypes_npy_intp = np.empty(0).ctypes.strides._type_
sizeof_longdouble = np.dtype(np.longdouble).itemsize # Use numpy's opinion here
native_sizes = {
"char": 1,
"uchar": 1,
# Int
"short": struct_module.calcsize("h"),
"int": struct_module.calcsize("i"),
"long": struct_module.calcsize("l"),
"longlong": struct_module.calcsize("Q"),
"Py_ssize_t": getsize('c_size_t', _plat_bits // 8),
"npy_intp": ctypes.sizeof(ctypes_npy_intp),
# Unsigned int
"ushort": struct_module.calcsize("H"),
"uint": struct_module.calcsize("I"),
"ulong": struct_module.calcsize("L"),
"ulonglong": struct_module.calcsize("Q"),
"size_t": getsize('c_size_t', _plat_bits // 8),
"Py_uintptr_t": ctypes.sizeof(ctypes.c_void_p),
# Float
# ctypes and numpy may disagree on longdouble
# "longdouble": sizeof_longdouble,
# "float128": sizeof_longdouble,
# Complex
# "complex256": sizeof_longdouble * 2,
# Pointer
"pointer": ctypes.sizeof(ctypes.c_void_p),
}
default_type_sizes = dict(type_sizes, **native_sizes)
is_native_int = native_sizes.__contains__
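# Hypothetical sketch: the merged table can be queried by type name, and
# is_native_int distinguishes platform-sized from fixed-width integer names.
def _example_sizes():
    assert default_type_sizes["int32"] == 4
    assert default_type_sizes["complex128"] == 16
    assert is_native_int("long") and not is_native_int("int32")
    return default_type_sizes["pointer"]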
########NEW FILE########
__FILENAME__ = deferred
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import translate, utils, typesystem
from numba.symtab import Variable
def create_deferred(type_inferer, node, deferred_cls):
"Create a deferred type for an AST node"
variable = Variable(None)
deferred_type = deferred_cls(variable, type_inferer, node)
variable.type = deferred_type
node.variable = variable
return deferred_type
def create_deferred_call(type_inferer, arg_types, call_node):
"Set the ast.Call as uninferable for now"
deferred_type = create_deferred(type_inferer, call_node,
typesystem.DeferredCallType)
for arg, arg_type in zip(call_node.args, arg_types):
if arg_type.is_unresolved:
deferred_type.dependences.append(arg)
deferred_type.update()
return call_node
########NEW FILE########
__FILENAME__ = infer
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import cmath
import types
import logging
try:
import __builtin__ as builtins
except ImportError:
import builtins
from functools import reduce, partial
import numba
from numba import *
from numba import error, control_flow, visitors, nodes
from numba import oset, odict
from numba.type_inference.modules import mathmodule
from numba.type_inference import module_type_inference, infer_call, deferred
from numba import utils, typesystem
from numba.control_flow import ssa
from numba.typesystem import ssatypes
from numba.typesystem.ssatypes import kosaraju_strongly_connected
from numba.symtab import Variable
from numba import closures as closures
import numba.wrapping.compiler
from numba.support import numpy_support
from numba.exttypes.variable import ExtensionAttributeVariable
from numba.typesystem import get_type
import llvm.core
import numpy
debug = False
#debug = True
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
def lookup_global(env, name, position_node):
func_env = env.translation.crnt
func = func_env.func
if (func is not None and name in func.__code__.co_freevars and
func.__closure__):
cell_idx = func.__code__.co_freevars.index(name)
cell = func.__closure__[cell_idx]
value = cell.cell_contents
elif name in func_env.function_globals:
value = func_env.function_globals[name]
elif func and name == func.__name__:
# Assume recursive function, grab function from cache
value = numba.jit(func_env.func_signature)(func)
else:
raise error.NumbaError(position_node, "No global named '%s'" % (name,))
return value
def no_keywords(node):
if node.keywords or node.starargs or node.kwargs:
raise error.NumbaError(
node, "Function call does not support keyword or star arguments")
class TypeInferer(visitors.NumbaTransformer):
"""
Type inference. Initialize with a minivect context, a Python ast,
and a function type with a given or absent return type.
Infers and checks types and inserts type coercion nodes.
See transform.py for an overview of AST transformations.
"""
# Whether to analyse everything (True), or whether to only analyse
# the result type of the statement (False)
analyse = True
def __init__(self, context, func, ast, closure_scope=None, **kwds):
super(TypeInferer, self).__init__(context, func, ast, **kwds)
self.given_return_type = self.func_signature.return_type
self.return_type = None
ast.symtab = self.symtab
self.closure_scope = closure_scope
ast.closure_scope = closure_scope
ast.closures = []
self.function_level = kwds.get('function_level', 0)
self.init_locals()
ast.have_return = False
def infer_types(self):
"""
Infer types for the function.
"""
self.return_variable = Variable(self.given_return_type)
self.ast = self.visit(self.ast)
self.return_type = self.return_variable.type or void
ret_type = self.func_signature.return_type
if ret_type and ret_type != self.return_type:
self.assert_assignable(ret_type, self.return_type)
self.return_type = self.promote_types(ret_type, self.return_type)
restype, argtypes = self.return_type, self.func_signature.args
self.func_signature = typesystem.function(return_type=restype,
args=argtypes)
#------------------------------------------------------------------------
# Symbol Table Type Population and Argument Processing
#------------------------------------------------------------------------
def initialize_constants(self):
self.symtab['None'] = Variable(typesystem.none, name='None',
is_constant=True, constant_value=None)
self.symtab['True'] = Variable(bool_, name='True', is_constant=True,
constant_value=True)
self.symtab['False'] = Variable(bool_, name='False', is_constant=True,
constant_value=False)
def handle_locals(self, arg_types):
"Process entries in the locals={...} dict"
for local_name, local_type in self.locals.iteritems():
if local_name not in self.symtab:
self.symtab[local_name] = Variable(local_type, is_local=True,
name=local_name)
variable = self.symtab[local_name]
variable.type = local_type
variable.promotable_type = False
if local_name in self.argnames:
idx = self.argnames.index(local_name)
arg_types[idx] = local_type
def initialize_argtypes(self, arg_types):
"Initialize argument types"
for var_name, arg_type in zip(self.local_names, arg_types):
self.symtab[var_name].type = arg_type
def initialize_ssa(self):
"Propagate argument types to first rename of the variable in the block"
for var in self.symtab.values():
if var.parent_var and not var.parent_var.parent_var:
var.type = var.parent_var.type
if not var.type:
var.type = typesystem.UninitializedType(None)
def init_locals(self):
"Populate symbol table for local variables and constants."
arg_types = list(self.func_signature.args)
self.initialize_constants()
self.handle_locals(arg_types)
self.initialize_argtypes(arg_types)
self.initialize_ssa()
self.func_signature = self.func_signature.add('args', arg_types)
self.have_cfg = hasattr(self.ast, 'flow')
if self.have_cfg:
self.deferred_types = []
self.resolve_variable_types()
if debug and self.have_cfg:
for block in self.ast.flow.blocks:
for var in block.symtab.values():
if var.type and var.cf_references:
assert not var.type.is_unresolved
print(("Variable after analysis: %s" % var))
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
def is_object(self, type):
return type.is_object or type.is_array
def promote_types(self, type1, type2):
return ssatypes.promote(self.env.crnt.typesystem, type1, type2)
def promote_types_numeric(self, t1, t2):
"Type promotion but demote objects to numeric types"
if (t1.is_numeric or t2.is_numeric) and (self.is_object(t1) or
self.is_object(t2)):
if t1.is_numeric:
return t1
else:
return t2
else:
return self.promote_types(t1, t2)
def promote(self, v1, v2):
return self.promote_types(v1.type, v2.type)
def assert_assignable(self, dst_type, src_type):
self.promote_types(dst_type, src_type)
def type_from_pyval(self, pyval):
return self.env.crnt.typesystem.typeof(pyval)
#------------------------------------------------------------------------
# SSA-based type inference
#------------------------------------------------------------------------
def handle_NameAssignment(self, assignment_node):
if assignment_node is None:
# ast.Name parameter to the function
return
if isinstance(assignment_node, ast.For):
# Analyse target variable assignment
return self.visit_For(assignment_node)
else:
return self.visit(assignment_node)
def handle_phi(self, node):
# Merge point for different definitions
incoming = [v for v in node.incoming
if not v.type or not v.type.is_uninitialized]
assert incoming
for v in incoming:
if v.type is None:
# We have not analyzed this definition yet, delay the type
# resolution
v.type = v.deferred_type
self.deferred_types.append(v.type)
incoming_types = [v.type for v in incoming]
if len(incoming_types) > 1:
promoted_type = typesystem.PromotionType(
node.variable, partial(ssatypes.promote, self.env.crnt.typesystem),
incoming_types, True)
promoted_type.simplify()
node.variable.type = promoted_type.resolve()
else:
node.variable.type = incoming_types[0]
#print "handled", node.variable
return node
def analyse_assignments(self):
"""
Analyze all variable assignments and phis.
"""
cfg = self.ast.flow
ssa.kill_unused_phis(cfg)
self.analyse = False
self.function_level += 1
for block in cfg.blocks:
# print block
phis = []
for phi in block.phi_nodes:
phi = self.handle_phi(phi)
if phi is not None:
phis.append(phi)
block.phi_nodes = phis
for stat in block.stats:
# TODO: inject back in AST...
if isinstance(stat, control_flow.AttributeAssignment):
stat.assignment_node = self.visit(stat.assignment_node)
elif isinstance(stat, control_flow.NameAssignment):
# print "analysing", stat.lhs
assmnt = self.handle_NameAssignment(stat.assignment_node)
stat.assignment_node = assmnt
self.analyse = True
self.function_level -= 1
def candidates(self, unvisited):
"Types with in-degree zero"
return [type for type in unvisited if len(type.parents) == 0]
def add_resolved_parents(self, unvisited, start_points, strongly_connected):
"Check for immediate resolved parents"
for type in unvisited:
for parent in type.parents:
parent = strongly_connected.get(parent, parent)
if self.is_resolved(parent) or len(parent.parents) == 0:
start_points.append(parent)
def is_resolved(self, t):
return not t.is_unresolved or (t.is_unresolved and not t.is_scc and not
t.resolve().is_unresolved)
def is_trivial_cycle(self, type):
"Return whether the type directly refers to itself"
return type in type.parents
def _debug_type(self, start_point):
if start_point.is_scc:
print(("scc", start_point, start_point.types))
else:
print(start_point)
def remove_resolved_type(self, start_point):
"Remove a resolved type from the type graph"
self.assert_resolved(start_point)
for child in start_point.children:
if start_point in child.parents:
child.parents.remove(start_point)
if start_point.is_scc:
for type in start_point.types:
assert not type.is_scc
self.remove_resolved_type(type)
def assert_resolveable(self, start_point):
"Assert a type in the type graph can be resolved"
assert (len(start_point.parents) == 0 or
self.is_trivial_cycle(start_point) or
self.is_resolved(start_point))
def assert_resolved(self, start_point):
"Assert a type in the type graph is resolved somewhere down the line"
if not (start_point.is_scc or start_point.is_deferred):
r = start_point
while r.is_unresolved:
resolved = r.resolve()
if resolved is r:
break
r = resolved
assert not r.is_unresolved
def process_unvisited(self, unvisited):
"""
Find and resolve any final reduced self-referential
portions in the graph
"""
for u in list(unvisited):
u.simplify()
if u.is_resolved or not u.resolve().is_unresolved:
unvisited.remove(u)
def update_visited(self, start_point, visited, unvisited):
visited.add(start_point)
if start_point in unvisited:
unvisited.remove(start_point)
if start_point.is_scc:
visited.update(start_point.types)
for type in start_point.types:
if type in unvisited:
unvisited.remove(type)
def resolve_variable_types(self):
"""
Resolve the types for all variable assignments. We run type inference
on each assignment which builds a type graph in case of dependencies.
The dependencies are resolved after type inference completes.
"""
self.analyse_assignments()
for deferred_type in self.deferred_types:
deferred_type.update()
#-------------------------------------------------------------------
# Find all unresolved variables
#-------------------------------------------------------------------
unresolved = oset.OrderedSet()
for block in self.ast.flow.blocks:
for variable in block.symtab.itervalues():
if variable.parent_var: # renamed variable
if variable.type.is_unresolved:
variable.type.resolve()
if variable.type.is_unresolved:
unresolved.add(variable.type)
#-------------------------------------------------------------------
# Find the strongly connected components (build a condensation graph)
#-------------------------------------------------------------------
unvisited = oset.OrderedSet(unresolved)
strongly_connected = odict.OrderedDict()
while unresolved:
start_type = unresolved.pop()
sccs = {}
kosaraju_strongly_connected(start_type, sccs, strongly_connected)
unresolved -= set(sccs)
strongly_connected.update(sccs)
#-------------------------------------------------------------------
# Process type dependencies in topological order. Handle strongly
# connected components specially.
#-------------------------------------------------------------------
if unvisited:
unvisited = oset.OrderedSet(strongly_connected.itervalues())
visited = oset.OrderedSet()
# sccs = dict((k, v) for k, v in strongly_connected.iteritems()
# if k is not v)
# unvisited = set([strongly_connected[type] for type in unvisited])
# original_unvisited = set(unvisited)
while unvisited:
L = list(unvisited)
start_points = self.candidates(unvisited)
self.add_resolved_parents(unvisited, start_points,
strongly_connected)
if not start_points:
self.process_unvisited(unvisited)
break
while start_points:
start_point = start_points.pop()
self.assert_resolveable(start_point)
self.update_visited(start_point, visited, unvisited)
# self._debug_type(start_point)
if not self.is_resolved(start_point):
start_point.simplify()
self.remove_resolved_type(start_point)
children = (strongly_connected.get(c, c)
for c in start_point.children
if c not in visited)
start_points.extend(self.candidates(children))
if unvisited:
t = list(unvisited)[0] # for debugging
self.error_unresolved_types(unvisited)
def error_unresolved_types(self, unvisited):
"Raise an exception for a circular dependence we can't resolve"
def getvar(type):
if type.is_scc:
candidates = [type for type in type.scc if not type.is_scc]
return type.scc[0].variable
else:
return type.variable
def pos(type):
assmnt = getvar(type).name_assignment
if assmnt:
assmnt_node = assmnt.assignment_node
return error.format_pos(assmnt_node) or 'na'
else:
return 'na'
type = sorted(unvisited, key=pos)[0]
typesystem.error_circular(getvar(type))
#------------------------------------------------------------------------
# Visit methods
#------------------------------------------------------------------------
def visit(self, node):
if node is Ellipsis:
node = ast.Ellipsis()
result = super(TypeInferer, self).visit(node)
return result
def visit_PhiNode(self, node):
# Already handled
return node
#------------------------------------------------------------------------
# Closures
#------------------------------------------------------------------------
def visit_FunctionDef(self, node):
if self.function_level == 0:
return self.visit_func_children(node)
signature = closures.process_decorators(self.env, self.visit, node)
type = typesystem.ClosureType(signature)
self.symtab[node.name] = Variable(type, is_local=True)
# Generates ClosureNodes that hold inner functions. When visited, they
# do not recurse into the inner functions themselves!
closure = nodes.ClosureNode(self.env, node, type, self.func)
type.closure = closure
self.ast.closures.append(closure)
self.closures[node.name] = closure
return closure
#------------------------------------------------------------------------
# Assignments
#------------------------------------------------------------------------
def _handle_unpacking(self, node):
"""
Handle tuple unpacking
"""
value_type = node.value.variable.type
if len(node.targets) == 1:
# tuple or list constant
targets = node.targets[0].elts
else:
targets = node.targets
# Do some validation
valid_type = (value_type.is_carray or value_type.is_sized_pointer or
value_type.is_list or value_type.is_tuple or
value_type.is_object)
if not valid_type:
self.error(node.value,
'Cannot unpack value of type %s' % (value_type,))
elif value_type != object_ and value_type.size != len(targets):
self.error(node.value,
"Too many/few arguments for tuple unpacking, "
"got (%d, %d)" % (value_type.size, len(targets)))
if isinstance(node.value, (ast.Tuple, ast.List)):
stats = self._unpack_literal(node.targets, node.value.elts)
else:
# TODO: general iterables and iterators
stats = self._unpack_sequence(node.targets, node.value)
return ast.Suite(stats)
def _unpack_literal(self, lhss, rhss):
"""Unpack a literal given the lhs and rhs values as lists"""
rhss = list(map(nodes.CloneableNode, rhss))
# Evaluate RHS first, then generate assignments
return rhss + self._gen_assignments(lhss,
[nodes.CloneNode(n) for n in rhss])
def _unpack_sequence(self, targets, obj):
"""Unpack a sequence given the lhs targets as a list"""
# TODO: Verify length!
obj = nodes.CloneableNode(obj) # evaluate only once!
clone = nodes.CloneNode(obj)
rhss = [nodes.index(clone, i) for i in range(len(targets))]
# Evaluate RHS obj before assignment
return [obj] + self._gen_assignments(targets, rhss)
def _gen_assignments(self, lhss, rhss):
"""
Generate assignments from a list of RHS values to a list of LHS values
"""
for lhs, rhs in zip(lhss, rhss):
lhs.variable.type = rhs.variable.type
return [ast.Assign(targets=[lhs], value=rhs)
for lhs, rhs in zip(lhss, rhss)]
def visit_Assign(self, node):
# Initialize inplace operator
node.inplace_op = getattr(node, 'inplace_op', None)
node.value = self.visit(node.value)
for i in range(len(node.targets)):
node.targets[i] = self.visit(node.targets[i])
if len(node.targets) != 1 or isinstance(node.targets[0], (ast.List,
ast.Tuple)):
return self._handle_unpacking(node)
target = node.targets[0]
self.assign(target, node.value)
lhs_var = target.variable
rhs_var = node.value.variable
if isinstance(target, ast.Name):
node.value = nodes.CoercionNode(node.value, lhs_var.type)
elif lhs_var.type != rhs_var.type:
if lhs_var.type.is_array: # and rhs_var.type.is_array:
# Let other code handle array coercions
pass
else:
node.value = nodes.CoercionNode(node.value, lhs_var.type)
return node
def assign(self, lhs_node, rhs_node, rhs_var=None):
lhs_var = lhs_node.variable
if rhs_var is None:
rhs_var = rhs_node.variable
if lhs_var.type is None:
lhs_var.perform_assignment(rhs_var.type)
elif lhs_var.type != rhs_var.type:
if lhs_var.name in self.locals:
# Type must be consistent
self.assert_assignable(lhs_var.type, rhs_var.type)
if rhs_node:
rhs_node = nodes.CoercionNode(rhs_node, lhs_var.type)
elif lhs_var.type.is_deferred:
# Override type with new assignment of a deferred LHS and
# update the type graph to link it together correctly
assert lhs_var is lhs_var.type.variable
deferred_type = lhs_var.type
lhs_var.perform_assignment(rhs_var.type)
deferred_type.update()
elif isinstance(lhs_node, ast.Name):
if lhs_var.renameable:
# Override type with new assignment
lhs_var.perform_assignment(rhs_var.type)
else:
# Promote type for cellvar or freevar
self.assert_assignable(lhs_var.type, rhs_var.type)
if (lhs_var.type.is_numeric and rhs_var.type.is_numeric and
lhs_var.promotable_type):
lhs_var.perform_assignment(
self.promote_types(lhs_var.type, rhs_var.type))
return rhs_node
#------------------------------------------------------------------------
# Loops and Control Flow
#------------------------------------------------------------------------
def _get_iterator_type(self, node, iterator_type, target_type):
"Get the type of an iterator Variable"
if iterator_type.is_iterator:
base_type = iterator_type.base_type
elif iterator_type.is_range:
base_type = Py_ssize_t
else:
base_type = typesystem.index_type(iterator_type)
return base_type
def visit_For(self, node):
target = node.target
#if not isinstance(target, ast.Name):
# self.error(node.target,
# "Only assignment to target names is supported.")
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
base_type = typesystem.element_type(node.iter.variable.type)
self.assign(node.target, None, rhs_var=Variable(base_type))
if self.analyse:
self.visitlist(node.body)
if self.analyse and node.orelse:
self.visitlist(node.orelse)
return node
def visit_booltest(self, node):
if isinstance(node.test, control_flow.ControlBlock):
node.test.body[0] = nodes.CoercionNode(
node.test.body[0], typesystem.bool_)
else:
node.test = nodes.CoercionNode(node.test, typesystem.bool_)
def visit_While(self, node):
self.generic_visit(node)
self.visit_booltest(node)
return node
visit_If = visit_While
def visit_IfExp(self, node):
self.generic_visit(node)
type_ = self.promote(node.body.variable, node.orelse.variable)
node.variable = Variable(type_)
node.test = nodes.CoercionNode(node.test, typesystem.bool_)
node.orelse = nodes.CoercionNode(node.orelse, type_)
node.body = nodes.CoercionNode(node.body, type_)
return node
#------------------------------------------------------------------------
# Return
#------------------------------------------------------------------------
def visit_Return(self, node):
if node.value is not None:
# 'return value'
self.ast.have_return = True
value = self.visit(node.value)
type = get_type(value)
assert type is not None
else:
# 'return'
value = None
if value is None or type.is_none:
# When returning None, set the return type to void.
# That way, we don't have to deal with the PyObject reference.
if self.return_variable.type is None:
self.return_variable.type = typesystem.void
value = None
elif self.return_variable.type is None:
self.return_variable.type = type
elif self.return_variable.type != type:
# TODO: in case of unpromotable types, return object?
if self.given_return_type is None:
self.return_variable.type = self.promote_types_numeric(
self.return_variable.type, type)
value = nodes.DeferredCoercionNode(value, self.return_variable)
node.value = value
return node
#------------------------------------------------------------------------
# 'with' statement
#------------------------------------------------------------------------
def visit_With(self, node):
assert isinstance(node.context_expr, ast.Name), node.context_expr
if node.context_expr.id == 'nopython':
node = self.visit(nodes.WithNoPythonNode(
body=node.body, lineno=node.lineno,
col_offset=node.col_offset))
else:
node = self.visit(nodes.WithPythonNode(
body=node.body, lineno=node.lineno,
col_offset=node.col_offset))
if (node.body and isinstance(node.body[0], ast.Expr) and
node.body[0].value == 'WITH_BLOCK'):
node.body = node.body[1:]
return node
#------------------------------------------------------------------------
# Variable Assignments and References
#------------------------------------------------------------------------
def init_global(self, name_node):
global_name = name_node.id
globals = self.func_globals
is_builtin = (global_name not in globals and
getattr(builtins, global_name, None))
is_global = not is_builtin
# Determine the type of the global, i.e. a builtin, global
# or (numpy) module
if is_builtin:
type = typesystem.builtin_(global_name, getattr(builtins, global_name))
else:
# FIXME: analyse the bytecode of the entire module, to determine
# overriding of builtins
if isinstance(globals.get(global_name), types.ModuleType):
type = typesystem.module(globals.get(global_name))
else:
value = lookup_global(self.env, global_name, name_node)
type = typesystem.global_(value) # do away with this
variable = Variable(type, name=global_name, is_constant=True,
is_global=is_global, is_builtin=is_builtin,
constant_value=type.value)
self.symtab[global_name] = variable
return variable
def getvar(self, name_node):
local_variable = self.symtab[name_node.id]
if not local_variable.renameable:
variable = local_variable
else:
variable = name_node.variable
return variable
def visit_Name(self, node):
node.name = node.id
var = self.current_scope.lookup(node.id)
is_none = var and node.id in ('None', 'True', 'False')
in_closure_scope = self.closure_scope and node.id in self.closure_scope
if var and (var.is_local or is_none):
if isinstance(node.ctx, ast.Param) or is_none:
variable = self.symtab[node.id]
else:
# Local variable
variable = self.getvar(node)
elif in_closure_scope and not self.is_store(node.ctx):
# Free variable
# print node.id, node.ctx, self.ast.name
closure_var = self.closure_scope[node.id]
variable = Variable.from_variable(closure_var)
variable.is_local = False
variable.is_cellvar = False
variable.is_freevar = True
variable.promotable_type = False
self.symtab[node.id] = variable
else:
# Global or builtin
variable = self.init_global(node)
if variable.type and not variable.type.is_deferred:
if variable.type.is_global: # or variable.type.is_module:
# TODO: look up globals in dict at call time if not
obj = variable.type.value
# if not self.function_cache.is_registered(obj):
variable.type = self.type_from_pyval(obj)
elif variable.type.is_builtin:
# Rewrite builtin-ins later on, give other code the chance
# to handle them first
pass
node.variable = variable
if variable.type and variable.type.is_unresolved:
variable.type = variable.type.resolve()
return node
#------------------------------------------------------------------------
# Binary and Unary operations
#------------------------------------------------------------------------
def visit_BoolOp(self, node):
"and/or expression"
        # NOTE: BoolOp.values can contain two or more items;
        # only 'meta' produces exactly two items.
# if len(node.values) != 2:
# raise AssertionError
assert len(node.values) >= 2
node.values = self.visitlist(node.values)
node.values[:] = nodes.CoercionNode.coerce(node.values, typesystem.bool_)
node.variable = Variable(typesystem.bool_)
return node
def _handle_floordiv(self, node):
dst_type = self.promote(node.left.variable, node.right.variable)
if dst_type.is_float or dst_type.is_int:
node.op = ast.Div()
node = nodes.CoercionNode(node, long_)
node = nodes.CoercionNode(node, dst_type)
return node
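    # Illustrative note (added commentary, not in the original source):
    # visit_BinOp routes ast.FloorDiv through _handle_floordiv above, which
    # rewrites ``a // b`` into a true division coerced through long_ and then
    # back to the promoted type, i.e. roughly:
    #
    #   a // b  ->  dst_type(long_(a / b))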
def _verify_pointer_type(self, node, v1, v2):
pointer_type = self.have_types(v1, v2, "is_pointer", "is_int")
if pointer_type is None:
raise error.NumbaError(
node, "Expected pointer and int types, got (%s, %s)" %
(v1.type, v2.type))
if not isinstance(node.op, ast.Add): # ast.Sub)):
# TODO: pointer subtraction
raise error.NumbaError(
node, "Can only perform pointer arithmetic with +")
if pointer_type.base_type.is_void:
raise error.NumbaError(
node, "Cannot perform pointer arithmetic on void *")
def visit_BinOp(self, node):
node.left = self.visit(node.left)
node.right = self.visit(node.right)
if nodes.is_bitwise(node.op):
# TODO: Do this better
typesystem.require(
[n for n in [node.left, node.right]
if self.is_resolved(n.variable.type)],
["is_int", 'is_object', 'is_bool'])
v1, v2 = node.left.variable, node.right.variable
coerce_operands = True
# Handle string formatting with %
if isinstance(node.op, ast.Mod) and v1.type.is_c_string:
promoted_type = object_
elif isinstance(node.op, ast.Sub) and \
v1.type.is_numpy_datetime and \
v2.type.is_numpy_datetime:
promoted_type = timedelta()
coerce_operands = False
elif isinstance(node.op, ast.Add) and \
((v1.type.is_numpy_datetime and v2.type.is_numpy_timedelta) or
(v2.type.is_numpy_datetime and v1.type.is_numpy_timedelta)):
promoted_type = datetime()
coerce_operands = False
elif isinstance(node.op, ast.Sub) and \
((v1.type.is_numpy_datetime and v2.type.is_numpy_timedelta) or
(v2.type.is_numpy_datetime and v1.type.is_numpy_timedelta)):
promoted_type = datetime()
coerce_operands = False
else:
promoted_type = self.promote(v1, v2)
if promoted_type.is_pointer:
self._verify_pointer_type(node, v1, v2)
elif coerce_operands and not ((v1.type.is_array and v2.type.is_array) or
(v1.type.is_unresolved or v2.type.is_unresolved)):
# Don't coerce arrays to lesser or higher dimensionality
# Broadcasting transforms should take care of this
node.left, node.right = nodes.CoercionNode.coerce(
[node.left, node.right], promoted_type)
node.variable = Variable(promoted_type)
if isinstance(node.op, ast.FloorDiv):
node = self._handle_floordiv(node)
return node
def visit_UnaryOp(self, node):
node.operand = self.visit(node.operand)
if isinstance(node.op, ast.Not):
node.operand = nodes.CoercionNode(node.operand, typesystem.bool_)
node.variable = Variable(typesystem.bool_)
else:
node.variable = Variable(node.operand.variable.type)
if isinstance(node.op, ast.Invert):
typesystem.require([node], ["is_int", "is_object"])
return node
def visit_Compare(self, node):
self.generic_visit(node)
lhs = node.left
comparators = node.comparators
types = [lhs.variable.type] + [c.variable.type for c in comparators]
result_type = bool_
if len(set(types)) != 1:
type = reduce(self.promote_types, types)
if type.is_array:
result_type = typesystem.array(bool_, type.ndim)
else:
node.left = nodes.CoercionNode(lhs, type)
node.comparators = [nodes.CoercionNode(c, type)
for c in comparators]
node.variable = Variable(result_type)
return node
#------------------------------------------------------------------------
# Indexing and Slicing
#------------------------------------------------------------------------
def _handle_struct_index(self, node, value_type):
slice_type = node.slice.variable.type
if not isinstance(node.slice, ast.Index) or not (
slice_type.is_int or slice_type.is_string):
raise error.NumbaError(node.slice,
"Struct index must be a single string "
"or integer")
if not isinstance(node.slice.value, nodes.ConstNode):
raise error.NumbaError(node.slice,
"Struct index must be constant")
field_idx = node.slice.value.pyval
if slice_type.is_int:
if field_idx > len(value_type.fields):
raise error.NumbaError(node.slice,
"Struct field index too large")
field_name, field_type = value_type.fields[field_idx]
else:
field_name = field_idx
return ast.Attribute(value=node.value, attr=field_name, ctx=node.ctx)
def assert_index(self, type, node):
if type.is_unresolved:
type.make_assertion('is_int', node, "Expected an integer")
elif not type.is_int:
            self.error(node, "Expected an integer")
def get_resolved_type(self, type):
if type.is_unresolved:
type.simplify()
type = type.resolve()
if type.is_promotion and len(type.types) == 2:
type1, type2 = type.types
if type1.is_deferred and type2.is_deferred:
return type
elif type1.is_deferred:
return type2, type1
elif type2.is_deferred:
return type1, type2
else:
return None, type
else:
return type, None
def visit_Subscript(self, node, visitchildren=True):
if visitchildren:
node.value = self.visit(node.value)
node.slice = self.visit(node.slice)
value = node.value
value_type = node.value.variable.type
deferred_type = deferred.create_deferred(self, node,
typesystem.DeferredIndexType)
if value_type and value_type.is_unresolved:
deferred_type.dependences.append(node.value)
deferred_type.update()
return node
slice_variable = node.slice.variable
slice_type = slice_variable.type
if value_type.is_array:
# Handle array indexing
if (slice_type.is_tuple and
isinstance(node.slice, ast.Index)):
node.slice = node.slice.value
slices = None
if (isinstance(node.slice, ast.Index) or
slice_type.is_ellipsis or slice_type.is_slice):
slices = [node.slice]
elif isinstance(node.slice, ast.ExtSlice):
slices = list(node.slice.dims)
elif isinstance(node.slice, ast.Tuple):
slices = list(node.slice.elts)
if slices is None:
if slice_type.is_tuple:
# result_type = value_type[slice_type.size:]
# TODO: use slice_variable.constant_value if available
result_type = typesystem.object_
else:
result_type = typesystem.object_
elif any(slice_node.variable.type.is_unresolved for slice_node in slices):
for slice_node in slices:
if slice_node.variable.type.is_unresolved:
deferred_type.dependences.append(slice_node)
deferred_type.update()
result_type = deferred_type
else:
result = numpy_support.unellipsify(node.value, slices, node)
result_type, node.value = result
elif value_type.is_carray:
# Handle C array indexing
if (not slice_variable.type.is_int and not
slice_variable.type.is_unresolved):
self.error(node.slice, "Can only index with an int")
if not isinstance(node.slice, ast.Index):
self.error(node.slice, "Expected index")
# node.slice = node.slice.value
result_type = value_type.base_type
elif value_type.is_struct:
node = self._handle_struct_index(node, value_type)
return self.visit(node)
elif value_type.is_pointer:
self.assert_index(slice_variable.type, node.slice)
result_type = value_type.base_type
elif value_type.is_object:
result_type = object_
elif value_type.is_string:
# Handle string indexing
if slice_type.is_int:
result_type = char
elif slice_type.is_slice:
result_type = c_string_type
elif slice_type.is_unresolved:
deferred_type.dependences.append(node.slice)
deferred_type.update()
result_type = deferred_type
else:
# TODO: check for insanity
node.value = nodes.CoercionNode(node.value, object_)
node.slice = nodes.CoercionNode(node.slice, object_)
result_type = object_
else:
op = ('sliced', 'indexed')[slice_variable.type.is_int]
raise error.NumbaError(node, "object of type %s cannot be %s" %
(value_type, op))
node.variable.type = result_type
return node
def visit_Index(self, node):
"Normal index"
node.value = self.visit(node.value)
variable = node.value.variable
type = variable.type
if (type.is_object and variable.is_constant and
variable.constant_value is None):
type = typesystem.newaxis
node.variable = Variable(type)
return node
def visit_Ellipsis(self, node):
return nodes.ConstNode(Ellipsis, typesystem.ellipsis)
def visit_Slice(self, node):
self.generic_visit(node)
type = typesystem.slice_
is_constant = False
const = None
values = [node.lower, node.upper, node.step]
constants = []
for value in values:
if value is None:
constants.append(None)
elif value.variable.is_constant:
constants.append(value.variable.constant_value)
else:
break
else:
is_constant = True
const = slice(*constants)
node.variable = Variable(type, is_constant=is_constant,
constant_value=const)
return node
def visit_ExtSlice(self, node):
self.generic_visit(node)
node.variable = Variable(typesystem.object_)
return node
#------------------------------------------------------------------------
# Constants
#------------------------------------------------------------------------
def visit_Num(self, node):
return nodes.ConstNode(node.n)
def visit_Str(self, node):
return nodes.ConstNode(node.s)
def visit_long(self, value):
return nodes.ConstNode(value, long_)
def _get_constants(self, constants):
items = []
constant_value = None
for i, item_node in enumerate(constants):
# long constants like 5L are direct values, not Nums!
if isinstance(item_node, long):
constants[i] = nodes.ConstNode(item_node, long_)
items.append(item_node)
elif item_node.variable.is_constant:
items.append(item_node.variable.constant_value)
else:
return None
return items
def _get_constant_list(self, node):
if not isinstance(node.ctx, ast.Load):
return None
return self._get_constants(node.elts)
def visit_Tuple(self, node):
self.visitlist(node.elts)
constant_value = self._get_constant_list(node)
if constant_value is not None:
constant_value = tuple(constant_value)
type = numba.typeof(constant_value)
else:
type = typesystem.tuple_(object_, size=len(node.elts))
node.variable = Variable(type, is_constant=constant_value is not None,
constant_value=constant_value)
return node
def visit_List(self, node):
node.elts = self.visitlist(node.elts)
constant_value = self._get_constant_list(node)
if constant_value:
type = numba.typeof(constant_value)
else:
type = typesystem.list_(object_, size=len(node.elts))
node.variable = Variable(type, is_constant=constant_value is not None,
constant_value=constant_value)
return node
def visit_Dict(self, node):
self.generic_visit(node)
constant_keys = self._get_constants(node.keys)
constant_values = self._get_constants(node.values)
if constant_keys and constant_values:
unify = self.promote_types
key_type = reduce(unify, (self.type_from_pyval(key)
for key in constant_keys))
            value_type = reduce(unify, (self.type_from_pyval(value)
                                        for value in constant_values))
type = typesystem.dict_(key_type, value_type, size=len(node.keys))
variable = Variable(type, is_constant=True,
constant_value=dict(zip(constant_keys,
constant_values)))
else:
type = typesystem.dict_(object_, object_, size=len(node.keys))
variable = Variable(type)
node.variable = variable
return node
#------------------------------------------------------------------------
# Function and Method Calls
#------------------------------------------------------------------------
def _resolve_external_call(self, call_node, func_type, py_func, arg_types):
"""
Resolve a call to a function. If we know about the function,
generate a native call, otherwise go through PyObject_Call().
"""
if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
logger.debug('func_type = %r, py_func = %r, call_node = %s' %
(func_type, py_func, utils.pformat_ast(call_node)))
if not func_type.is_object and not func_type.is_known_value:
raise error.NumbaError(
call_node, "Cannot call object of type %s" % (func_type,))
flags = None # TODO: stub
signature = None
llvm_func = None
new_node = nodes.call_obj(call_node, py_func)
have_unresolved_argtypes = any(arg_type.is_unresolved
for arg_type in arg_types)
if func_type.is_jit_function:
llvm_func = func_type.jit_func.lfunc
signature = func_type.jit_func.signature
elif have_unresolved_argtypes and not func_type == object_:
result = self.function_cache.get_function(py_func, arg_types, flags)
if result is not None:
signature, llvm_func, _ = result
else:
new_node = deferred.create_deferred_call(
self, arg_types, call_node)
if (module_type_inference.is_registered(py_func) and
module_type_inference.can_handle_deferred(py_func)):
new_node = infer_call.infer_typefunc(self.context, call_node,
func_type, new_node)
elif self.function_cache.is_registered(py_func):
py_func = py_func.py_func
signature = typesystem.function(None, arg_types)
jitted_func = numba.jit(signature)(py_func)
signature = jitted_func.signature
llvm_func = jitted_func.lfunc
else:
# This should not be a function-cache method
# signature = self.function_cache.get_signature(arg_types)
new_node = self._resolve_return_type(func_type, new_node,
call_node, arg_types)
if llvm_func is not None:
# Generate a native call instead of an object call
assert signature is not None
new_node = nodes.NativeCallNode(signature, call_node.args,
llvm_func, py_func)
return new_node
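    # Rough usage sketch (added for clarity, not original source): given
    #
    #   @numba.jit(double(double))
    #   def f(x): ...
    #
    # a call ``f(y)`` inside compiled code hits the ``is_jit_function`` branch
    # above and becomes a NativeCallNode against the jitted LLVM function,
    # whereas a call to an unknown Python object falls back to the generic
    # ``nodes.call_obj`` path (PyObject_Call), with at best a refined return
    # type from _resolve_return_type.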
def _resolve_method_calls(self, func_type, new_node, node):
"Resolve special method calls"
if ((func_type.base_type.is_complex or
func_type.base_type.is_float) and
func_type.attr_name == 'conjugate'):
assert isinstance(node.func, ast.Attribute)
if node.args or node.keywords:
raise error.NumbaError(
"conjugate method of complex number does not "
"take arguments")
if func_type.base_type.is_float:
return node.func.value
new_node = nodes.ComplexConjugateNode(node.func.value)
new_node.variable = Variable(func_type.base_type)
return new_node
def _infer_complex_math(self, func_type, new_node, node, argtype):
"Infer types for cmath.somefunc()"
# Check for cmath.{sqrt,sin,etc}
if (len(node.args) == 1 and
func_type.value.__name__ in mathmodule.mathsyms):
new_node = nodes.CoercionNode(new_node, complex128)
return new_node
def _resolve_return_type(self, func_type, new_node, node, argtypes):
"""
We are performing a call through PyObject_Call, but we may be able
to infer a more specific return value than 'object'.
"""
if ((func_type.is_module_attribute and func_type.module is cmath) or
(func_type.is_numpy_attribute and len(argtypes) == 1)):
new_node = self._infer_complex_math(
func_type, new_node, node, argtypes[0])
return infer_call.infer_typefunc(self.context, node,
func_type, new_node)
def _resolve_autojit_method_call(self, call_node, ext_type, attr):
from numba.exttypes import signatures
argtypes = tuple(a.variable.type for a in call_node.args)
argtypes = (ext_type,) + argtypes
if (attr, argtypes) not in ext_type.specialized_methods:
if ext_type.extclass is None:
raise error.NumbaError(
call_node, "Cannot yet call autojit methods from jit "
"methods (which includes the constructor).")
# Compile the autojit method
# TODO: Compile for the base class ext_type (the class owning
# TODO: the method)
untyped_method = ext_type.untyped_methods[attr]
method = untyped_method.clone()
method.signature = typesystem.function(None, argtypes)
compiler_impl = numba.wrapping.compiler.MethodCompiler(
self.env, ext_type.extclass, method)
compiler_impl.compile(method.signature)
# Retrieve specialized method
method = ext_type.specialized_methods[attr, argtypes]
# Update method signature
method_maker = signatures.MethodMaker()
method.signature = method_maker.make_method_type(method)
else:
method = ext_type.specialized_methods[attr, argtypes]
# Generate method access
extension_method_node = call_node.func
obj_node = extension_method_node.value
methodnode = nodes.ExtensionMethod(obj_node, attr, method)
# Generate method call
new_node = nodes.NativeFunctionCallNode(
method.signature, methodnode, call_node.args,
skip_self=True)
return new_node
def visit_Call(self, node, visitchildren=True):
if node.starargs or node.kwargs:
raise error.NumbaError("star or keyword arguments not implemented")
node.func = self.visit(node.func)
func_variable = node.func.variable
func_type = func_variable.type
func = infer_call.resolve_function(func_variable)
#if not self.analyse and func_type.is_cast and len(node.args) == 1:
# # Short-circuit casts
# no_keywords(node)
# return nodes.CastNode(node.args[0], func_type.dst_type)
if visitchildren:
self.visitlist(node.args)
self.visitlist(node.keywords)
# TODO: Resolve variable types based on how they are used as arguments
# TODO: in calls with known signatures
new_node = None
if func_type.is_autojit_extmethod:
assert isinstance(node.func, nodes.ExtensionMethod)
new_node = self._resolve_autojit_method_call(
node, node.func.ext_type, node.func.attr)
if func_type.is_function or func_type.is_extmethod:
# Native function call
no_keywords(node)
new_node = nodes.NativeFunctionCallNode(
func_variable.type, node.func, node.args,
skip_self=True)
elif func_type.is_method:
# Call to special object method
no_keywords(node)
new_node = self._resolve_method_calls(func_type, new_node, node)
elif func_type.is_closure:
assert node.func
# TODO: what if node.func is not an ast.Name?
# Call to closure/inner function
return nodes.ClosureCallNode(func_type, node)
elif func_type.is_pointer_to_function:
# Call to ctypes function
no_keywords(node)
new_node = nodes.PointerCallNode(
func_type.signature,
node.args,
func_type.ptr)
elif func_type.is_cast:
# Call of a numba type
# 1) double(value) -> cast value to double
# 2) double() or double(object_, double), ->
# this specifies a function signature
no_keywords(node)
if len(node.args) != 1 or node.args[0].variable.type.is_cast:
new_node = infer_call.parse_signature(node, func_type)
else:
new_node = nodes.CoercionNode(node.args[0], func_type.dst_type)
if new_node is None:
# All other type of calls:
# 1) call to compiled/autojitting numba function
# 2) call to some math or numpy math function (np.sin, etc)
# 3) call to special numpy functions (np.empty, etc)
# 4) generic call using PyObject_Call
if func_type.is_jit_function:
func = func_type.jit_func.py_func
arg_types = func_type.jit_func.signature.args
else:
arg_types = [a.variable.type for a in node.args]
new_node = self._resolve_external_call(node, func_type,
func, arg_types)
return new_node
def visit_CastNode(self, node):
if self.analyse:
arg = self.visit(node.arg)
return nodes.CoercionNode(arg, node.type)
else:
return node
#------------------------------------------------------------------------
# Attributes
#------------------------------------------------------------------------
def _resolve_module_attribute(self, node, type):
"Resolve attributes of the numpy module or a submodule"
attribute = getattr(type.module, node.attr)
# TODO: Do this better
result_type = None
if attribute is numpy.newaxis:
result_type = typesystem.newaxis
elif attribute is numba.NULL:
return typesystem.null
elif type.is_numpy_module or type.is_numpy_attribute:
result_type = typesystem.numpy_attribute(module=type.module,
attr=node.attr)
elif type.is_numba_module or type.is_math_module:
result_type = self.context.typemapper.from_python(attribute)
if result_type == object_:
result_type = None
if result_type is None:
if hasattr(type.module, node.attr):
result_type = self.type_from_pyval(getattr(type.module,
node.attr))
if result_type != object_:
return result_type
result_type = typesystem.module_attribute(module=type.module,
attr=node.attr)
return result_type
def _resolve_ndarray_attribute(self, array_node, array_attr):
"Resolve attributes of numpy arrays"
return
def is_store(self, ctx):
return isinstance(ctx, ast.Store)
def extattr_mangle(self, attr_name, type):
if attr_name.startswith("__") and not attr_name.endswith("__"):
attr_name = "_%s%s" % (type.name, attr_name)
return attr_name
def _resolve_extension_attribute(self, node, ext_type):
attr = self.extattr_mangle(node.attr, ext_type)
if attr in ext_type.methoddict:
method = ext_type.methoddict[attr]
return nodes.ExtensionMethod(node.value, attr, method)
if attr in ext_type.untyped_methods:
method = ext_type.untyped_methods[attr]
return nodes.AutojitExtensionMethod(node.value, attr, method)
if attr not in ext_type.attributedict:
if ext_type.is_resolved or not self.is_store(node.ctx):
raise error.NumbaError(
node, "Cannot access attribute %s of ext_type %s" % (
node.attr, ext_type.name))
# Infer the type for this extension attribute using a
# special Variable
variable = ExtensionAttributeVariable(ext_type, attr, type=None)
else:
variable = Variable(ext_type.attributedict[attr])
return nodes.ExtTypeAttribute(node.value, attr, variable,
node.ctx, ext_type)
def _resolve_struct_attribute(self, node, type):
type = nodes.struct_type(type)
if not node.attr in type.fielddict:
raise error.NumbaError(
node, "Struct %s has no field %r" % (type, node.attr))
if isinstance(node.ctx, ast.Store):
if not isinstance(node.value, (ast.Name, ast.Subscript,
nodes.StructVariable)):
raise error.NumbaError(
node, "Can only assign to struct attributes of "
"variables or array indices")
node.value.ctx = ast.Store()
return nodes.StructAttribute(node.value, node.attr, node.ctx,
node.value.variable.type)
def _resolve_complex_attribute(self, node, type):
        # TODO: make complex a struct type
if node.attr in ('real', 'imag'):
if self.is_store(node.ctx):
raise TypeError("Cannot assign to the %s attribute of "
"complex numbers" % node.attr)
result_type = type.base_type
else:
raise AttributeError("'%s' of complex type" % node.attr)
return result_type
def _resolve_datetime_attribute(self, node, type):
if node.attr in ('timestamp', 'units'):
if self.is_store(node.ctx):
raise TypeError("Cannot assign to the %s attribute of "
"datetime numbers" % node.attr)
result_type = getattr(type, node.attr)
elif node.attr == 'year':
result_type = int64
elif node.attr in ['month', 'day', 'hour', 'min', 'sec']:
result_type = int32
else:
raise AttributeError("'%s' of datetime type" % node.attr)
return result_type
def _resolve_timedelta_attribute(self, node, type):
if node.attr in ('diff', 'units'):
if self.is_store(node.ctx):
raise TypeError("Cannot assign to the %s attribute of "
"timedelta numbers" % node.attr)
result_type = getattr(type, node.attr)
else:
raise AttributeError("'%s' of timedelta type" % node.attr)
return result_type
def visit_Attribute(self, node, visitchildren=True):
if visitchildren:
node.value = self.visit(node.value)
type = node.value.variable.type
if type.is_unresolved:
result_type = deferred.create_deferred(self, node,
typesystem.DeferredAttrType)
elif node.attr == 'conjugate' and (type.is_complex or type.is_float):
result_type = typesystem.method(type, 'conjugate')
elif type.is_complex:
result_type = self._resolve_complex_attribute(node, type)
elif type.is_datetime:
result_type = self._resolve_datetime_attribute(node, type)
elif type.is_timedelta:
result_type = self._resolve_timedelta_attribute(node, type)
elif type.is_struct or (type.is_reference and
type.referenced_type.is_struct):
return self._resolve_struct_attribute(node, type)
elif type.is_module and hasattr(type.module, node.attr):
result_type = self._resolve_module_attribute(node, type)
elif (type.is_known_value and
module_type_inference.is_registered((type.value, node.attr))):
# Unbound method call, e.g. np.add.reduce
result_type = typesystem.known_value((type.value, node.attr),
is_object=True)
elif type.is_array and node.attr in ('data', 'shape', 'strides', 'ndim'):
# handle shape/strides/ndim etc
return nodes.ArrayAttributeNode(node.attr, node.value)
elif type.is_array and node.attr == "dtype":
# TODO: resolve as constant at compile time?
result_type = typesystem.numpy_dtype(type.dtype)
elif type.is_extension:
return self._resolve_extension_attribute(node, type)
else:
# use PyObject_GetAttrString
node.value = nodes.CoercionNode(node.value, object_)
result_type = object_
node.variable = Variable(result_type)
node.type = result_type
return node
def visit_ClosureScopeLoadNode(self, node):
return node
def visit_FuncDefExprNode(self, node):
return self.visit(node.func_def)
#------------------------------------------------------------------------
# Unsupported nodes
#------------------------------------------------------------------------
def visit_Global(self, node):
raise error.NumbaError(node, "Global keyword")
#------------------------------------------------------------------------
# Coercions
#------------------------------------------------------------------------
def visit_UntypedCoercion(self, node):
if self.analyse:
value = self.visit(node.node)
return nodes.CoercionNode(value, node.type)
return node
#------------------------------------------------------------------------
# User nodes
#------------------------------------------------------------------------
def visit_UserNode(self, node):
return node.infer_types(self)
#------------------------------------------------------------------------
# Nodes that should be deleted after type inference
#------------------------------------------------------------------------
def visit_MaybeUnusedNode(self, node):
return self.visit(node.name_node)
class TypeSettingVisitor(visitors.NumbaTransformer):
"""
Set node.type for all AST nodes after type inference from node.variable.
Allows for deferred coercions (may be removed in the future).
"""
def visit_FunctionDef(self, node):
self.generic_visit(node)
if node.flow:
for block in node.flow.blocks:
for phi in block.phi_nodes:
self.handle_phi(phi)
rettype = self.func_signature.return_type
if rettype.is_unresolved:
rettype = rettype.resolve()
assert not rettype.is_unresolved
self.func_signature.return_type = rettype
return node
def resolve(self, variable):
"""
        Resolve any unresolved types, and resolve any final disconnected
type graphs that haven't been simplified. This can be the case if
the type of a variable does not depend on the type of a sub-expression
which may be unresolved, e.g.:
y = 0
for i in range(...):
x = int(y + 4) # y is unresolved here, so we have
# promote(deferred(y), int)
y += 1
"""
if variable.type.is_unresolved:
variable.type = variable.type.resolve()
if variable.type.is_unresolved:
variable.type = typesystem.resolve_var(variable)
assert not variable.type.is_unresolved
def visit(self, node):
if hasattr(node, 'variable'):
self.resolve(node.variable)
node.type = node.variable.type
return super(TypeSettingVisitor, self).visit(node)
def handle_phi(self, node):
for incoming_var in node.incoming:
self.resolve(incoming_var)
self.resolve(node.variable)
node.type = node.variable.type
return node
def visit_Name(self, node):
return node
def visit_ExtSlice(self, node):
self.generic_visit(node)
types = [n.type for n in node.dims]
if all(type.is_numeric for type in types):
node.type = reduce(self.env.crnt.typesystem.promote, types)
if not node.type.is_int:
self.warn(node, "Truncating result index type %s "
"to Py_ssize_t" % node.type)
node.type = Py_ssize_t
else:
node.type = object_
return node
def visit_DeferredCoercionNode(self, node):
"Resolve deferred coercions"
self.generic_visit(node)
return nodes.CoercionNode(node.node, node.variable.type)
########NEW FILE########
__FILENAME__ = infer_call
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import *
from numba import error, nodes
from numba.type_inference import module_type_inference
from numba import typesystem
if PY3:
import builtins
else:
import __builtin__ as builtins
debug = False
#debug = True
def resolve_function(func_variable):
"Get a function object given a function name"
func = None
func_type = func_variable.type
if func_type.is_builtin:
func = getattr(builtins, func_variable.name)
elif func_type.is_global:
func = func_type.value
elif func_type.is_module_attribute:
func = getattr(func_type.module, func_type.attr)
elif func_type.is_autojit_function:
func = func_type.autojit_func
elif func_type.is_jit_function:
func = func_type.jit_func
return func
def infer_typefunc(context, call_node, func_type, default_node):
func_var = call_node.func.variable
if func_var.is_constant:
func_type = typesystem.known_value(func_var.constant_value)
if (func_type.is_known_value and
module_type_inference.is_registered(func_type.value)):
# Try the module type inferers
result_node = module_type_inference.resolve_call_or_none(
context, call_node, func_type)
if result_node:
return result_node
return default_node
def parse_signature(node, func_type):
types = []
for arg in node.args:
if not arg.variable.type.is_cast:
raise error.NumbaError(arg, "Expected a numba type")
else:
types.append(arg.variable.type)
signature = func_type.dst_type(*types)
new_node = nodes.const(signature, typesystem.meta(signature))
return new_node
########NEW FILE########
__FILENAME__ = builtinmodule
# -*- coding: utf-8 -*-
"""
Type functions for Python builtins.
"""
from __future__ import print_function, division, absolute_import
import warnings
import ast
from numba import *
from numba import nodes
from numba import error
# from numba import function_util
from numba.symtab import Variable
from numba import typesystem
from numba.typesystem import get_type
from numba.type_inference.modules import utils
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
register_builtin = utils.register_with_argchecking
def cast(node, dst_type):
if len(node.args) == 0:
return nodes.ConstNode(0, dst_type)
else:
return nodes.CoercionNode(node.args[0], dst_type=dst_type)
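# Illustrative examples of the cast() helper above (added commentary, not
# part of the original module):
#
#   double()   ->  nodes.ConstNode(0, double)        # zero of that type
#   double(x)  ->  nodes.CoercionNode(x, double)     # cast the argument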
#----------------------------------------------------------------------------
# Type Functions for Builtins
#----------------------------------------------------------------------------
# TODO: add specializer functions to insert coercions before late specialization
# TODO: don't rewrite AST here
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def range_(typesystem, node, start, stop, step):
node.variable = Variable(typesystem.range_)
node.args = nodes.CoercionNode.coerce(node.args, dst_type=Py_ssize_t)
return node
if not PY3:
@register_builtin((1, 2, 3), can_handle_deferred_types=True)
def xrange_(typesystem, node, start, stop, step):
return range_(typesystem, node, start, stop, step)
@register_builtin(1)
def len_(typesystem, node, obj):
# Simplify len(array) to ndarray.shape[0]
argtype = get_type(obj)
if argtype.is_array:
shape_attr = nodes.ArrayAttributeNode('shape', node.args[0])
new_node = nodes.index(shape_attr, 0)
return new_node
elif argtype.is_string:
return nodes.CoercionNode(nodes.typednode(node, size_t), Py_ssize_t)
return Py_ssize_t # Object call
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _int(typesystem, node, x, base, dst_type=int_):
# Resolve int(x) and float(x) to an equivalent cast
if len(node.args) < 2:
return cast(node, dst_type)
node.variable = Variable(dst_type)
return node
if not PY3:
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def _long(typesystem, node, x, base):
return _int(typesystem, node, x, base)
@register_builtin((0, 1), can_handle_deferred_types=True)
def _float(typesystem, node, x):
return cast(node, double)
@register_builtin((0, 1), can_handle_deferred_types=True)
def _bool(context, node, x):
return cast(node, bool_)
@register_builtin((0, 1, 2), can_handle_deferred_types=True)
def complex_(typesystem, node, a, b):
if len(node.args) == 2:
args = nodes.CoercionNode.coerce(node.args, double)
return nodes.ComplexNode(real=args[0], imag=args[1])
else:
return cast(node, complex128)
'''@register_builtin((0, 1, 2, 3, 4, 5, 6), can_handle_deferred_types=True)
def datetime_(typesystem, node, a, b, c, d, e, f):
if len(node.args) == 6:
typelist = [int64, int32, int32, int32, int32, int32]
args = nodes.CoercionNode.coerce(node.args, typelist)
return nodes.DateTimeNode(year=args[0], month=args[1], day=args[2],
hour=args[3], min=args[4], sec=args[5])
else:
return cast(node, datetime)'''
def abstype(argtype):
if argtype.is_complex:
result_type = argtype.base_type
elif argtype.is_float or argtype.is_int:
result_type = argtype
else:
result_type = object_
return result_type
@register_builtin(1)
def abs_(typesystem, node, x):
argtype = typesystem.promote(long_, get_type(x))
dst_type = abstype(argtype)
node.variable = Variable(dst_type)
node.args = [nodes.CoercionNode(x, argtype)]
return node
@register_builtin((2, 3))
def pow_(typesystem, node, base, exponent, mod=None):
if mod:
warnings.warn(
"pow() with modulo (third) argument not natively supported")
return nodes.call_pyfunc(pow, [base, exponent, mod])
from . import mathmodule
dst_type = mathmodule.binop_type(typesystem, base, exponent)
result = mathmodule.infer_math_call(typesystem, node, base, exponent, mod)
if dst_type.is_int:
# TODO: Implement pow(int) in llvmmath
return nodes.CoercionNode(result, dst_type)
return result
@register_builtin((1, 2))
def round_(typesystem, node, number, ndigits):
argtype = get_type(number)
if len(node.args) == 1 and argtype.is_int:
# round(myint) -> float(myint)
return nodes.CoercionNode(node.args[0], double)
if argtype.is_float or argtype.is_int:
dst_type = double
else:
dst_type = object_
node.args[0] = nodes.CoercionNode(node.args[0], object_)
node.variable = Variable(dst_type)
return node # nodes.CoercionNode(node, double)
def minmax(typesystem, args, op):
if len(args) < 2:
return
res = args[0]
for arg in args[1:]:
lhs_type = get_type(res)
rhs_type = get_type(arg)
res_type = typesystem.promote(lhs_type, rhs_type)
if lhs_type != res_type:
res = nodes.CoercionNode(res, res_type)
if rhs_type != res_type:
arg = nodes.CoercionNode(arg, res_type)
lhs_temp = nodes.TempNode(res_type)
rhs_temp = nodes.TempNode(res_type)
res_temp = nodes.TempNode(res_type)
lhs = lhs_temp.load(invariant=True)
rhs = rhs_temp.load(invariant=True)
expr = ast.IfExp(ast.Compare(lhs, [op], [rhs]), lhs, rhs)
body = [
ast.Assign([lhs_temp.store()], res),
ast.Assign([rhs_temp.store()], arg),
ast.Assign([res_temp.store()], expr),
]
res = nodes.ExpressionNode(body, res_temp.load(invariant=True))
return res
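# Sketch of what minmax() builds (added commentary, not original source):
# for ``min(a, b)`` the expression is expanded, using temporaries, into
# roughly
#
#   lhs = a; rhs = b
#   res = lhs if lhs < rhs else rhs
#
# and the chain is folded left-to-right for more than two arguments, with
# operands coerced to their promoted common type at each step.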
@register_builtin(None)
def min_(typesystem, node, *args):
return minmax(typesystem, args, ast.Lt())
@register_builtin(None)
def max_(typesystem, node, *args):
return minmax(typesystem, args, ast.Gt())
@register_builtin(0)
def globals_(typesystem, node):
return typesystem.dict_of_obj
# return nodes.ObjectInjectNode(func.__globals__)
@register_builtin(0)
def locals_(typesystem, node):
raise error.NumbaError("locals() is not supported in numba functions")
@register_builtin(1)
def ord_(typesystem, node, expr):
type = get_type(expr)
if type.is_int and type.typename in ("char", "uchar"):
return nodes.CoercionNode(expr, int_)
elif type.is_string:
# TODO:
pass
@register_builtin(1)
def chr_(typesystem, node, expr):
type = get_type(expr)
if type.is_int:
return nodes.CoercionNode(expr, char)
########NEW FILE########
__FILENAME__ = mathmodule
# -*- coding: utf-8 -*-
"""
Resolve calls to math functions.
During type inference this annotates math calls, and during
final specialization it produces LLVMIntrinsicNode and MathCallNode
nodes.
"""
from __future__ import print_function, division, absolute_import
import math
import cmath
import collections
from functools import partial, reduce
try:
import __builtin__ as builtins
except ImportError:
import builtins
import numpy as np
from numba import *
from numba import nodes
from numba.symtab import Variable
from numba.typesystem import get_type, rank
from numba.type_inference.modules import utils
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
register_math_typefunc = utils.register_with_argchecking
def binop_type(typesystem, x, y):
"Binary result type for math operations"
x_type = get_type(x)
y_type = get_type(y)
return typesystem.promote(x_type, y_type)
#----------------------------------------------------------------------------
# Determine math functions
#----------------------------------------------------------------------------
# sin(double), sinf(float), sinl(long double)
mathsyms = [
'sin',
'cos',
'tan',
'sqrt',
'acos',
'asin',
'atan',
'sinh',
'cosh',
'tanh',
'asinh',
'acosh',
'atanh',
'log',
'log2',
'log10',
'erfc',
'floor',
'ceil',
'exp',
'exp2',
'expm1',
'rint',
'log1p',
]
n_ary_mathsyms = {
'hypot' : 2,
'atan2' : 2,
'logaddexp' : 2,
'logaddexp2': 2,
'pow' : (2, 3),
}
math2ufunc = {
'asin' : 'arcsin',
'acos' : 'arccos',
'atan' : 'arctan',
'asinh': 'arcsinh',
'acosh': 'arccosh',
'atanh': 'arctanh',
'atan2': 'arctan2',
'pow' : 'power',
}
ufunc2math = dict((v, k) for k, v in math2ufunc.items())
#----------------------------------------------------------------------------
# Math Type Inferers
#----------------------------------------------------------------------------
# TODO: Move any rewriting parts to lowering phases
def mk_infer_math_call(default_result_type):
def infer(typesystem, call_node, *args):
"Resolve calls to llvmmath math calls"
# signature is a generic signature, build a correct one
type = reduce(typesystem.promote, map(get_type, call_node.args))
if type.is_numeric and rank(type) < rank(default_result_type):
type = default_result_type
elif type.is_array and type.dtype.is_int:
type = typesystem.array(double, type.ndim)
call_node.args[:] = nodes.CoercionNode.coerce(call_node.args, type)
# TODO: Remove the abuse below
nodes.annotate(typesystem.env, call_node, is_math=True)
call_node.variable = Variable(type)
return call_node
return infer
infer_math_call = mk_infer_math_call(double)
infer_cmath_call = mk_infer_math_call(complex128)
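# Illustrative behaviour of the inferers above (added commentary, not part of
# the original module): a call like ``math.sin(i)`` with an integer argument
# is promoted to at least ``double`` (the default result type passed to
# mk_infer_math_call), while ``cmath.sin(x)`` promotes to at least
# ``complex128``; integer arrays are promoted to double arrays of the same
# dimensionality.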
# ______________________________________________________________________
# abs()
def abs_(typesystem, node, x):
from . import builtinmodule
argtype = get_type(x)
nodes.annotate(typesystem.env, node, is_math=True)
if argtype.is_array and argtype.dtype.is_numeric:
# Handle np.abs() on arrays
dtype = builtinmodule.abstype(argtype.dtype)
result_type = argtype.add('dtype', dtype)
node.variable = Variable(result_type)
else:
node = builtinmodule.abs_(typesystem, node, x)
return node
register_math_typefunc(1)(abs_, np.abs)
#----------------------------------------------------------------------------
# Register Type Functions
#----------------------------------------------------------------------------
def register_math(infer_math_call, nargs, value):
register = register_math_typefunc(nargs)
register(infer_math_call, value)
def npy_name(name):
return math2ufunc.get(name, name)
id_name = lambda x: x
# ______________________________________________________________________
def reg(mod, register, getname):
"""Register all functions listed in mathsyms and n_ary_mathsyms"""
nargs = lambda f: n_ary_mathsyms.get(f, 1)
for symname in mathsyms + list(n_ary_mathsyms):
if hasattr(mod, getname(symname)):
register(nargs(symname), getattr(mod, getname(symname)))
reg(math, partial(register_math, infer_math_call), id_name)
reg(cmath, partial(register_math, infer_cmath_call), id_name)
reg(np, partial(register_math, infer_math_call), npy_name)
########NEW FILE########
__FILENAME__ = numbamodule
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numba
from numba import typesystem
from numba.type_inference.module_type_inference import register
@register(numba)
def typeof(expr_type):
from numba import nodes
type = typesystem.meta(expr_type)
return nodes.const(expr_type, type)
########NEW FILE########
__FILENAME__ = numpymodule
# -*- coding: utf-8 -*-
"""
Infer types for NumPy functionality. This includes:
1) Figuring out dtypes
e.g. np.double -> double
np.dtype('d') -> double
2) Function calls such as np.empty/np.empty_like/np.arange/etc
"""
from __future__ import print_function, division, absolute_import
import warnings
from functools import reduce
import numpy as np
from numba import *
from numba import typesystem, error
from numba.type_inference.module_type_inference import (register,
register_inferer,
register_unbound)
from numba.typesystem import get_type
#------------------------------------------------------------------------
# Type Definitions
#------------------------------------------------------------------------
index_array_t = npy_intp[:]
#------------------------------------------------------------------------
# Some utilities
#------------------------------------------------------------------------
def promote(typesystem, *types):
return reduce(typesystem.promote, map(array_from_type, types))
def resolve_attribute_dtype(dtype, default=None):
"Resolve the type for numpy dtype attributes"
if dtype.is_numpy_dtype:
return dtype
if dtype.is_known_value:
numpy_attr = dtype.value
if isinstance(numpy_attr, np.dtype):
return typesystem.from_numpy_dtype(numpy_attr)
elif issubclass(numpy_attr, np.generic):
return typesystem.from_numpy_dtype(np.dtype(numpy_attr))
elif numpy_attr is not None:
try:
dtype = np.dtype(numpy_attr)
except TypeError:
warnings.warn("Unable to infer dtype for '%s'" % numpy_attr)
else:
return typesystem.from_numpy_dtype(dtype)
return None
def get_dtype(dtype_arg, default_dtype=None):
"""
Simple helper function to map an AST node dtype keyword
argument => NumPy dtype.
    """
if dtype_arg is None:
if default_dtype is None:
return None
return typesystem.numpy_dtype(default_dtype)
else:
return resolve_attribute_dtype(dtype_arg)
def promote_to_array(dtype):
"Promote scalar to 0d array type"
if not dtype.is_array:
dtype = typesystem.array_(dtype, 0)
return dtype
def demote_to_scalar(type):
"Demote 0d arrays to scalars"
if type and type.is_array and type.ndim == 0:
return type.dtype
return type
def array_from_object(a):
"""
object -> array type:
array_from_object(ASTNode([[1, 2], [3, 4]])) => int64[:, :]
"""
return array_from_type(get_type(a))
def array_from_type(type):
if type.is_array:
return type
elif type.is_tuple or type.is_list:
dtype = array_from_type(type.base_type)
if dtype.is_array:
return dtype.add('ndim', dtype.ndim + 1)
elif not type.is_object:
return typesystem.array_(dtype=type, ndim=0)
return object_
#------------------------------------------------------------------------
# Resolution of NumPy calls
#------------------------------------------------------------------------
@register(np)
def dtype(obj, align):
"Parse np.dtype(...) calls"
if obj is None:
return None
return get_dtype(obj)
def empty_like(a, dtype, order):
"Parse the result type for np.empty_like calls"
if a is None:
return None
if a.is_array:
if dtype:
dtype_type = get_dtype(dtype)
if dtype_type is None:
return a
dtype = dtype_type.dtype
else:
dtype = a.dtype
return typesystem.array(dtype, a.ndim)
register_inferer(np, 'empty_like', empty_like)
register_inferer(np, 'zeros_like', empty_like)
register_inferer(np, 'ones_like', empty_like)
def empty(shape, dtype, order):
if shape is None:
return None
dtype = get_dtype(dtype, float64)
if dtype is None:
return object_
if shape.is_int:
ndim = 1
elif shape.is_tuple or shape.is_list:
ndim = shape.size
else:
return None
return typesystem.array(dtype.dtype, ndim)
register_inferer(np, 'empty', empty)
register_inferer(np, 'zeros', empty)
register_inferer(np, 'ones', empty)
@register(np)
def arange(start, stop, step, dtype):
"Resolve a call to np.arange()"
# NOTE: dtype must be passed as a keyword argument, or as the fourth
# parameter
dtype_type = get_dtype(dtype, npy_intp)
if dtype_type is not None:
# return a 1D array type of the given dtype
return dtype_type.dtype[:]
@register(np)
def dot(typesystem, a, b, out):
"Resolve a call to np.dot()"
if out is not None:
return out
lhs_type = promote_to_array(a)
rhs_type = promote_to_array(b)
dtype = typesystem.promote(lhs_type.dtype, rhs_type.dtype)
dst_ndim = lhs_type.ndim + rhs_type.ndim - 2
result_type = typesystem.array(dtype, dst_ndim)
return result_type
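# Worked example for dot() above (illustrative, not original source): for
# ``np.dot(a, b)`` with ``a`` typed float32[:, :] and ``b`` typed
# float64[:, :], the dtypes promote to float64 and the result dimensionality
# is 2 + 2 - 2 = 2, i.e. float64[:, :].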
@register(np)
def array(object, dtype, order, subok):
type = array_from_type(object)
if type.is_array and dtype is not None:
type = type.add('dtype', dtype)
elif dtype is not None:
return dtype
else:
return type
@register(np, pass_in_types=False)
def datetime64(datetime_string):
return nodes.NumpyDateTimeNode(datetime_string)
@register(np, pass_in_types=False)
def timedelta64(delta, units):
return nodes.NumpyTimeDeltaNode(delta, units)
@register(np)
def nonzero(a):
return _nonzero(array_from_type(a))
def _nonzero(type):
if type.is_array:
return typesystem.tuple_(index_array_t, type.ndim)
else:
return typesystem.tuple_(index_array_t)
@register(np)
def where(typesystem, condition, x, y):
if x is None and y is None:
return nonzero(condition)
return promote(typesystem, x, y)
@register(np)
def vdot(typesystem, a, b):
lhs_type = promote_to_array(a)
rhs_type = promote_to_array(b)
dtype = typesystem.promote(lhs_type.dtype, rhs_type.dtype)
return dtype
@register(np)
def inner(typesystem, a, b):
lhs_type = promote_to_array(a)
rhs_type = promote_to_array(b)
dtype = typesystem.promote(lhs_type.dtype, rhs_type.dtype)
if lhs_type.ndim == 0:
result_ndim = rhs_type.ndim
elif rhs_type.ndim == 0:
result_ndim = lhs_type.ndim
else:
result_ndim = lhs_type.ndim + rhs_type.ndim - 2
if result_ndim == 0:
result_type = dtype
else:
result_type = typesystem.array(dtype, result_ndim)
return result_type
@register(np)
def outer(typesystem, a, b):
result_type = promote(typesystem, a, b)
# promote() converts scalar types to 0-dim arrays, so it should
# always return an array type. Ensure this continues to hold...
assert result_type.is_array
return result_type.dtype[:, :]
@register(np, pass_in_types=False)
def tensordot(typesystem, a, b, axes):
'''Typing function for numpy.tensordot().
Defaults to Python object for any caller that isn't using the
default argument to axes.
Otherwise, it is similar to inner(), but subtracts four dimensions
from the result instead of two.
Without symbolic execution of the actual axes argument, this can't
determine the number of axes to sum over, so it punts. This
typing function could use an array type of unknown dimensionality,
were one available. See:
https://www.pivotaltracker.com/story/show/43687249
'''
lhs_type = array_from_object(a)
rhs_type = array_from_object(b)
if lhs_type.ndim < 1:
raise error.NumbaError(a, 'First argument to numpy.tensordot() '
'requires array of dimensionality >= 1.')
elif rhs_type.ndim < 1:
        raise error.NumbaError(b, 'Second argument to numpy.tensordot() '
                                  'requires array of dimensionality >= 1.')
dtype = typesystem.promote(lhs_type.dtype, rhs_type.dtype)
if axes is None:
result_ndim = lhs_type.ndim + rhs_type.ndim - 4
if result_ndim < 0:
raise error.NumbaError(a, 'Arguments to numpy.tensordot() should '
'have combined dimensionality >= 4 (when '
'axes argument is not specified).')
result_type = typesystem.array(dtype, result_ndim)
else:
# XXX Issue warning to user?
result_type = object_
return result_type
@register(np)
def einsum(typesystem, subs, operands, kws):
# XXX Issue warning to user?
# XXX Attempt type inference in case where subs is a string?
return object_
@register(np)
def kron(typesystem, a, b):
#raise NotImplementedError("XXX")
return object_
@register(np)
def trace(typesystem, a, offset, axis1, axis2, dtype, out):
#raise NotImplementedError("XXX")
return object_
#------------------------------------------------------------------------
# numpy.linalg
#------------------------------------------------------------------------
@register(np.linalg)
def cholesky(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def cond(typesystem, x, p):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def det(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def eig(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def eigh(typesystem, a, UPLO):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def eigvals(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def eigvalsh(typesystem, a, UPLO):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def inv(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def lstsq(typesystem, a, b, rcond):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def matrix_power(typesystem, M, n):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def matrix_rank(typesystem, M, tol):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def norm(typesystem, x, ord):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def pinv(typesystem, a, rcond):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def qr(typesystem, a, mode):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def slogdet(typesystem, a):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def solve(typesystem, a, b):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def svd(typesystem, a, full_matrices, compute_uv):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def tensorinv(typesystem, a, ind):
#raise NotImplementedError("XXX")
return object_
@register(np.linalg)
def tensorsolve(typesystem, a, b, axes):
#raise NotImplementedError("XXX")
return object_
########NEW FILE########
__FILENAME__ = numpyufuncs
# -*- coding: utf-8 -*-
"""
Type inference for NumPy binary ufuncs and their methods.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from functools import reduce
from numba import *
from numba import typesystem
from numba.typesystem import numpy_support
from numba.type_inference.module_type_inference import (module_registry,
register,
register_inferer,
register_unbound)
from numba.typesystem import get_type
from numba.type_inference.modules.numpymodule import (get_dtype,
array_from_type,
promote,
promote_to_array,
demote_to_scalar)
#----------------------------------------------------------------------------
# Utilities
#----------------------------------------------------------------------------
def array_of_dtype(a, dtype, static_dtype, out):
if out is not None:
return out
a = array_from_type(a)
if a.is_array:
dtype = _dtype(a, dtype, static_dtype)
if dtype is not None:
return a.add('dtype', dtype)
def _dtype(a, dtype, static_dtype):
if static_dtype:
return static_dtype
elif dtype:
return dtype.dtype
elif a.is_array:
return a.dtype
elif not a.is_object:
return a
else:
return None
#------------------------------------------------------------------------
# Ufunc Type Strings
#------------------------------------------------------------------------
def numba_type_from_sig(ufunc_signature):
"""
    Convert a ufunc type signature string (e.g. 'dd->d') to a Numba function signature type.
"""
args, ret = ufunc_signature.split('->')
to_numba = lambda c: numpy_support.map_dtype(np.dtype(c))
signature = to_numba(ret)(*map(to_numba, args))
return signature
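# A minimal usage sketch (hypothetical): 'dd->d' is one of the type strings
# NumPy reports for a ufunc (e.g. in np.add.types), and 'd' maps to double,
# so the call below would yield the signature double(double, double).
#
#   numba_type_from_sig('dd->d')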
def find_signature(args, signatures):
for signature in signatures:
if signature.args == args:
return signature
def find_ufunc_signature(typesystem, argtypes, signatures):
"""
Map (float_, double) and [double(double, double),
int_(int_, int_),
...]
to double(double, double)
"""
signature = find_signature(tuple(argtypes), signatures)
if signature is not None:
return signature
argtype = reduce(typesystem.promote, argtypes)
if not argtype.is_object:
args = (argtype,) * len(argtypes)
return find_signature(args, signatures)
return None
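# Illustrative sketch (hypothetical types, relying on the promotion rules of
# the typesystem passed in): given the signatures below, the argument types
# (float_, double) have no exact match, promote to double, and therefore
# resolve to double(double, double).
#
#   find_ufunc_signature(ts, [float_, double],
#                        [double(double, double), int_(int_, int_)])
#   # -> double(double, double)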
class UfuncTypeInferer(object):
"Infer types for arbitrary ufunc"
def __init__(self, ufunc):
self.ufunc = ufunc
self.signatures = set(map(numba_type_from_sig, ufunc.types))
def infer(self, typesystem, argtypes):
signature = find_ufunc_signature(typesystem, argtypes, self.signatures)
if signature is None:
return None
else:
return signature.return_type
def register_arbitrary_ufunc(ufunc):
"Type inference for arbitrary ufuncs"
ufunc_infer = UfuncTypeInferer(ufunc)
def infer(typesystem, *args, **kwargs):
if len(args) != ufunc.nin:
return object_
# Find the right ufunc signature
argtypes = [type.dtype if type.is_array else type for type in args]
result_type = ufunc_infer.infer(typesystem, argtypes)
if result_type is None:
return object_
# Determine output ndim
ndim = 0
for argtype in args:
if argtype.is_array:
ndim = max(argtype.ndim, ndim)
return typesystem.array(result_type, ndim)
module_registry.register_value(ufunc, infer)
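# Hypothetical usage sketch: any NumPy ufunc object can be handed to the
# helper above so that calls to it are typed from its own signature strings,
# e.g.
#
#   register_arbitrary_ufunc(np.hypot)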
# module_registry.register_unbound_dotted_value
#----------------------------------------------------------------------------
# Ufunc type inference
#----------------------------------------------------------------------------
def binary_map(typesystem, a, b, out):
if out is not None:
return out
return promote(typesystem, a, b)
def binary_map_bool(typesystem, a, b, out):
type = binary_map(typesystem, a, b, out)
if type.is_array:
return type.add('dtype', bool_)
elif type.is_numeric:
return bool_
else:
return object_
def reduce_(a, axis, dtype, out, static_dtype=None):
if out is not None:
return out
dtype_type = _dtype(a, dtype, static_dtype)
if axis is None:
# Return the scalar type
return dtype_type
if dtype_type:
# Handle the axis parameter
if axis.is_tuple and axis.is_sized:
# axis=(tuple with a constant size)
return typesystem.array(dtype_type, a.ndim - axis.size)
elif axis.is_int:
# axis=1
return typesystem.array(dtype_type, a.ndim - 1)
else:
# axis=(something unknown)
return object_
def reduce_bool(a, axis, dtype, out):
return reduce_(a, axis, dtype, out, bool_)
def accumulate(a, axis, dtype, out, static_dtype=None):
return demote_to_scalar(array_of_dtype(a, dtype, static_dtype, out))
def accumulate_bool(a, axis, dtype, out):
return accumulate(a, axis, dtype, out, bool_)
def reduceat(a, indices, axis, dtype, out, static_dtype=None):
return accumulate(a, axis, dtype, out, static_dtype)
def reduceat_bool(a, indices, axis, dtype, out):
return reduceat(a, indices, axis, dtype, out, bool_)
def outer(typesystem, a, b, static_dtype=None):
a = array_of_dtype(a, None, static_dtype, out=None)
if a and a.is_array:
return a.dtype[:, :]
def outer_bool(typesystem, a, b):
return outer(typesystem, a, b, bool_)
#------------------------------------------------------------------------
# Binary Ufuncs
#------------------------------------------------------------------------
binary_ufuncs_compare = (
# Comparisons
'greater',
'greater_equal',
'less',
'less_equal',
'not_equal',
'equal',
)
binary_ufuncs_logical = (
# Logical ufuncs
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
)
binary_ufuncs_bitwise = (
# Bitwise ufuncs
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'left_shift',
'right_shift',
)
binary_ufuncs_arithmetic = (
# Arithmetic ufuncs
'add',
'subtract',
'multiply',
'true_divide',
'floor_divide',
)
if not PY3:
binary_ufuncs_arithmetic = binary_ufuncs_arithmetic + ('divide', )
#------------------------------------------------------------------------
# Register our type functions
#------------------------------------------------------------------------
register_inferer(np, 'sum', reduce_)
register_inferer(np, 'prod', reduce_)
def register_arithmetic_ufunc(register_inferer, register_unbound, binary_ufunc):
register_inferer(np, binary_ufunc, binary_map)
register_unbound(np, binary_ufunc, "reduce", reduce_)
register_unbound(np, binary_ufunc, "accumulate", accumulate)
register_unbound(np, binary_ufunc, "reduceat", reduceat)
register_unbound(np, binary_ufunc, "outer", outer)
def register_bool_ufunc(register_inferer, register_unbound, binary_ufunc):
register_inferer(np, binary_ufunc, binary_map_bool)
register_unbound(np, binary_ufunc, "reduce", reduce_bool)
register_unbound(np, binary_ufunc, "accumulate", accumulate_bool)
register_unbound(np, binary_ufunc, "reduceat", reduceat_bool)
register_unbound(np, binary_ufunc, "outer", outer_bool)
for binary_ufunc in binary_ufuncs_bitwise + binary_ufuncs_arithmetic:
register_arithmetic_ufunc(register_inferer, register_unbound, binary_ufunc)
for binary_ufunc in binary_ufuncs_compare + binary_ufuncs_logical:
register_bool_ufunc(register_inferer, register_unbound, binary_ufunc)
########NEW FILE########
__FILENAME__ = utils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import functools
try:
import __builtin__ as builtins
except ImportError:
import builtins
from numba import error
from numba.type_inference.module_type_inference import (register,
register_inferer,
register_unbound,
register_value)
def expect_n_args(node, name, nargs):
if not isinstance(nargs, tuple):
nargs = (nargs,)
if len(node.args) not in nargs:
expected = " or ".join(map(str, nargs))
raise error.NumbaError(
node, "builtin %s expects %s arguments" % (name,
expected))
def register_with_argchecking(nargs, can_handle_deferred_types=False):
if nargs is not None and not isinstance(nargs, tuple):
nargs = (nargs,)
def decorator(func, value=None):
@functools.wraps(func)
def infer(context, node, *args):
if nargs is not None:
expect_n_args(node, name, nargs)
need_nones = max(nargs) - len(args)
args += (None,) * need_nones
return func(context, node, *args)
if value is None:
name = infer.__name__.strip("_")
if name == 'datetime':
import datetime
value = datetime.datetime
else:
value = getattr(builtins, name)
else:
name = getattr(value, "__name__", "<unknown>")
register_value(value, infer, pass_in_types=False, pass_in_callnode=True,
can_handle_deferred_types=can_handle_deferred_types)
return func # wrapper
return decorator
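# Hedged usage sketch (names illustrative): the decorated type function is
# associated with the builtin of the same (underscore-stripped) name, and is
# called with the AST call node plus arguments padded with None up to nargs.
#
#   @register_with_argchecking(nargs=(1, 2))
#   def round_(context, node, number, ndigits):
#       ...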
########NEW FILE########
__FILENAME__ = module_type_inference
# -*- coding: utf-8 -*-
"""
Support for type functions for external code.
See modules/numpy*.py for type inference for NumPy.
"""
from __future__ import print_function, division, absolute_import
import ast
import inspect
import numba
from numba import *
from numba import typesystem, error, nodes
from numba.typesystem import get_type, typeset, Type
import logging
debug = False
#debug = True
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
#----------------------------------------------------------------------------
# Exceptions
#----------------------------------------------------------------------------
class ValueAlreadyRegistered(error.NumbaError):
"""
Raised when a type inferer is registered multiple times for the same
value.
"""
class UnmatchedTypeError(error.NumbaError):
"""
Raised when no matching specialization is found for a registered
signature (`register_callable`).
"""
#----------------------------------------------------------------------------
# Global Registry for Type Functions
#----------------------------------------------------------------------------
class ModuleTypeInfererRegistry(object):
"Builds the module type inferers for the modules we can handle"
def __init__(self):
super(ModuleTypeInfererRegistry, self).__init__()
# value := (typefunc, pass_in_types, pass_in_callnode)
# function calls: (np.add)
# { value : value }
# unbound methods: (np.add.reduce)
# { (value, unbound_dotted_path, False) : value }
# bound methods: (obj.method where type(obj) is registered)
# { (type, bound_dotted_path, True) : value }
self.value_to_inferer = {}
# { value : (module, 'attribute') } (e.g. {np.add : (np, 'add')})
self.value_to_module = {}
def is_registered(self, value, func_type=None):
try:
hash(value)
except TypeError:
return False # unhashable object
else:
return value in self.value_to_inferer
def register_inferer(self, module, attr, inferer, **kwds):
"""
        Register a type function (a type inferer) for a known function value.
        E.g. np.add() can be mapped as follows:
            module=np, attr='add', inferer=my_inferer
"""
value = getattr(module, attr)
if self.is_registered(value):
raise ValueAlreadyRegistered((value, module, inferer))
self.value_to_module[value] = (module, attr)
self.register_value(value, inferer, **kwds)
def register_value(self, value, inferer, pass_in_types=True,
pass_in_callnode=False, can_handle_deferred_types=False):
flags = dict(
pass_in_types=pass_in_types,
pass_in_callnode=pass_in_callnode,
can_handle_deferred_types=can_handle_deferred_types,
)
self.value_to_inferer[value] = (inferer, flags)
def register_unbound_method(self, module, attr, method_name,
inferer, **kwds):
"""
Register an unbound method or dotted attribute path
(allow for transience).
E.g. np.add.reduce() can be mapped as follows:
module=np, attr='add', method_name='reduce',
            inferer=my_inferer
"""
self.register_unbound_dotted(module, attr, method_name, inferer,
**kwds)
def register_unbound_dotted(self, module, attr, dotted_path, inferer,
**kwds):
"""
        Register a type function for a dotted attribute path of a value.
        E.g. my_module.my_obj.foo.bar() can be mapped as follows:
            module=my_module, attr='my_obj', dotted_path='foo.bar',
            inferer=my_inferer
"""
value = getattr(module, attr)
self.register_unbound_dotted_value(value, dotted_path, inferer, **kwds)
def register_unbound_dotted_value(self, value, dotted_path,
inferer, **kwds):
if self.is_registered((value, dotted_path)):
raise ValueAlreadyRegistered((value, inferer))
self.register_value((value, dotted_path), inferer, **kwds)
def get_inferer(self, value, func_type=None):
return self.value_to_inferer[value]
def lookup_module_attribute(self, value):
"Return the module (or None) to which a registered value belongs"
if self.is_registered(value) and value in self.value_to_module:
return self.value_to_module[value]
module_registry = ModuleTypeInfererRegistry()
#----------------------------------------------------------------------------
# Dispatch Functions for the Type Inferencer
#----------------------------------------------------------------------------
def module_attribute_type(obj):
"""
See if the object is registered to any module which might handle
type inference on the object.
"""
result = module_registry.lookup_module_attribute(obj)
if result is not None:
module, attr = result
return typesystem.module_attribute(module=module, attr=attr)
return None
def parse_args(call_node, arg_names):
"""
Parse positional and keyword arguments.
"""
result = dict.fromkeys(arg_names)
# parse positional arguments
i = 0
for i, (arg_name, arg) in enumerate(zip(arg_names, call_node.args)):
result[arg_name] = arg
arg_names = arg_names[i:]
if arg_names:
# parse keyword arguments
for keyword in call_node.keywords:
if keyword.arg in result:
result[keyword.arg] = keyword.value
return result
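# Worked example (hypothetical call): for user code ``np.empty(n, dtype=np.float32)``
# and arg_names ('shape', 'dtype', 'order'), the result maps 'shape' to the
# AST node for ``n``, 'dtype' to the node for ``np.float32``, and leaves
# 'order' as None.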
def _build_arg(pass_in_types, node):
if pass_in_types:
return get_type(node)
return node
def dispatch_on_value(context, call_node, func_type): # TODO: Pass in typesystem here
"""
Dispatch a call of a module attribute by value.
For instance, a method
def empty(shape, dtype, order):
...
would be called with those arguments. Parameters not present as
arguments in user code are None.
Returns the result type, or None
"""
inferer, flags = get_inferer(func_type.value)
# Detect needed arguments
argspec = inspect.getargspec(inferer)
# Pass in additional arguments (context and call_node)
argnames = argspec.args
if argnames and argnames[0] in ("context", "typesystem"):
argnames.pop(0)
# TODO: Remove this and reference in mathmodule.infer_unary_math_call
context.env.crnt.typesystem.env = context.env
args = [context.env.crnt.typesystem]
else:
args = []
if flags['pass_in_callnode']:
argnames.pop(0)
args.append(call_node)
# Parse argument names from introspection
method_kwargs = parse_args(call_node, argnames)
# Build keyword arguments
    for argname, node in method_kwargs.items():  # items() works on Python 2 and 3
if node is not None:
method_kwargs[argname] = _build_arg(flags['pass_in_types'], node)
if argspec.varargs and len(argnames) < len(call_node.args):
# In the case of *args, build positional list and pass any additional
# arguments as keywords
extra_args = call_node.args[len(argnames):]
args.extend(method_kwargs.pop(argname) for argname in argnames)
args.extend(_build_arg(flags['pass_in_types'], arg) for arg in extra_args)
if argspec.keywords:
# Handle **kwargs
for keyword in call_node.keywords:
if keyword.arg not in argnames:
method_kwargs[keyword.arg] = keyword.value
return inferer(*args, **method_kwargs)
def resolve_call(context, call_node, obj_call_node, func_type):
"""
Find the right type inferrer function for a call to an attribute
of a certain module.
call_node: the original ast.Call node that we need to resolve
the type for
obj_call_node: the nodes.ObjectCallNode that would replace the
ast.Call unless we override that with another node.
func_type: module_attribute
|__________> module: Python module
|__________> attr: Attribute name
|__________> value: Attribute value
Returns a new AST node that should replace the ast.Call node.
"""
result = dispatch_on_value(context, call_node, func_type)
if result is not None and not isinstance(result, ast.AST):
assert isinstance(result, Type), (Type, result)
type = result
result = obj_call_node
# result.variable = symtab.Variable(type)
result = nodes.CoercionNode(result, type)
return result
def resolve_call_or_none(context, call_node, func_type):
if (func_type.is_known_value and
is_registered(func_type.value)):
# Try the module type inferers
new_node = nodes.call_obj(call_node, None)
return resolve_call(context, call_node, new_node, func_type)
def can_handle_deferred(py_func):
"Return whether the type function can handle deferred argument types"
inferer, flags = get_inferer(py_func)
return flags['can_handle_deferred_types']
#----------------------------------------------------------------------------
# User-exposed functions to register type functions
#----------------------------------------------------------------------------
is_registered = module_registry.is_registered
register_inferer = module_registry.register_inferer
register_value = module_registry.register_value
get_inferer = module_registry.get_inferer
register_unbound = module_registry.register_unbound_method
def register(module, **kws):
"""
@register(module)
def my_type_function(arg1, ..., argN):
...
"""
def decorator(inferer):
register_inferer(module, inferer.__name__, inferer, **kws)
return inferer
return decorator
def register_callable(signature):
"""
signature := function | typeset(signature *)
@register_callable(signature)
def my_function(...):
...
"""
assert isinstance(signature, (typeset.typeset, Type))
# convert void return type to object_ (None)
def convert_void_to_object(sig):
if sig.return_type == void:
sig = sig.add('return_type', object_)
return sig
if isinstance(signature, typeset.typeset):
signature = typeset.typeset([convert_void_to_object(x)
for x in signature.types],
name=signature.name)
else:
assert isinstance(signature, Type)
signature = convert_void_to_object(signature)
def decorator(function):
def infer(typesystem, *args):
if signature.is_typeset:
specialization = signature.find_match(typesystem.promote, args)
else:
specialization = typeset.match(typesystem.promote, signature, args)
if specialization is None:
raise UnmatchedTypeError(
"Unmatched argument types for function '%s': %s" %
(function.__name__, args))
assert specialization.is_function
return specialization.return_type
register_value(function, infer)
return function
return decorator
#----------------------------------------------------------------------------
# Registry of internal Type Functions
#----------------------------------------------------------------------------
# Register type inferrer functions
from numba.type_inference.modules import (numbamodule,
numpymodule,
numpyufuncs,
builtinmodule,
mathmodule)
########NEW FILE########
__FILENAME__ = test_extension_type_inference
from numba import *
@jit # Test compiling this class
class Test(object):
@void(double,double)
def __init__(self, a, b):
self.a = a
self.b = b
for i in range(self.a, self.b):
pass
self.i = i
if __name__ == "__main__":
pass
########NEW FILE########
__FILENAME__ = test_typesets
import numba
from numba import *
from numba.typesystem import typeset, promote
from numba.environment import NumbaEnvironment
def s(type):
return type(type, type)
def typeset_matching():
numeric = typeset.typeset([int_, longlong])
n = numeric(numeric, numeric)
f = numba.floating(numba.floating, numba.floating)
signatures = [n, f, object_(object_, object_)]
ts = typeset.typeset(signatures)
assert ts.find_match(promote, [float_, float_]) == s(float_)
assert ts.find_match(promote, [float_, double]) == s(double)
# assert ts.find_match(promote, [longdouble, float_]) == s(longdouble)
assert ts.find_match(promote, [int_, int_]) == s(int_)
# assert ts.find_match(promote, [int_, longlong]) == s(longlong)
# assert ts.find_match(promote, [short, int_]) == s(int_)
# np.result_type(np.short, np.ulonglong) -> np.float64
# assert ts.find_match(promote, [short, ulonglong]) is None
if __name__ == '__main__':
typeset_matching()
########NEW FILE########
__FILENAME__ = test_type_inference
#! /usr/bin/env python
# ______________________________________________________________________
'''test_type_inference
Test type inference.
'''
# ______________________________________________________________________
from numba import *
from numba import typesystem
from numba import decorators
import unittest
import numpy
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
# ______________________________________________________________________
def _simple_func(arg):
if arg > 0.:
result = 22.
else:
result = 42.
return result
simple_func = decorators.autojit(backend='ast')(_simple_func)
def _for_loop(start, stop, inc):
acc = 0
for value in range(start, stop, inc):
acc += value
print(value)
return acc
for_loop = decorators.autojit(backend='ast')(_for_loop)
def arange():
a = numpy.arange(10)
b = numpy.arange(10, dtype=numpy.double)
return a, b
def empty_like(a):
b = numpy.empty_like(a)
c = numpy.zeros_like(a, dtype=numpy.int32)
d = numpy.ones_like(a)
dtype = np.float32
def _empty(N):
# default dtype
a1 = numpy.empty(N)
a2 = numpy.empty((N,))
a3 = numpy.empty([N])
# Given dtype
a4 = numpy.empty(N, dtype)
a5 = numpy.empty(N, dtype=dtype)
a6 = numpy.empty(N, np.float32)
a7 = numpy.empty(N, dtype=np.float32)
# Test dimensionality
a8 = np.empty((N, N), dtype=np.int64)
def _empty_arg(N, empty, zeros, ones):
a1 = empty([N])
a2 = zeros([N])
a3 = ones([N])
@autojit
def assert_array_dtype(A, value, empty, zeros, ones):
if value < 2:
A = empty([A.shape[0], A.shape[1]], dtype=A.dtype)
elif value < 4:
A = zeros([A.shape[0], A.shape[1]], dtype=A.dtype)
elif value < 6:
A = ones([A.shape[0], A.shape[1]], dtype=A.dtype)
else:
pass
# 'A' must have a consistent array type here
return A
def slicing(a):
n = numpy.newaxis
# 0D
b = a[0]
c = a[9]
# 1D
d = a[:]
e = a[...]
# 2D
f = a[n, ...]
g = a[numpy.newaxis, :]
h = a[..., numpy.newaxis]
i = a[:, n]
j = a[n, numpy.newaxis, 0]
k = a[numpy.newaxis, 0, n]
l = a[0, n, n]
def none_newaxis(a):
n = None
# 2D
f = a[None, ...]
#g = a[n, :]
h = a[..., None]
#i = a[:, n]
#j = a[n, None, 0]
k = a[None, 0, numpy.newaxis]
l = a[0, n, numpy.newaxis]
def func_with_signature(a):
if a > 1:
return float(a)
elif a < 5:
return int(a)
elif a > 10:
return object()
return a + 1j
def arg_rebind(a):
a = 0
a = 0.0
a = "hello"
# ______________________________________________________________________
from numba.control_flow.tests.test_cfg_type_infer import infer, functype
class TestTypeInference(unittest.TestCase):
def test_simple_func(self):
self.assertEqual(simple_func(-1.), 42.)
self.assertEqual(simple_func(1.), 22.)
def test_simple_for(self):
self.assertEqual(for_loop(0, 10, 1), 45)
def test_type_infer_simple_func(self):
sig, symtab = infer(_simple_func, functype(None, [double]))
self.assertEqual(sig.return_type, double)
# TODO: Re-enable once we flesh out the exact type promotion rules
# def test_type_infer_for_loop(self):
# sig, symtab = infer(_for_loop, functype(None, [int_, int_, int_]))
# self.assertTrue(symtab['acc'].type.is_int)
# self.assertEqual(symtab['value'].type, Py_ssize_t)
# self.assertEqual(sig.return_type, Py_ssize_t)
def test_type_infer_arange(self):
sig, symtab = infer(arange, functype())
self.assertEqual(symtab['a'].type, npy_intp[:])
self.assertEqual(symtab['b'].type, double[:])
def test_empty_like(self):
sig, symtab = infer(empty_like, functype(None, [double[:]]))
self.assertEqual(symtab['b'].type, double[:])
self.assertEqual(symtab['c'].type, int32[:])
self.assertEqual(symtab['d'].type, double[:])
def test_empty(self):
sig, symtab = infer(_empty, functype(None, [int_]))
for i in range(1, 4):
self.assertEqual(symtab['a%d' % i].type, double[:])
for i in range(4, 8):
self.assertEqual(symtab['a%d' % i].type, float_[:])
self.assertEqual(symtab['a8'].type, int64[:, :])
def test_empty_arg(self):
from numba import typesystem as nt
empty_t = nt.module_attribute(module=np, attr='empty')
zeros_t = nt.module_attribute(module=np, attr='zeros')
ones_t = nt.module_attribute(module=np, attr='ones')
sig, symtab = infer(_empty_arg, functype(None, [int_, empty_t,
zeros_t, ones_t]))
for i in range(1, 4):
self.assertEqual(symtab['a%d' % i].type, double[:])
def test_dtype_attribute(self):
A = np.empty((10, 10), dtype=np.float32)
A_result = assert_array_dtype(A, 3, np.empty, np.zeros, np.ones)
assert np.all(A_result == 0)
A_result = assert_array_dtype(A, 5, np.empty, np.zeros, np.ones)
assert np.all(A_result == 1)
def test_slicing(self):
sig, symtab = infer(slicing, functype(None, [double[:]]))
self.assertEqual(symtab['n'].type, typesystem.newaxis)
self.assertEqual(symtab['b'].type, double)
self.assertEqual(symtab['c'].type, double)
self.assertEqual(symtab['d'].type, double[:])
self.assertEqual(symtab['e'].type, double[:])
self.assertEqual(symtab['f'].type, double[:, :])
self.assertEqual(symtab['g'].type, double[:, :])
self.assertEqual(symtab['h'].type, double[:, :])
self.assertEqual(symtab['i'].type, double[:, :])
self.assertEqual(symtab['j'].type, double[:, :])
self.assertEqual(symtab['k'].type, double[:, :])
self.assertEqual(symtab['l'].type, double[:, :])
def test_none_newaxis(self):
sig, symtab = infer(none_newaxis, functype(None, [double[:]]))
self.assertEqual(symtab['f'].type, double[:, :])
#self.assertEqual(symtab['g'].type, double[:, :])
self.assertEqual(symtab['h'].type, double[:, :])
#self.assertEqual(symtab['i'].type, double[:, :])
#self.assertEqual(symtab['j'].type, double[:, :, :])
self.assertEqual(symtab['k'].type, double[:, :])
self.assertEqual(symtab['l'].type, double[:, :])
def test_return_type(self):
sig, symtab = infer(func_with_signature, functype(int_, [int_]))
assert sig == int_(int_)
sig, symtab = infer(func_with_signature, functype(int_, [float_]))
assert sig == int_(float_)
sig, symtab = infer(func_with_signature, functype(float_, [int_]))
assert sig == float_(int_)
def test_rebind_arg(self):
sig, symtab = infer(arg_rebind, functype(int_, [int_]),
allow_rebind_args=True)
assert sig == int_(int_)
assert symtab['a'].type == c_string_type
# try:
# sig, symtab = infer(arg_rebind, functype(int_, [int_]),
# allow_rebind_args=False)
# except minierror.UnpromotableTypeError as e:
# msg = str(sorted(e.args, key=str))
# self.assertEqual("[(double, const char *)]", msg)
# else:
# raise Exception("Expected an unpromotable type error")
# ______________________________________________________________________
if __name__ == "__main__":
# TestTypeInference('test_dtype_attribute').debug()
unittest.main()
########NEW FILE########
__FILENAME__ = test_user_type_inference
from numba import *
from numba import register, register_callable, typeof, typeset
#----------------------------------------------------------------------------
# Type functions
#----------------------------------------------------------------------------
@register_callable(int32(double))
def func(arg):
return int(arg)
@register_callable(typeset([int16(float_), int32(double), numeric(numeric)]))
def func_typeset_simple(arg):
return int(arg)
@register_callable(numeric(numeric, numeric))
def func_typeset_binding(arg1, arg2):
return int(arg1) + int(arg2)
#----------------------------------------------------------------------------
# Use of type functions
#----------------------------------------------------------------------------
@autojit
def use_user_type_function():
return typeof(func(10.0))
@autojit
def use_typeset_function_simple():
return typeof(func_typeset_simple(10.0))
@autojit
def use_typeset_function_binding(type1, type2):
return typeof(func_typeset_binding(type1(10.0), type2(12.0)))
#----------------------------------------------------------------------------
# Test functions
#----------------------------------------------------------------------------
def test_register_callable():
assert use_user_type_function() == int32
assert use_typeset_function_simple() == int32
assert use_typeset_function_binding(double, double) == double
assert use_typeset_function_binding(float_, double) == double
assert use_typeset_function_binding(int_, float_) == float_
assert use_typeset_function_binding(int_, long_).itemsize == long_.itemsize
if __name__ == '__main__':
test_register_callable()
########NEW FILE########
__FILENAME__ = ufunc_builder
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import types
from numba import visitors, nodes, error, functions, traits
class UFuncBuilder(object):
"""
Create a Python ufunc AST function. Demote the types of arrays to scalars
in the ufunc and generate a return.
"""
ufunc_counter = 0
def __init__(self):
self.operands = []
def register_operand(self, node):
"""
Register a sub-expression as something that will be evaluated
outside the kernel, and the result of which will be passed into the
kernel. This can be a variable name:
a + b
->
f(arg1, arg2):
return arg1 + arg2
For which 'a' and 'b' are the operands.
"""
result = ast.Name(id='op%d' % len(self.operands), ctx=ast.Load())
self.operands.append(node)
return result
def build_ufunc_ast(self, tree):
args = [ast.Name(id='op%d' % i, ctx=ast.Param())
for i, op in enumerate(self.operands)]
arguments = ast.arguments(args=args,
vararg=None,
kwarg=None,
defaults=[],
)
body = ast.Return(value=tree)
func = ast.FunctionDef(name='ufunc%d' % self.ufunc_counter,
args=arguments, body=[body], decorator_list=[])
UFuncBuilder.ufunc_counter += 1
# print ast.dump(func)
return func
def compile_to_pyfunc(self, ufunc_ast, globals=()):
"Compile the ufunc ast to a function"
# Build ufunc AST module
module = ast.Module(body=[ufunc_ast])
functions.fix_ast_lineno(module)
# Create Python ufunc function
d = dict(globals)
exec(compile(module, '<ast>', 'exec'), d, d)
d.pop('__builtins__')
py_ufunc = d[ufunc_ast.name]
assert isinstance(py_ufunc, types.FunctionType), py_ufunc
return py_ufunc
def save(self):
"""
Save the state of the builder to allow processing other parts of
the tree.
"""
state = self.operands
self.operands = []
return state
def restore(self, state):
"Restore saved state"
self.operands = state
@traits.traits
class UFuncConverter(ast.NodeTransformer):
"""
Convert a Python array expression AST to a scalar ufunc kernel by demoting
array types to scalar types.
"""
build_ufunc_ast = traits.Delegate('ufunc_builder')
operands = traits.Delegate('ufunc_builder')
def __init__(self, env):
self.ufunc_builder = UFuncBuilder()
self.env = env
def demote_type(self, node):
node.type = self.demote(node.type)
if hasattr(node, 'variable'):
node.variable.type = node.type
def demote(self, type):
if type.is_array:
return type.dtype
return type
def visit_BinOp(self, node):
if node.type.is_array:
self.demote_type(node)
node.left = self.visit(node.left)
node.right = self.visit(node.right)
else:
node = self.generic_visit(node)
return node
def visit_UnaryOp(self, node):
self.demote_type(node)
node.operand = self.visit(node.operand)
return node
def visit_Call(self, node):
if nodes.query(self.env, node, "is_math") and node.type.is_array:
self.demote_type(node)
node.args = list(map(self.visit_scalar_or_array, node.args))
else:
node = self.generic_visit(node)
return node
def visit_CoercionNode(self, node):
return self.visit(node.node)
    def _generic_visit(self, node):
        # Use UFuncConverter (this class) in the super() call; UFuncBuilder is
        # a helper object held by this converter, not a base class.
        super(UFuncConverter, self).generic_visit(node)
def visit_scalar_or_array(self, node):
if node.type.is_array:
node = self.visit(node)
self.demote_type(node)
return node
else:
return self.generic_visit(node)
def generic_visit(self, node):
"""
Register Name etc as operands to the ufunc
"""
result = self.ufunc_builder.register_operand(node)
result.type = node.type
self.demote_type(result)
return result
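# End-to-end sketch (hypothetical, names illustrative): rewriting the array
# expression ``a * b + 1.0`` registers ``a``, ``b`` and ``1.0`` as operands
# and builds a scalar kernel roughly equivalent to
#
#   def ufunc0(op0, op1, op2):
#       return op0 * op1 + op2
#
# which compile_to_pyfunc() then turns into a plain Python function.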
########NEW FILE########
__FILENAME__ = library
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
def declare(numba_cdef, env, global_module):
"""
Declare a NumbaCDefinition in the current translation environment.
"""
# print numba_cdef
specialized_cdef = numba_cdef(env, global_module)
lfunc = specialized_cdef.define(global_module) #, optimize=False)
assert lfunc.module is global_module
return specialized_cdef, lfunc
registered_utilities = []
def register(utility):
registered_utilities.append(utility)
return utility
def load_utilities():
from . import library
from . import numbacdef
from . import refcounting
class CBuilderLibrary(object):
"""
Library of cbuilder functions.
"""
def __init__(self):
self.module = llvm.core.Module.new("cbuilderlib")
self.funcs = {}
def declare_registered(self, env):
"Declare all utilities in our module"
load_utilities()
for registered_utility in registered_utilities:
self.declare(registered_utility, env, self.module)
def declare(self, numba_cdef, env, llvm_module):
if numba_cdef not in self.funcs:
specialized_cdef, lfunc = declare(numba_cdef, env, self.module)
self.funcs[numba_cdef] = specialized_cdef, lfunc
else:
specialized_cdef, lfunc = self.funcs[numba_cdef]
name = numba_cdef._name_
lfunc_type = specialized_cdef.signature()
lfunc = llvm_module.get_or_insert_function(lfunc_type, name)
return lfunc
def link(self, llvm_module):
"""
Link the CBuilder library into the target module.
"""
llvm_module.link_in(self.module, preserve=True)
########NEW FILE########
__FILENAME__ = numbacdef
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import llvm.core
from llvm_cbuilder import builder
from_numba = builder.CStruct.from_numba_struct
class NumbaCDefinition(builder.CDefinition):
"""
    Numba utility that simplifies working with llvm_cbuilder.
"""
def __init__(self, env, llvm_module):
# Environments
self.env = env
self.func_env = env.translation.crnt
self.context = env.context
self.llvm_module = llvm_module
self.set_signature(self.env, self.context)
type(self)._name_ = type(self).__name__
super(NumbaCDefinition, self).__init__()
#------------------------------------------------------------------------
# Convenience Methods
#------------------------------------------------------------------------
def external_cfunc(self, func_name):
"Get a CFunc from an external function"
signature, lfunc = self.env.context.external_library.declare(
self.llvm_module,
func_name)
assert lfunc.module is self.llvm_module
return builder.CFunc(self, lfunc)
def cbuilder_cfunc(self, numba_cdef):
"Get a CFunc from a NumbaCDefinition"
lfunc = self.env.context.cbuilder_library.declare(numba_cdef, self.env,
self.llvm_module)
assert lfunc.module is self.llvm_module
return builder.CFunc(self, lfunc)
#------------------------------------------------------------------------
# CDefinition stuff
#------------------------------------------------------------------------
def signature(self):
argtypes = [type for name, type in self._argtys_]
return llvm.core.Type.function(self._retty_, argtypes)
def __call__(self, module):
lfunc = super(NumbaCDefinition, self).__call__(module)
# lfunc.linkage = llvm.core.LINKAGE_LINKONCE_ODR
return lfunc
def set_signature(self, env, context):
"""
Set the cbuilder signature through _argtys_ and optionally the
_retty_ attributes.
"""
########NEW FILE########
__FILENAME__ = refcounting
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba import llvm_types
from numba import typedefs
from numba.utility.cbuilder.library import register
from numba.utility.cbuilder.numbacdef import NumbaCDefinition, from_numba
from llvm_cbuilder import shortnames
#------------------------------------------------------------------------
# Utilities
#------------------------------------------------------------------------
p_py_ssize_t = shortnames.pointer(shortnames.py_ssize_t)
def ob_refcnt(obj_p):
return deref(p_refcnt(obj_p))
def p_refcnt(obj_p):
return obj_p.cast(p_py_ssize_t)
def deref(obj_p):
return obj_p[0]
def const(ctemp, val):
return ctemp.parent.constant(shortnames.py_ssize_t, val)
def add_refcnt(obj_p, refcnt):
refcnt = const(obj_p, refcnt)
refs = ob_refcnt(obj_p)
refs += refcnt
def not_null(ptr):
return ptr.cast(shortnames.py_ssize_t) != const(ptr, 0)
#------------------------------------------------------------------------
# Base Refcount Class
#------------------------------------------------------------------------
# TODO: Support tracing refcount operations (debug mode)
# TODO: Support refcount error checking (testing/debug mode)
class Refcounter(NumbaCDefinition):
def set_signature(self, env, context):
PyObject = typedefs.PyObject_HEAD
self._argtys_ = [
('obj', PyObject.pointer().to_llvm(context)),
]
self._retty_ = shortnames.void
#------------------------------------------------------------------------
# Refcount Implementations
#------------------------------------------------------------------------
@register
class Py_INCREF(Refcounter):
"LLVM inline version of Py_INCREF"
def body(self, obj):
add_refcnt(obj, 1)
self.ret()
@register
class Py_DECREF(Refcounter):
"LLVM inline version of Py_DECREF"
def body(self, obj):
refcnt = ob_refcnt(obj)
one = self.constant(refcnt.type, 1)
Py_DecRef = self.external_cfunc('Py_DecRef')
with self.ifelse(refcnt > one) as ifelse:
with ifelse.then():
# ob_refcnt > 1, just decrement
add_refcnt(obj, -1)
with ifelse.otherwise():
# ob_refcnt == 1, dealloc
Py_DecRef(obj)
self.ret()
@register
class Py_XINCREF(Refcounter):
"LLVM inline version of Py_XINCREF"
def body(self, obj):
with self.ifelse(not_null(obj)) as ifelse:
with ifelse.then():
add_refcnt(obj, 1)
self.ret()
@register
class Py_XDECREF(Refcounter):
"LLVM inline version of Py_XDECREF"
def body(self, obj):
py_decref = self.cbuilder_cfunc(Py_DECREF)
with self.ifelse(not_null(obj)) as ifelse:
with ifelse.then():
py_decref(obj)
self.ret()
########NEW FILE########
__FILENAME__ = math_utilities
import numba as nb
def py_modulo(restype, argtypes):
if restype.is_float:
def py_modulo(a, n):
r = rem(a, n)
if (r != 0) and (r < 0) ^ (n < 0):
r += n
return r
instr = 'frem'
else:
assert restype.is_int
def py_modulo(a, n):
r = rem(a, n)
if r != 0 and (r ^ n) < 0:
r += n
return r
if restype.is_unsigned:
instr = 'urem'
else:
instr = 'srem'
rem = nb.declare_instruction(restype(restype, restype), instr)
return nb.jit(restype(*argtypes))(py_modulo)
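# Why the ``r += n`` adjustment above: LLVM's srem/frem follow C semantics
# (the remainder takes the sign of the dividend), while Python's ``%`` takes
# the sign of the divisor. For example, C-style rem(-7, 3) is -1, whereas
# Python expects -7 % 3 == 2; adding n (here 3) to -1 recovers 2.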
########NEW FILE########
__FILENAME__ = virtuallookup
# -*- coding: utf-8 -*-
"""
Virtual method lookup written in Numba.
"""
from __future__ import division, absolute_import
import ctypes.util
import numba
from numba import *
from numba.exttypes import virtual
table_t = virtual.PyCustomSlots_Table
table_t_pp = table_t.pointer().pointer()
char_p = char.pointer()
void_p = void.pointer()
uint16_p = uint16.pointer()
# This is a bad idea!
# displacements_offset = table_t.offsetof('d')
displacements_offset = ctypes.sizeof(table_t.to_ctypes())
#------------------------------------------------------------------------
# Some libc functions
#------------------------------------------------------------------------
libc = ctypes.CDLL(ctypes.util.find_library('c'))
abort = libc.abort
abort.restype = None
abort.argtypes = []
printf = libc.printf
printf.restype = ctypes.c_int
printf.argtypes = [ctypes.c_char_p]
@jit(void_p(table_t_pp, uint64), wrap=False, nopython=True)
def lookup_method(table_pp, prehash):
"""
Look up a method in a PyCustomSlots_Table ** given a prehash.
PyCustomSlots *vtab = atomic_load(table_pp)
f = (prehash >> vtab->r) & vtab->m_f
g = vtab->d[prehash & vtab->m_g]
PyCustomSlot_Entry *entry = vtab->entries[f ^ g]
if (entry->id == prehash) {
void *vmethod = entry.ptr
call vmethod(obj, ...)
} else {
PyObject_Call(obj, "method", ...)
}
Note how the object stores a vtable **, instead of a vtable *. This
indirection allows producers of the table to generate a new table
(e.g. after adding a specialization) and replace the table for all
live objects.
We then atomically load the table, to allow using the table without
    the GIL (and having the GIL-holding thread update the table and rewrite
    the pointer).
    Hence the table pointer should *not* be cached by callers, since the
    first table miss can trigger a compilation whose result you will want
    to find on the next lookup.
"""
table_p = table_pp[0]
table = table_p[0]
displacements = uint16_p(char_p(table_p) + displacements_offset)
# Compute f
f = (prehash >> table.r) & table.m_f
# Compute g
# g = table.d[prehash & table.m_g]
g = displacements[prehash & table.m_g]
entry = table.entries[f ^ g]
if entry.id == prehash:
return entry.ptr
else:
return numba.NULL
@jit(void_p(table_t_pp, uint64, char_p), wrap=False, nopython=True)
def lookup_and_assert_method(table_pp, prehash, method_name):
result = lookup_method(table_pp, prehash)
if result == numba.NULL:
# printf("Error: expected method %s to be available\n", method_name)
# print "Error: expected method", method_name, "to be available"
printf("NumbaError: expected method ")
printf(method_name)
printf(" to be available\n")
abort()
return result
########NEW FILE########
__FILENAME__ = utils
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
import opcode
import ast
import pprint
try:
import __builtin__ as builtins
except ImportError:
import builtins
import numba
from .minivect.complex_support import Complex64, Complex128, Complex256
from .minivect import miniast, minitypes
def is_builtin(name):
return hasattr(builtins, name)
def itercode(code):
"""Return a generator of byte-offset, opcode, and argument
from a byte-code-string
"""
i = 0
extended_arg = 0
n = len(code)
while i < n:
c = code[i]
num = i
op = ord(c)
i = i + 1
oparg = None
if op >= opcode.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536
delta = yield num, op, oparg
if delta is not None:
abs_rel, dst = delta
assert abs_rel == 'abs' or abs_rel == 'rel'
i = dst if abs_rel == 'abs' else i + dst
def debugout(*args):
'''This is a magic function. If you use it in compiled functions,
Numba should generate code for displaying the received value.'''
if __debug__:
print(("debugout (non-translated): %s" % (''.join((str(arg)
for arg in args)),)))
def process_signature(sigstr, name=None):
'''
Given a signature string consisting of a return type, argument
types, and possibly a function name, return a signature object.
'''
sigstr = sigstr.replace('*', '.pointer()')
parts = sigstr.split()
types_dict = dict(numba.__dict__, d=numba.double, i=numba.int_)
loc = {}
# FIXME: Need something more robust to differentiate between
# name ret(arg1,arg2)
# and ret(arg1, arg2) or ret ( arg1, arg2 )
if len(parts) < 2 or '(' in parts[0] or '[' in parts[0] or '('==parts[1][0]:
signature = eval(sigstr, loc, types_dict)
else: # Signature has a name
signature = eval(' '.join(parts[1:]), loc, types_dict)
signature = signature.add('name', parts[0])
if name is not None:
signature = signature.add('name', name)
return signature
def process_sig(sigstr, name=None):
signature = process_signature(sigstr, name)
return signature.name, signature.return_type, signature.args
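# Hypothetical usage sketch ('d' and 'i' are bound above to numba.double and
# numba.int_):
#
#   name, restype, argtypes = process_sig("myfunc d(d, i)")
#   # name == 'myfunc', restype is double, argtypes correspond to (double, int_)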
class NumbaContext(miniast.LLVMContext):
# debug = True
# debug_elements = True
# Accept dynamic arguments
astbuilder_cls = miniast.DynamicArgumentASTBuilder
shape_type = minitypes.npy_intp.pointer()
strides_type = shape_type
optimize_broadcasting = False
def init(self):
self.astbuilder = self.astbuilder_cls(self)
self.typemapper = None
def is_object(self, type):
return super(NumbaContext, self).is_object(type) or type.is_array
# def promote_types(self, *args, **kwargs):
# return self.typemapper.promote_types(*args, **kwargs)
def get_minivect_context():
return NumbaContext()
context = get_minivect_context()
def ast2tree (node, include_attrs = True):
'''Transform a Python AST object into nested tuples and lists.'''
def _transform(node):
if isinstance(node, ast.AST):
fields = ((a, _transform(b))
for a, b in ast.iter_fields(node))
if include_attrs:
attrs = ((a, _transform(getattr(node, a)))
for a in node._attributes
if hasattr(node, a))
return (node.__class__.__name__, dict(fields), dict(attrs))
return (node.__class__.__name__, dict(fields))
elif isinstance(node, list):
return [_transform(x) for x in node]
return node
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _transform(node)
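# Small illustrative example (standard-library ast only): transforming the
# parsed expression 'x' yields nested (classname, fields) tuples, roughly
#
#   ast2tree(ast.parse('x', mode='eval'), include_attrs=False)
#   # -> ('Expression', {'body': ('Name', {'id': 'x', 'ctx': ('Load', {})})})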
def tree2ast(node, namespace):
'''Given an AST represented as tuples and lists, attempt to
reconstruct the AST object, given a namespace that defines the
node constructors.'''
def _construct(node):
if isinstance(node, tuple):
node_len = len(node)
if node_len in (2, 3) and hasattr(namespace, node[0]):
ctor = getattr(namespace, node[0])
assert dict == type(node[1])
kwargs = dict((k, _construct(v))
for k, v in node[1].items())
if node_len == 3:
kwargs.update((k, _construct(v))
for k, v in node[2].items())
try:
node = ctor(**kwargs)
except Exception as exn:
raise Exception('Could not construct %s given %r: %r' %
(node[0], kwargs, exn))
else:
node = tuple(_construct(x) for x in node)
elif isinstance(node, list):
node = [_construct(x) for x in node]
return node
return _construct(node)
def pformat_ast (node, include_attrs = True, **kws):
'''Transform a Python AST object into nested tuples and lists, and
return as a string formatted using pprint.pformat().'''
return pprint.pformat(ast2tree(node, include_attrs), **kws)
def dump(node, *args, **kws):
'''Transform a Python AST object into nested tuples and lists, and
pretty-print the result.'''
print((pformat_ast(node, *args, **kws)))
class TypedProperty(object):
'''Defines a class property that does a type check in the setter.'''
def __new__(cls, ty, doc, default=None):
rv = super(TypedProperty, cls).__new__(cls)
cls.__init__(rv, ty, doc, default)
return property(rv.getter, rv.setter, rv.deleter, doc)
def __init__(self, ty, doc, default=None):
self.propname = '_numba_property_%d' % (id(self),)
self.default = default
self.ty = ty
self.doc = doc
def getter(self, obj):
return getattr(obj, self.propname, self.default)
def setter(self, obj, new_val):
if not isinstance(new_val, self.ty):
raise ValueError(
'Invalid property setting, expected instance of type(s) %r '
'(got %r).' % (self.ty, type(new_val)))
setattr(obj, self.propname, new_val)
def deleter(self, obj):
delattr(obj, self.propname)
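# Hypothetical usage sketch: a TypedProperty is assigned at class scope and
# enforces the declared type on assignment.
#
#   class Point(object):
#       x = TypedProperty(int, "x coordinate", 0)
#
#   p = Point()
#   p.x = 3        # ok
#   p.x = "three"  # raises ValueError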
class WriteOnceTypedProperty(TypedProperty):
def __init__(self, ty, doc, default=None):
super(WriteOnceTypedProperty, self).__init__(ty, doc, default)
def setter(self, obj, *args, **kws):
assert not hasattr(obj, self.propname)
return super(WriteOnceTypedProperty, self).setter(obj, *args, **kws)
#------------------------------------------------------------------------
# File Utilities
#------------------------------------------------------------------------
# file name encodings (function copied from Cython)
def decode_filename(filename):
if isinstance(filename, unicode):
return filename
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
#------------------------------------------------------------------------
# General Purpose
#------------------------------------------------------------------------
def hashable(x):
try:
hash(x)
except TypeError:
return False
else:
return True
########NEW FILE########
__FILENAME__ = validate
# -*- coding: utf-8 -*-
"""
Initial AST validation and normalization.
"""
from __future__ import print_function, division, absolute_import
import ast
from numba import error, nodes
class ValidateAST(ast.NodeVisitor):
"Validate AST"
#------------------------------------------------------------------------
# Validation
#------------------------------------------------------------------------
def visit_GeneratorExp(self, node):
raise error.NumbaError(
node, "Generator comprehensions are not yet supported")
def visit_SetComp(self, node):
raise error.NumbaError(
node, "Set comprehensions are not yet supported")
def visit_DictComp(self, node):
raise error.NumbaError(
node, "Dict comprehensions are not yet supported")
# def visit_Raise(self, node):
# if node.tback:
# raise error.NumbaError(
# node, "Traceback argument to raise not supported")
def visit_For(self, node):
if not isinstance(node.target, (ast.Name, ast.Attribute,
nodes.TempStoreNode)):
raise error.NumbaError(
node.target, "Only a single target iteration variable is "
"supported at the moment")
self.generic_visit(node)
def visit_With(self, node):
self.visit(node.context_expr)
if node.optional_vars:
raise error.NumbaError(
node.context_expr,
"Only 'with python' and 'with nopython' is "
"supported at this moment")
self.generic_visit(node)
########NEW FILE########
__FILENAME__ = visitors
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import ast
import ast as ast_module
try:
import __builtin__ as builtins
except ImportError:
import builtins
import types
from numba import functions, PY3
from numba import nodes
from numba.nodes.metadata import annotate, query
from numba.typesystem.promotion import have_properties
try:
import numbers
except ImportError:
# pre-2.6
numbers = None
from numba import error, PY3
import logging
logger = logging.getLogger(__name__)
class CooperativeBase(object):
def __init__(self, *args, **kwargs):
pass
def _flatmap(func, sequence):
result = []
for elem in sequence:
res = func(elem)
if res is not None:
if isinstance(res, list):
result.extend(res)
else:
result.append(res)
return result
class NumbaVisitorMixin(CooperativeBase):
_overloads = None
func_level = 0
def __init__(self, context, func, ast, locals=None,
func_signature=None, nopython=0,
symtab=None, **kwargs):
assert locals is not None
super(NumbaVisitorMixin, self).__init__(
context, func, ast, func_signature=func_signature,
nopython=nopython, symtab=symtab, **kwargs)
self.env = kwargs.get('env', None)
self.context = context
self.ast = ast
self.function_cache = context.function_cache
self.symtab = symtab
self.func_signature = func_signature
self.nopython = nopython
self.llvm_module = kwargs.pop('llvm_module', None)
self.locals = locals
#self.local_scopes = [self.symtab]
self.current_scope = symtab
self.have_cfg = getattr(self.ast, 'flow', False)
self.closures = kwargs.get('closures')
self.is_closure = kwargs.get('is_closure', False)
self.kwargs = kwargs
if self.have_cfg:
self.flow_block = self.ast.flow.blocks[1]
else:
self.flow_block = None
self.func = func
if not self.valid_locals(func):
assert isinstance(ast, ast_module.FunctionDef)
locals, cellvars, freevars = determine_variable_status(self.env, ast,
self.locals)
self.names = self.global_names = freevars
self.argnames = tuple(name.id for name in ast.args.args)
argnames = set(self.argnames)
local_names = [local_name for local_name in locals
if local_name not in argnames]
self.varnames = self.local_names = list(self.argnames) + local_names
self.cellvars = cellvars
self.freevars = freevars
else:
f_code = self.func.__code__
self.names = self.global_names = f_code.co_names
self.varnames = self.local_names = list(f_code.co_varnames)
if PY3:
def recurse_co_consts(fco):
for _var in fco.co_consts:
if not isinstance(_var, types.CodeType):
continue
self.varnames.extend((_name for _name in _var.co_varnames
if not _name.startswith('.')))
recurse_co_consts(_var)
recurse_co_consts(f_code)
self.argnames = tuple(self.varnames[:f_code.co_argcount])
if f_code.co_cellvars:
self.varnames.extend(
cellvar for cellvar in f_code.co_cellvars
if cellvar not in self.varnames)
self.cellvars = set(f_code.co_cellvars)
self.freevars = set(f_code.co_freevars)
if func is None:
self.func_globals = kwargs.get('func_globals', None) or {}
self.module_name = self.func_globals.get("__name__", "")
else:
self.func_globals = func.__globals__
self.module_name = self.func.__module__
# Add variables declared in locals=dict(...)
self.local_names.extend(
local_name for local_name in self.locals
if local_name not in self.local_names)
if self.is_closure_signature(func_signature) and func is not None:
# If a closure is backed up by an actual Python function, the
# closure scope argument is absent
from numba import closures
self.argnames = (closures.CLOSURE_SCOPE_ARG_NAME,) + self.argnames
self.varnames.append(closures.CLOSURE_SCOPE_ARG_NAME)
# Just the globals we will use
self._myglobals = {}
for name in self.names:
try:
self._myglobals[name] = self.func_globals[name]
except KeyError:
# Assumption here is that any name not in globals or
                    # builtins is an attribute.
self._myglobals[name] = getattr(builtins, name, None)
if self._overloads:
self.visit = self._visit_overload
self.visitchildren = self.generic_visit
@property
def func_name(self):
if "func_name" in self.kwargs:
return self.kwargs["func_name"]
return self.ast.name
@property
def qualified_name(self):
qname = self.kwargs.get("qualified_name", None)
if qname is None:
qname = "%s.%s" % (self.module_name, self.func_name)
return qname
@property
def current_env(self):
return self.env.translation.crnt
def annotate(self, node, key, value):
annotate(self.env, node, key, value)
def query(self, node, key):
return query(self.env, node, key)
def error(self, node, msg):
"Issue a terminating error"
raise error.NumbaError(node, msg)
def deferred_error(self, node, msg):
"Issue a deferred-terminating error"
self.current_env.error_env.collection.error(node, msg)
def warn(self, node, msg):
"Issue a warning"
self.current_env.error_env.collection.warning(node, msg)
def visit_func_children(self, node):
self.func_level += 1
self.generic_visit(node)
self.func_level -= 1
return node
def valid_locals(self, func):
if self.ast is None or self.env is None:
return True
return (func is not None and
query(self.env, self.ast, "__numba_valid_code_object",
default=True))
def invalidate_locals(self, ast=None):
ast = ast or self.ast
if query(self.env, ast, "variable_status_tuple"):
# Delete variable status of the function (local/free/cell status)
annotate(self.env, ast, variable_status_tuple=None)
if self.func and ast is self.ast:
# Invalidate validity of code object
annotate(self.env, ast, __numba_valid_code_object=False)
def _visit_overload(self, node):
assert self._overloads
try:
return super(NumbaVisitorMixin, self).visit(node)
except error.NumbaError as e:
# Try one of the overloads
cls_name = type(node).__name__
for i, cls_name in enumerate(self._overloads):
for overload_name, func in self._overloads[cls_name]:
try:
return func(self, node)
except error.NumbaError as e:
if i == len(self._overloads) - 1:
raise
assert False, "unreachable"
def add_overload(self, visit_name, func):
assert visit_name.startswith("visit_")
if not self._overloads:
self._overloads = {}
self._overloads.setdefault(visit_name, []).append(func)
def is_closure_signature(self, func_signature):
from numba import closures
return closures.is_closure_signature(func_signature)
def run_template(self, s, vars=None, **substitutions):
from numba import templating
func = self.func
if func is None:
d = dict(self.func_globals)
exec('def __numba_func(): pass', d, d)
func = d['__numba_func']
templ = templating.TemplateContext(self.context, s, env=self.env)
if vars:
            for name, type in vars.items():  # items() works on Python 2 and 3
templ.temp_var(name, type)
symtab, tree = templ.template_type_infer(
substitutions, symtab=self.symtab,
closure_scope=getattr(self.ast, "closure_scope", None),
func=func)
self.symtab.update(templ.get_vars_symtab())
return tree
def keep_alive(self, obj):
"""
Keep an object alive for the lifetime of the translated unit.
This is a HACK. Make live objects part of the function-cache
"""
functions.keep_alive(self.func, obj)
def have(self, t1, t2, p1, p2):
"""
Return whether the two variables have the indicated properties:
>>> have(int_, float_, "is_float", "is_int")
float_
If true, returns the type indicated by the first property.
"""
return have_properties(t1, t2, p1, p2)
def have_types(self, v1, v2, p1, p2):
return self.have(v1.type, v2.type, p1, p2)
def visitlist(self, list):
list[:] = _flatmap(self.visit, list)
return list
def is_complex(self, n):
if numbers:
return isinstance(n, numbers.Complex)
return isinstance(n, complex)
def is_real(self, n):
if numbers:
return isinstance(n, numbers.Real)
return isinstance(n, float)
def is_int(self, n):
if numbers:
            return isinstance(n, numbers.Integral)
return isinstance(n, (int, long))
def visit_CloneNode(self, node):
return node
def visit_ControlBlock(self, node):
#self.local_scopes.append(node.symtab)
self.setblock(node)
self.visitlist(node.phi_nodes)
self.visitlist(node.body)
#self.local_scopes.pop()
return node
def setblock(self, cfg_basic_block):
if cfg_basic_block.is_fabricated:
return
old = self.flow_block
self.flow_block = cfg_basic_block
if old is not cfg_basic_block:
self.current_scope = cfg_basic_block.symtab
self.changed_block(old, cfg_basic_block)
def changed_block(self, old_block, new_block):
"""
Callback for when a new cfg block is encountered.
"""
def handle_phis(self, reversed=False):
blocks = self.ast.flow.blocks
if reversed:
blocks = blocks[::-1]
for block in blocks:
for phi_node in block.phi_nodes:
self.handle_phi(phi_node)
class NumbaVisitor(ast.NodeVisitor, NumbaVisitorMixin):
"Non-mutating visitor"
def visitlist(self, list):
return _flatmap(self.visit, list)
class NumbaTransformer(NumbaVisitorMixin, ast.NodeTransformer):
"Mutating visitor"
class NoPythonContextMixin(object):
def visit_WithPythonNode(self, node, errorcheck=True):
if not self.nopython and errorcheck:
raise error.NumbaError(node, "Not in 'with nopython' context")
self.nopython -= 1
self.visitlist(node.body)
self.nopython += 1
return node
def visit_WithNoPythonNode(self, node, errorcheck=True):
if self.nopython and errorcheck:
raise error.NumbaError(node, "Not in 'with python' context")
self.nopython += 1
self.visitlist(node.body)
self.nopython -= 1
return node
class VariableFindingVisitor(NumbaVisitor):
"Find referenced and assigned ast.Name nodes"
function_level = 0
def __init__(self, *args, **kwargs):
self.params = set()
self.assigned = set()
self.referenced = set()
self.globals = set()
self.func_defs = []
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
add_to = self.referenced
elif isinstance(node.ctx, ast.Param):
add_to = self.params
else:
add_to = self.assigned
add_to.add(node.id)
def visit_Global(self, node):
self.globals.update(node.names)
def visit_Import(self, node):
self.assigned.update((alias.asname or alias.name.split('.', 1)[0])
for alias in node.names
if alias.name != '*')
visit_ImportFrom = visit_Import
def visit_FunctionDef(self, node):
if self.function_level == 0:
if node.args.vararg:
self.params.add(node.args.vararg)
if node.args.kwarg:
self.params.add(node.args.kwarg)
self.function_level += 1
self.generic_visit(node)
self.function_level -= 1
else:
if hasattr(node, 'name'):
self.assigned.add(node.name)
self.func_defs.append(node)
visit_Lambda = visit_FunctionDef
def visit_ClassDef(self, node):
self.assigned.add(node.name)
self.generic_visit(node)
def visit_ClosureNode(self, node):
self.assigned.add(node.name)
self.func_defs.append(node)
def determine_variable_status(env, ast, locals_dict):
"""
Determine what category referenced and assignment variables fall in:
- local variables
- free variables
- cell variables
"""
variable_status = query(env, ast, 'variable_status_tuple')
if variable_status:
return variable_status
v = VariableFindingVisitor()
v.visit(ast)
if not v.params.isdisjoint(v.globals):
raise error.NumbaError(
node, "Parameters cannot be declared global")
locals = v.params.union(v.assigned, locals_dict) - v.globals
freevars = v.referenced - locals
cellvars = set()
# Compute cell variables
for func_def in v.func_defs:
func_env = env.translation.make_partial_env(func_def, locals={})
inner_locals_dict = func_env.locals
inner_locals, inner_cellvars, inner_freevars = \
determine_variable_status(env, func_def,
inner_locals_dict)
cellvars.update(locals.intersection(inner_freevars))
# from pprint import pformat
# print(ast.name, "locals", pformat(locals),
# "cellvars", pformat(cellvars),
# "freevars", pformat(freevars),
# "locals_dict", pformat(locals_dict))
# print(ast.name, "locals", pformat(locals))
# Cache state
annotate(env, ast, variable_status_tuple=(locals, cellvars, freevars))
return locals, cellvars, freevars
########NEW FILE########
__FILENAME__ = astviz
# -*- coding: utf-8 -*-
"""
Visualize an AST.
"""
from __future__ import print_function, division, absolute_import
import os
import ast
import textwrap
from itertools import chain, imap, ifilter
from numba.viz.graphviz import render
# ______________________________________________________________________
# AST Constants
ast_constant_classes = (
ast.expr_context,
ast.operator,
ast.unaryop,
ast.cmpop,
)
# ______________________________________________________________________
# Utilities
is_ast = lambda node: (isinstance(node, (ast.AST, list)) and not
isinstance(node, ast_constant_classes))
class SomeConstant(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
def make_list(node):
if isinstance(node, list):
return node
elif isinstance(node, ast.AST):
return [node]
else:
return [SomeConstant(node)]
def nodes(node):
return [getattr(node, attr, None) for attr in node._fields]
def fields(node):
return zip(node._fields, nodes(node))
# ______________________________________________________________________
# Adaptor
class ASTGraphAdaptor(object):
def children(self, node):
return list(chain(*imap(make_list, ifilter(is_ast, nodes(node)))))
# ______________________________________________________________________
# Renderer
def strval(val):
if isinstance(val, ast_constant_classes):
return type(val).__name__ # Load, Store, Param
else:
return repr(val)
class ASTGraphRenderer(object):
def render(self, node):
all_fields = fields(node)
for attr in getattr(node, '_attributes', []):
if attr not in node._fields and hasattr(node, attr):
all_fields.append((attr, getattr(node, attr)))
all_fields = [(attr, v) for attr, v in all_fields if not is_ast(v)]
args = ",\n".join('%s=%s' % (a, strval(v)) for a, v in all_fields)
if args:
args = '\n' + args
return "%s(%s)" % (type(node).__name__, args)
def render_edge(self, source, dest):
# See which attribute of the source node matches the destination node
for attr_name, attr in fields(source):
if attr is dest or (isinstance(attr, list) and dest in attr):
# node.attr == dst_node or dest_node in node.attr
return attr_name
# ______________________________________________________________________
# Entry Point
def render_ast(ast, output_file):
render([ast], output_file, ASTGraphAdaptor(), ASTGraphRenderer())
# ______________________________________________________________________
# Test
if __name__ == '__main__':
source = textwrap.dedent("""
def func(a, b):
for i in range(10):
if i < 5:
print "hello"
""")
mod = ast.parse(source)
print(mod)
render_ast(mod, os.path.expanduser("~/ast.dot"))
########NEW FILE########
__FILENAME__ = cfgviz
# -*- coding: utf-8 -*-
"""
Visualize a CFG.
"""
from __future__ import print_function, division, absolute_import
import os
import ast
import textwrap
from numba import void
from numba import utils
from numba.viz import graphviz
# ______________________________________________________________________
# Utilities
def cf_from_source(source, func_globals):
"Render the SSA graph given python source code"
from numba import pipeline
from numba import environment
mod = ast.parse(source)
func_ast = mod.body[0]
env = environment.NumbaEnvironment.get_environment()
func_env, _ = pipeline.run_pipeline2(env, None, func_ast, void(),
pipeline_name='cf',
function_globals=dict(func_globals))
return func_env.symtab, func_env.flow #func_env.cfg
# ______________________________________________________________________
# Adaptor
class CFGGraphAdaptor(graphviz.GraphAdaptor):
def children(self, node):
return node.children
# ______________________________________________________________________
# Renderer
def fmtnode(node):
if isinstance(node, ast.Assign):
return "%s = %s" % (node.targets, node.value)
else:
return str(node)
fmtnode = utils.pformat_ast # ast.dump # str
class CFGGraphRenderer(graphviz.GraphRenderer):
def render(self, node):
return "%s\n%s" % (node, "; ".join(map(fmtnode, node.body)))
# ______________________________________________________________________
# Entry Points
def render_cfg(cfflow, output_file):
"Render the SSA graph given the flow.CFGFlow and the symbol table"
graphviz.render(cfflow.blocks, output_file,
CFGGraphAdaptor(), CFGGraphRenderer())
def render_cfg_from_source(source, output_file, func_globals=()):
"Render the SSA graph given python source code"
symtab, cfflow = cf_from_source(source, func_globals)
render_cfg(cfflow, output_file)
# ______________________________________________________________________
# Test
if __name__ == '__main__':
source = textwrap.dedent("""
def func():
# x_0
x = 0 # x_1
# x_2
for i in range(10):
if i < 5:
x = i # x_3
# x_4
x = x + i # x_5
y = x
x = i # x_6
""")
render_cfg_from_source(source, os.path.expanduser("~/cfg.dot"))
########NEW FILE########
__FILENAME__ = graphviz
# -*- coding: utf-8 -*-
"""
Graph visualization for arbitrary graphs. An attempt to unify all the many
different graphs we want to visualize (numba.control_flow.graphviz,
numba.minivect.graphviz, etc).
"""
from __future__ import print_function, division, absolute_import
import os
import ast
import logging
import textwrap
import subprocess
from itertools import chain
from numba.minivect.pydot import pydot
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------
# Graphviz Mapper
#------------------------------------------------------------------------
class GraphvizGenerator(object):
"""
Render an arbitrary graph as a graphviz tree.
Nodes must be hashable.
"""
counter = 0
def __init__(self, graph_adaptor, graph_renderer,
graph_name, graph_type):
self.graph_adaptor = graph_adaptor
self.graph_renderer = graph_renderer
self.graph = pydot.Dot(graph_name, graph_type=graph_type)
# Graph Node -> PyDot Node
self.seen = {}
# { (source, dest) }
self.seen_edges = set()
# ______________________________________________________________________
# Add to pydot graph
def add_edge(self, source, dest):
"Add an edge between two pydot nodes and set the colors"
if (source, dest) in self.seen_edges:
return
self.seen_edges.add((source, dest))
edge = pydot.Edge(self.seen[source], self.seen[dest])
edge_label = self.graph_renderer.render_edge(source, dest)
if edge_label is not None:
edge.set_label(edge_label)
self.graph.add_edge(edge)
def create_node(self, node):
"Create a graphviz node from the miniast node"
label = self.graph_renderer.render(node)
self.counter += 1
pydot_node = pydot.Node(str(self.counter), label=label, shape='box')
self.graph.add_node(pydot_node)
return pydot_node
# ______________________________________________________________________
# Traverse Graph
def dfs(self, node):
"Visit children and add edges to their Graphviz nodes."
if node in self.seen:
return
pydot_node = self.create_node(node)
self.seen[node] = pydot_node
for child in self.graph_adaptor.children(node):
self.dfs(child)
self.add_edge(node, child)
#------------------------------------------------------------------------
# Graph Adaptors
#------------------------------------------------------------------------
class GraphAdaptor(object):
"""
Allow traversal of a foreign AST.
"""
def children(self, node):
"Return the children for this graph node"
#------------------------------------------------------------------------
# Graph Rendering
#------------------------------------------------------------------------
class GraphRenderer(object):
"""
Allow traversal of a foreign AST.
"""
def render(self, node):
"Return the label for this graph node"
def render_edge(self, source, dest):
"Return the label for this edge or None"
#------------------------------------------------------------------------
# Create image from dot
#------------------------------------------------------------------------
def write_image(dot_output):
prefix, ext = os.path.splitext(dot_output)
png_output = prefix + '.png'
fp = open(png_output, 'wb')
try:
p = subprocess.Popen(['dot', '-Tpng', dot_output],
stdout=fp.fileno(),
stderr=subprocess.PIPE)
p.wait()
except EnvironmentError as e:
logger.warn("Unable to write png: %s (did you install the "
"'dot' program?). Wrote %s" % (e, dot_output))
else:
logger.warn("Wrote %s" % png_output)
finally:
fp.close()
#------------------------------------------------------------------------
# Entry points
#------------------------------------------------------------------------
def render(G, output_file, adaptor, renderer,
graph_name="G", graph_type="digraph"):
"""
G: The graph: [node]
output_file: output dot file name
adaptor: GraphAdaptor
renderer: GraphRenderer
"""
gen = GraphvizGenerator(adaptor, renderer, graph_name, graph_type)
for root in G:
gen.dfs(root)
dotgraph = gen.graph
# output_file, ext = os.path.splitext(output_file)
# dotgraph.write(output_file + '.png', format='png')
dotgraph.write(output_file)
write_image(output_file)
########NEW FILE########
__FILENAME__ = ssaviz
# -*- coding: utf-8 -*-
"""
Visualize an SSA graph (the def/use chains).
"""
from __future__ import print_function, division, absolute_import
import os
import textwrap
from numba.viz.graphviz import render
from numba.viz.cfgviz import cf_from_source
from numba.control_flow.cfstats import NameAssignment
# ______________________________________________________________________
# Adaptor
class SSAGraphAdaptor(object):
def children(self, node):
return [nameref.variable for nameref in node.cf_references]
# ______________________________________________________________________
# Renderer
class SSAGraphRenderer(object):
def render(self, node):
if node.renamed_name:
return node.unmangled_name
return node.name
def render_edge(self, source, dest):
return "use"
# ______________________________________________________________________
# Entry Points
def render_ssa(cfflow, symtab, output_file):
"Render the SSA graph given the flow.CFGFlow and the symbol table"
cfstats = [stat for b in cfflow.blocks for stat in b.stats]
defs = [stat.lhs.variable for stat in cfstats
if isinstance(stat, NameAssignment)]
nodes = symtab.values() + defs
render(nodes, output_file, SSAGraphAdaptor(), SSAGraphRenderer())
def render_ssa_from_source(source, output_file, func_globals=()):
"Render the SSA graph given python source code"
symtab, cfflow = cf_from_source(source, func_globals)
render_ssa(cfflow, symtab, output_file)
# ______________________________________________________________________
# Test
if __name__ == '__main__':
source = textwrap.dedent("""
def func():
# x_0
x = 0 # x_1
# x_2
for i in range(10):
if i < 5:
x = i # x_3
# x_4
# x = x + i # x_5
y = x
x = i # x_6
""")
render_ssa_from_source(source, os.path.expanduser("~/ssa.dot"))
########NEW FILE########
__FILENAME__ = compiler
import inspect
from numba import typesystem
import numba.pipeline
from numba.exttypes import virtual
import numba.exttypes.entrypoints
import numba.decorators
from numba import functions
def resolve_argtypes(env, py_func, template_signature,
args, kwargs, translator_kwargs):
"""
Given an autojitting numba function, return the argument types.
These need to be resolved in order for the function cache to work.
TODO: have a single entry point that resolves the argument types!
"""
assert not kwargs, "Keyword arguments are not supported yet"
locals_dict = translator_kwargs.get("locals", None)
argcount = py_func.__code__.co_argcount
if argcount != len(args):
if argcount == 1:
arguments = 'argument'
else:
arguments = 'arguments'
raise TypeError("%s() takes exactly %d %s (%d given)" % (
py_func.__name__, argcount,
arguments, len(args)))
return_type = None
argnames = inspect.getargspec(py_func).args
argtypes = [typesystem.numba_typesystem.typeof(x) for x in args]
if template_signature is not None:
template_context, signature = typesystem.resolve_templates(
locals_dict, template_signature, argnames, argtypes)
return_type = signature.return_type
argtypes = list(signature.args)
if locals_dict is not None:
for i, argname in enumerate(argnames):
if argname in locals_dict:
new_type = locals_dict[argname]
argtypes[i] = new_type
return typesystem.function(return_type, tuple(argtypes))
class Compiler(object):
def __init__(self, env, py_func, nopython, flags, template_signature):
self.env = env
self.py_func = py_func
self.nopython = nopython
self.flags = flags
self.target = flags.pop('target', 'cpu')
self.template_signature = template_signature
def resolve_argtypes(self, args, kwargs):
signature = resolve_argtypes(self.env, self.py_func,
self.template_signature,
args, kwargs, self.flags)
return signature
def compile_from_args(self, args, kwargs):
signature = self.resolve_argtypes(args, kwargs)
return self.compile(signature)
def compile(self, signature):
"Compile the Python function with the given signature"
class FunctionCompiler(Compiler):
def __init__(self, env, py_func, nopython, flags, template_signature):
super(FunctionCompiler,self).__init__(env, py_func, nopython, flags, template_signature)
self.ast = functions._get_ast(py_func)
def compile(self, signature):
jitter = numba.decorators.jit_targets[(self.target, 'ast')]
dec = jitter(restype=signature.return_type,
argtypes=signature.args,
target=self.target, nopython=self.nopython,
env=self.env, func_ast=self.ast, **self.flags)
compiled_function = dec(self.py_func)
return compiled_function
class ClassCompiler(Compiler):
def resolve_argtypes(self, args, kwargs):
assert not kwargs
# argtypes = map(self.env.crnt.typesystem.typeof, args)
argtypes = map(numba.typeof, args) # TODO: allow registering a type system and using it here
signature = typesystem.function(None, argtypes)
return signature
def compile(self, signature):
py_class = self.py_func
return numba.exttypes.entrypoints.autojit_extension_class(
self.env, py_class, self.flags, signature.args)
#------------------------------------------------------------------------
# Autojit Method Compiler
#------------------------------------------------------------------------
class MethodCompiler(Compiler):
def __init__(self, env, extclass, method, flags=None):
super(MethodCompiler, self).__init__(env, method.py_func,
method.nopython, flags or {},
method.template_signature)
self.extclass = extclass
self.method = method
def compile(self, signature):
from numba.exttypes.autojitclass import autojit_method_compiler
return autojit_method_compiler(
self.env, self.extclass, self.method, signature)
########NEW FILE########
__FILENAME__ = runtests
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import sys
import numba.testing as testing
if '-m' in sys.argv:
result = testing.multitest()
else:
result = testing.test()
sys.exit(0 if result else 1)
########NEW FILE########
__FILENAME__ = test_user_exc
from numba.compiler import compile_isolated
from numba import types
from numba import unittest_support as unittest
from numba.targets.cpu import NativeError
import numpy as np
class TestUserExc(unittest.TestCase):
def test_unituple_index_error(self):
def pyfunc(a, i):
return a.shape[i]
cres = compile_isolated(pyfunc, (types.Array(types.int32, 1, 'A'),
types.int32))
cfunc = cres.entry_point
a = np.empty(2)
self.assertEqual(cfunc(a, 0), pyfunc(a, 0))
with self.assertRaises(NativeError):
cfunc(a, 2)
if __name__ == '__main__':
unittest.main()
########NEW FILE########
__FILENAME__ = versioneer
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import re
import subprocess
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
GIT = "git"
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def do_vcs_install(versionfile_source, ipy):
run_command([GIT, "add", "versioneer.py"])
run_command([GIT, "add", versionfile_source])
run_command([GIT, "add", ipy])
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile)
if ver:
if verbose: print("got version from file %s %s" % (versionfile, ver))
return ver
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
########NEW FILE########
| UTF-8 | Python | false | false | 2,953,372 | py | 16,703 | allPythonContent.py | 3,862 | 0.552561 | 0.542143 | 0.000002 | 93,617 | 29.547187 | 218 |
isaacaaa/server | 13,941,463,891,948 | 2c2c64cd5edb6960cf0451edab903a3c615bc5f3 | dc405b3efbafeab2770e3b97e3c03bdf03b4fa92 | /client.py | 07139d343ccbd120b1335889fe31d9553b3a6567 | [] | no_license | https://github.com/isaacaaa/server | 5ceb6ffd50affdd94e45ed2c29fd272e21f5a4bf | 4faf75c63dbc77dbba44314032583a0d1e55f161 | refs/heads/main | "2023-04-03T10:32:55.096272" | "2021-04-03T15:44:01" | "2021-04-03T15:44:01" | 349,131,666 | 0 | 1 | null | false | "2021-03-23T10:49:30" | "2021-03-18T15:50:09" | "2021-03-18T15:53:11" | "2021-03-23T10:49:29" | 2 | 0 | 0 | 0 | JavaScript | false | false |
import socket
class Client:
def __init__(self) -> None:
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.server.connect(("127.0.0.1", 20213))
def send_msg(self):
msg = b"This is a test from python client"
self.server.send(msg)
def run(self):
self.send_msg()
while True:
server = self.server
message, addr = server.recvfrom(1024)
print(message, addr)
break
client = Client()
client.run()
| UTF-8 | Python | false | false | 521 | py | 2 | client.py | 2 | 0.564299 | 0.535509 | 0 | 23 | 21.608696 | 70 |
zjajgyy/webServer_project4 | 3,770,981,315,056 | 2bd2583304495c218695a8e1e2f0c7665e370c98 | cea68be7423264081508cf28f569dcefd3c2c40e | /serverTest.py | 26348fefccd13ea398be539911e976f216fb2f97 | [] | no_license | https://github.com/zjajgyy/webServer_project4 | 5650280b9ca07c057150566948972bb0f7493e06 | bdc48747fd22a4c102df5df5964fa12f9ceac8e2 | refs/heads/master | "2021-01-19T02:09:21.119565" | "2016-11-10T07:28:10" | "2016-11-10T07:28:10" | 73,357,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request, Response
import os
import imghdr
from PIL import Image
from io import BytesIO
app = Flask(__name__)
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpeg', 'bmp', 'gif'])
def isnum(num):
try:
if(float(num) > 0):
return True
else:
return False
except ValueError:
return False
@app.route('/resize', methods=['POST', 'GET'])
def resize():
upload_file = request.files['media']
scale = request.args.get('scale')
if (scale == "" or scale == None):
scale = "1"
elif (isnum(scale)):
scale = scale
else:
scale = "fail"
if (isnum(scale)):
scale = float(scale)
if scale!=0 and upload_file:
try:
im = Image.open(upload_file)
except OSError:
return Response(status=400)
(width, height) = im.size
imageType = im.format
print(width*scale, height*scale)
out = im.resize((int(width*scale), int(height*scale)), Image.ANTIALIAS)
#im.thumbnail((int(width*scale), int(height*scale)))
byte_io = BytesIO()
out.save(byte_io, imageType)
byte_io.seek(0)
return Response(byte_io, mimetype="image/"+imageType)
else:
return Response(status=400)
else :
return Response(status=400)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8889)
| UTF-8 | Python | false | false | 1,314 | py | 3 | serverTest.py | 1 | 0.611872 | 0.59589 | 0 | 52 | 24.25 | 77 |
deyh2020/RRAM-RADAR-Tuning | 3,229,815,452,211 | 131a4ba4a7f2a0e2d8bab6cd1e14d6c2888aa8fe | e3fa86932742b5b3cf504ee0c7b00b3fb13ad959 | /exptdata/misc/forming/form.py | c68b18a29915f2ea5db1aa59479f5afeef493aef | [] | no_license | https://github.com/deyh2020/RRAM-RADAR-Tuning | e341a180694758e5836f2ad7bba6e2ec1e734b9f | 5038af308a4bf0d8ca233f9699f31019d6580985 | refs/heads/master | "2023-06-26T08:49:31.009748" | "2021-07-27T02:33:47" | "2021-07-27T02:33:47" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib as mpl, numpy as np, pandas as pd
import matplotlib.pyplot as plt
# Load data and filter
data = pd.read_csv('data/form-5-1-20.csv', delimiter='\t', names=['addr', 'wlv', 'blv', 'rf', 'success'])
data = data[data['wlv'] == 2]
data = data[data['blv'] >= 2.5]
data = data[data['blv'] <= 4]
data = data[data['rf'] < 15e3]
print(data)
# LaTEX quality figures
mpl.rcParams.update(
{
'text.usetex': True,
'pgf.texsystem': 'lualatex',
'pgf.rcfonts': True,
}
)
plt.rc('font', family='serif', serif='Times', size=13)
# Means of final resistance
rf = data['rf']/1000
rf.hist(figsize=(4,3))
plt.title('Post-FORMing Resistance Distribution')
plt.xlabel('Post-FORMing Resistance (k$\\Omega$)')
plt.ylabel('Frequency')
plt.tight_layout()
plt.savefig('figs/form-rf-hist.eps')
plt.show()
# Means of final resistance
blv = data['blv']
blv.hist(figsize=(4,3))
plt.title('FORMing BL Voltage Distribution')
plt.xlabel('BL Voltage (V)')
plt.ylabel('Frequency')
plt.tight_layout()
plt.savefig('figs/form-bl-hist.eps')
plt.show()
| UTF-8 | Python | false | false | 1,048 | py | 672 | form.py | 25 | 0.670802 | 0.650763 | 0 | 40 | 25.2 | 105 |
ion9/eve-inc-waitlist | 9,921,374,496,480 | adfa84809558bb56cf824b0c64c2379a670c05dd | 5bd7b8bb5827844f1a8b984b3212c1eb35a9b3b3 | /migrations/versions/95eb3cae9e68_.py | 5c6a96cd6bb649b9ae9132c37f9898722c1409c6 | [
"MIT"
] | permissive | https://github.com/ion9/eve-inc-waitlist | f8a5ee96153e339b00ff2717555341597365fcea | 0be8a50b63c986ed0156c5b91963eb5845343ecb | refs/heads/master | "2021-07-07T09:52:17.179210" | "2017-09-19T13:04:04" | "2017-09-19T13:04:04" | 104,153,961 | 0 | 0 | null | true | "2017-09-20T02:16:48" | "2017-09-20T02:16:48" | "2017-09-06T17:08:21" | "2017-09-19T13:04:14" | 1,467 | 0 | 0 | 0 | null | null | null | """empty message
Revision ID: 95eb3cae9e68
Revises: aacfc1555e57
Create Date: 2016-12-30 20:43:41.455000
"""
# revision identifiers, used by Alembic.
revision = '95eb3cae9e68'
down_revision = 'aacfc1555e57'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('role_history',
sa.Column('entryID', sa.Integer(), nullable=False),
sa.Column('accountID', sa.Integer(), nullable=False),
sa.Column('byAccountID', sa.Integer(), nullable=False),
sa.Column('note', mysql.TEXT(), nullable=True),
sa.Column('time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['accountID'], ['accounts.id'], ),
sa.ForeignKeyConstraint(['byAccountID'], ['accounts.id'], ),
sa.PrimaryKeyConstraint('entryID')
)
op.create_index(op.f('ix_role_history_time'), 'role_history', ['time'], unique=False)
op.create_table('role_changes',
sa.Column('roleChangeID', sa.Integer(), nullable=False),
sa.Column('entryID', sa.Integer(), nullable=False),
sa.Column('roleID', sa.Integer(), nullable=False),
sa.Column('added', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['entryID'], ['role_history.entryID'], onupdate='CASCADE', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['roleID'], ['roles.id'], onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('roleChangeID')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ##
op.drop_table('role_changes')
op.drop_index(op.f('ix_role_history_time'), table_name='role_history')
op.drop_table('role_history')
### end Alembic commands ###
| UTF-8 | Python | false | false | 1,762 | py | 64 | 95eb3cae9e68_.py | 60 | 0.679909 | 0.654938 | 0 | 49 | 34.959184 | 107 |
QuentinGoss/sumo-windows10 | 10,548,439,722,958 | ebf9a63fcb91ec2e83e7574cdfeae52cc5149aeb | 2c98eec99d742b84089bc4f4bd2d06df5eac0d28 | /projects/grid2/reroute1/runner.py | 7fe5a9403b03a1f601b32292941502be8cc5b880 | [] | no_license | https://github.com/QuentinGoss/sumo-windows10 | 085896f282f5bb05bfbe90fd506699f6a77707b2 | ac1697b9fa857d0549d7847584427eec0c7d8f47 | refs/heads/master | "2020-04-03T18:04:21.236568" | "2019-10-29T17:22:41" | "2019-10-29T17:22:41" | 155,470,316 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
import optparse
import config # ./config.py
__IS_DEBUG_MODE = False # global flag used when calling debug()
###############################
# Import sumolib and TraCI
###############################
try:
sys.path.append(config.s_sumo_tools_dir) # Path to SUMO python modules
from sumolib import checkBinary
print("sumolib sucessfully imported.")
except ImportError:
sys.exit("Could not locate sumolib in " + config.s_sumo_tools_dir + ".")
import traci
###############################
# Uses optparse to add a --nogui option to run without using the gui.
###############################
def get_options():
opt_parser = optparse.OptionParser()
opt_parser.add_option("--nogui",action="store_true",default=False, help="run the commandline version of sumo")
opt_parser.add_option("--debug",action="store_true",default=False, help="Adds additional print statements for debugging.")
options, args = opt_parser.parse_args()
# Set our debug global so we only check once
global __IS_DEBUG_MODE
__IS_DEBUG_MODE = options.debug
return options
# end get_options()
###############################
# @param s_msg = message to be printed to console.
# Check if options.debug=true, then print to console.
###############################
def debug(s_msg):
global __IS_DEBUG_MODE
if __IS_DEBUG_MODE:
print(s_msg)
# end debug(s_msg)
###############################
# A quick pause
###############################
def pause():
input("Press return to continue...")
# end def pause()
###############################
# Generates a routefile
###############################
def generate_routefile():
debug("Starting to generate routefile...")
with open(config.s_route_file,"w") as routes:
print("<routes>", file=routes)
s_elements = generate_elements()
print(s_elements, file=routes)
print("</routes>", file=routes)
debug("Routefile created.")
# end generate_routefile
###############################
# Execute the TraCI control loop
###############################
def run():
n_step = 0
initialize()
while (n_step < config.n_time_steps):
traci.simulationStep()
timestep(n_step)
n_step += 1
# end while
traci.close()
# end run()
###############################
# Load in the neccesary libraries and launch SUMO + TraCI
###############################
def main():
if __name__ == "__main__":
debug("The main script is running.")
options = get_options()
debug("options.nogui=" + str(options.nogui))
# This script has been called from the command line.
# It will start sumo as a server, then connect and run.
if options.nogui:
s_sumo_binary = checkBinary('sumo')
else:
s_sumo_binary = checkBinary('sumo-gui')
debug("s_sumo_binary=" + s_sumo_binary)
# We need to generate a routefile for this simulation
generate_routefile()
# Have TraCI start sumo as a subprocess, then the python script
# can connect and run
debug("config.s_sumocfg_file="+config.s_sumocfg_file)
sumo_cmd = [s_sumo_binary, "-c", config.s_sumocfg_file]
traci.start(sumo_cmd)
run()
# End main
###################################################################
###################################################################
# START EDITING HERE
###################################################################
# Add imports here
import random
random.seed(config.n_seed)
###############################
# Global Variables
###############################
N_VEHICLES = 0
LLS_VEH_DATA = [] #[s_veh_id,s_exit_dest_edge,s_next_dest_edge]
###############################
# Add element(s) to routefiles
#
# @return string = The elements that will be added to the # routefile.
###############################
def generate_elements():
s_elements = "\t" + config.s_vtype + "\n"
return s_elements
# End def generate_elements()
###############################
# Initilize anything that needs to happen at step 0 here.
###############################
def initialize():
# Most of the vehicles are going to travel along the 4-lane highway
# so we'll create two starting points, one at either end.
traci.route.add("eastbound",["gneE52"])
traci.route.add("westbound",["-gneE50"])
debug("routes sucessfully added.")
return
# end def intialize
###############################
# Anything that happens within the TraCI control loop goes here.
# One pass of the loop == 1 timestep.
###############################
def timestep(n_step):
create_vehicles(n_step)
go_downtown(n_step)
# If the vehicles that went downtown have reached their destination.
# they will head towards their original exit.
for ls_row in LLS_VEH_DATA:
# 0 is the vehicle ID and 2 is the next destination
try:
if (traci.vehicle.getRoadID(ls_row[0]) == ls_row[2]):
# The vehicle has arived, send it on it's way. The exit destination
# 1 is the exit edge
traci.vehicle.changeTarget(ls_row[0],ls_row[1])
# Change the color to blue so we can recognize accomplished cars
traci.vehicle.setColor(ls_row[0],(0,0,255,0))
# remove the vehicle from the list since we no longer have a
LLS_VEH_DATA.remove(ls_row)
# end if
# This exception is called when a vehicle teleports beyond the ending
# destination and doesn't get properly removed from this list. I need
# to find some way to recognize when something teleports and remove
        # it, or handle it some other way.
except:
LLS_VEH_DATA.remove(ls_row)
#debug("\n" + ls_row[0] + " >> " + ls_row[2])
#debug(LLS_VEH_DATA)
#pause()
# for (ls_row in LLS_EXIT_DEST):
return
# end timestep
###############################
# Creates a vehicle
###############################
def create_vehicles(n_step):
# Check if the maximum amount of vehicles are in the simulation
if (config.n_vehicles_max <= len(traci.vehicle.getIDList())):
return
# Vehicle Creation
if (n_step % config.n_vehicle_spawn_rate == 0):
global N_VEHICLES
s_vehicle_id = "veh" + str(N_VEHICLES) # vehX
s_dest_edge = ""
# We want half of the vehicles to travel eastbound and half
# To travel westbound.
if (random.uniform(0.0,1.0) > 0.5):
traci.vehicle.add(s_vehicle_id, "eastbound", depart=n_step+1, pos=-4, speed=-3, lane=-6, typeID="chevy_s10")
s_dest_edge = "gneE50"
else:
traci.vehicle.add(s_vehicle_id, "westbound", depart=n_step+1, pos=-4, speed=-3, lane=-6, typeID="chevy_s10")
s_dest_edge = "-gneE52"
N_VEHICLES += 1
# Assign them a route.
traci.vehicle.changeTarget(s_vehicle_id,s_dest_edge)
# end if (n_step % N_VEHICLE_SPAWN_RATE == 0):
# end def create_vehicle
###############################
# Go downtown
# Sends a vehicle downtown and then reroutes to its exit destination.
###############################
def go_downtown(n_step):
# Every x timesteps we're going to reroute a vehicle at random to
# some point in town.
if (n_step % config.n_go_downtown_rate == 0 and n_step != 0):
# Pick a vehicle at random to reroute.
ls_veh_ids = traci.vehicle.getIDList()
n_random_int = random.randint(0,len(ls_veh_ids)-1)
s_veh_id = ls_veh_ids[n_random_int]
# We'll makes going downtown red.
traci.vehicle.setColor(s_veh_id,(255,0,0,0))
# Store the exit destination edge before we change it's route.
s_exit_edge = traci.vehicle.getRoute(s_veh_id)[-1]
# Send it someplace downtown.
s_dest_edge = "-gneE35"
traci.vehicle.changeTarget(s_veh_id,s_dest_edge)
# Add it to LLS_VEH_DATA to be tracked.
global LLS_VEH_DATA
is_found = False
for ls_row in LLS_VEH_DATA:
if (s_veh_id == ls_row[0]):
is_found = True
if (not is_found):
LLS_VEH_DATA.append([s_veh_id,s_exit_edge,s_dest_edge])
#end if (n_step % n_reroute_rate == 0):
# end deg go_downtown()
###############################
# The main entry point of the script.
###############################
main()
| UTF-8 | Python | false | false | 8,183 | py | 209 | runner.py | 90 | 0.565318 | 0.557253 | 0 | 271 | 29.191882 | 124 |
duguyue100/spikefuel | 2,697,239,467,298 | 1e82dcacdb6cfb57cb3cdb5e730f707f38bdc1d9 | 699cbda2cb495879cdcfbfb540df9171e119ae69 | /spikefuel/__init__.py | afa2d7de96325a8d71ec87768cc8f7783bd899c2 | [
"MIT"
] | permissive | https://github.com/duguyue100/spikefuel | 18edbc3c9557c2eb981c11fad6d19039b36c7212 | e06713b62c0bc7f881dd75a5a4842723cce4aaab | refs/heads/master | "2020-12-12T23:22:02.520501" | "2016-11-17T15:09:01" | "2016-11-17T15:09:01" | 53,040,299 | 16 | 10 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Init the package of spikefuel.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
| UTF-8 | Python | false | false | 87 | py | 39 | __init__.py | 26 | 0.724138 | 0.689655 | 0 | 5 | 16.4 | 33 |
dolamroth/visual_novel | 11,398,843,214,317 | 624111d6c8a7739ca4767687af8c3ca8901dec30 | 5a5e3e838ee72d9e2922192e6e0f240340ef6540 | /visual_novel/translation/migrations/0006_betalink_sent_to_vk_model.py | 493e584c8260df5e422a93cb619bc48799f89a1d | [
"MIT"
] | permissive | https://github.com/dolamroth/visual_novel | 6c89787079cf48dc0d0d70ca7ad3be3da97ae741 | e3f1b13187e0fff5f6c94af907e0944b45f85fdc | refs/heads/develop | "2023-07-26T19:50:42.472442" | "2022-11-22T11:54:26" | "2022-11-22T11:54:26" | 124,779,991 | 8 | 4 | MIT | false | "2023-09-10T13:37:13" | "2018-03-11T17:15:56" | "2023-09-10T10:09:10" | "2023-09-10T13:37:12" | 1,937 | 4 | 2 | 0 | Python | false | false | # Generated by Django 2.0.2 on 2018-08-04 09:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('translation', '0005_add_translation_item_status'),
]
operations = [
migrations.CreateModel(
name='TranslationBetaLinkSendToVK',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vk_group_id', models.CharField(default='', max_length=255, verbose_name='ID ะณััะฟะฟั ะะ')),
('post_date', models.DateField(auto_now_add=True, verbose_name='ะะฐัะฐ')),
('link', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='translation.TranslationBetaLink', verbose_name='ะกััะปะบะฐ ะฝะฐ ะฟะฐัั')),
],
options={
'verbose_name': 'ะะตัะฐัััะปะบะฐ, ะพัะฟัะฐะฒะปะตะฝะฝะฐั ะฒ ะณััะฟะฟั ะะ',
'verbose_name_plural': 'ะะตัะฐัััะปะบะธ, ะพัะฟัะฐะฒะปะตะฝะฝัะต ะฒ ะณััะฟะฟั ะะ',
'db_table': 'translation_betalink_send_to_vk',
},
),
]
| UTF-8 | Python | false | false | 1,214 | py | 176 | 0006_betalink_sent_to_vk_model.py | 128 | 0.602837 | 0.583333 | 0 | 28 | 39.285714 | 158 |
Raj-Bisen/python | 8,899,172,246,831 | ae21a36d5ad52abe01d45043cc77bf8063178043 | 2dd53c9e439b580812ed98f7e27c809351a0ac97 | /DisplayFactors.py | 8a54529e64f8ee2151ec57ac9e6b04fc962ad101 | [] | no_license | https://github.com/Raj-Bisen/python | 9549fe2f3c5c4d8c4105746ae6d6cf39cf25c0c9 | dc155da665a857adcd2c4f74842120939f6dae96 | refs/heads/master | "2023-07-03T16:23:30.459838" | "2021-08-10T09:55:23" | "2021-08-10T09:55:23" | 246,358,977 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # accept number from user and display its factors
# input : 6
# output : 1 2 3
def DisplayFactors(no):
    iCnt = 1
    for i in range(1, int(no/2) + 1):    # no = 6
        if no % iCnt == 0:
            print(iCnt)
        iCnt = iCnt + 1                  # advance even when iCnt is not a factor
#while(iCnt <= no/2):
# if((no % iCnt) == 0):
# print(iCnt)
# iCnt = iCnt + 1
def main():
print("Enter the number")
value = int(input())
print("**************************")
DisplayFactors(value)
print("**************************")
if __name__=="__main__":
main() | UTF-8 | Python | false | false | 607 | py | 56 | DisplayFactors.py | 56 | 0.410214 | 0.38715 | 0 | 26 | 21.423077 | 49 |
jdutreve/request-reply-pypy | 18,382,460,029,953 | f8a4b1142901b93a01cfb28d6f0d5cc1af0967cd | 96577fb1b2853d6ec420d364aa3a4ef3a0b59e7c | /server.py | db1873c8eb7c39d704e89fa80645aa9227b674a9 | [] | no_license | https://github.com/jdutreve/request-reply-pypy | 1efd630bb53d68dcebd0d28fd9ae1ee3dd5b43f2 | 989daef3327544ac18bb401f23c2a87bc59d3b12 | refs/heads/main | "2023-01-24T00:39:11.143258" | "2020-11-15T10:07:32" | "2020-11-15T10:07:32" | 313,003,276 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Paranoid Pirate worker
#
# Author: Daniel Lundin <dln(at)eintr(dot)org>
#
# restart DEALER socket when queue restarts (timeout to receive queue heartbeat)
# heartbeat to queue so queue unregisters this worker when unresponsive
#
import time
import sys
from random import randint
from datetime import datetime
import ctypes
import struct
from threading import Thread
from multiprocessing import Process
import zmq
Container = Thread
identities = [b'A', b'B', b'C']
HEARTBEAT = b''
cycle = 0
port = sys.argv[1]
load_duration = float(sys.argv[2])
bind_endpoint = ("tcp://*:" + port).encode()
connect_endpoint = ("tcp://192.168.0.22:" + port).encode()
def p(msg):
pass
print('%s %s' % (datetime.now().strftime('%M:%S:%f')[:-3], msg))
def on_request(request, identity):
address, control = request[:2]
reply = [address, control]
if control == HEARTBEAT:
reply[1] = HEARTBEAT
#p("I: RETURN PONG: %s" % reply)
else:
reply.append(b"ACK" + control + b"-" + str(port).encode() + b"-" + identity.encode())
# global cycle
# cycle += 1
# p("I: %s RETURN REPLY: %s, CYCLE=%d" % (identity, reply, cycle))
# time.sleep(load_duration) # Do some heavy work
return reply
def worker_socket(worker_identity, context):
worker = context.socket(zmq.REP)
worker.connect("inproc://routing.ipc")
worker.hwm = 100_000
time.sleep(.2) # Wait for threads to stabilize
while 1:
try:
while 1:
request = worker.recv_multipart(flags=zmq.NOBLOCK)
# reply = on_request(request, worker_identity)
worker.send_multipart(request, flags=zmq.NOBLOCK)
except zmq.Again:
time.sleep(.001)
def create_server_socket(context, bind, connect):
server = context.socket(zmq.ROUTER)
server.sndhwm = 600_000
server.rcvhwm = 600_000
server.identity = connect
server.probe_router = 1
server.router_mandatory = 1
server.bind(bind)
p("I: worker is ready at %s" % connect.decode())
return server
context = zmq.Context(1)
server_socket = create_server_socket(context, bind_endpoint, connect_endpoint)
# router_socket = context.socket(zmq.DEALER)
# router_socket.bind("inproc://routing.ipc")
# for identity in identities:
# Container(target=worker_socket, args=(identity.decode(), context)).start()
# time.sleep(1) # Wait for threads to stabilize
# poller = zmq.Poller()
# poller.register(router_socket, zmq.POLLIN)
# max = randint(90, 500)
counter = tmp = 0
not_yet_started = True
start = 0
timeit = 0
request = None
# try:
# while 1:
# try:
# while 1:
# # copy=True seems better for small messages
# request = server_socket.recv_multipart(flags=zmq.NOBLOCK, copy=True)
# if len(request) > 2:
# # print(request)
# counter += 1
# if not_yet_started:
# not_yet_started = False
# start = time.time()
# elif counter % 50_000 == 0:
# timeit = time.time()
# router_socket.send_multipart([b''] + request, flags=zmq.NOBLOCK)
# else:
# # return PONG
# server_socket.send_multipart(request, flags=zmq.NOBLOCK, copy=True)
# except zmq.Again:
# pass
# try:
# while 1:
# reply = router_socket.recv_multipart(flags=zmq.NOBLOCK)
# server_socket.send_multipart(reply[1:], flags=zmq.NOBLOCK)
# except zmq.Again:
# time.sleep(.002)
# except KeyboardInterrupt:
# duration = timeit - start
# print("%d requests, %ds duration, Average round trip cost: %d req/s " % (counter, duration, counter / duration))
# fmt = struct.Struct('I 9s')
# buffer = ctypes.create_string_buffer(fmt.size)
while 1:
try:
while 1:
hostname = server_socket.recv(flags=zmq.NOBLOCK|zmq.SNDMORE, copy=True)
request = server_socket.recv(flags=zmq.NOBLOCK, copy=True) # copy=True seems better for small messages
if not_yet_started:
not_yet_started = False
start = time.time()
if request:
# fmt.pack_into(buffer, 0, )
# request_id, msg = fmt.unpack_from(request[1], 0)
counter += 1
server_socket.send(hostname, flags=zmq.NOBLOCK | zmq.SNDMORE)
server_socket.send(request, flags=zmq.NOBLOCK)
if counter % 100_000 == 0:
print("Average round trip cost: %d requests %d, req/s " % (counter, counter / (time.time() - start)))
except zmq.Again:
hostname = server_socket.recv(zmq.SNDMORE) # blocking call
request = server_socket.recv(flags=zmq.NOBLOCK)
if not_yet_started:
not_yet_started = False
start = time.time()
if request:
counter += 1
server_socket.send(hostname, flags=zmq.NOBLOCK | zmq.SNDMORE)
server_socket.send(request, flags=zmq.NOBLOCK)
# except KeyboardInterrupt:
# duration = timeit - start
# print("%d requests, %ds duration, Average round trip cost: %d req/s " % (counter, duration, counter / duration))
# while 1:
# try:
# events = dict(poller.poll())
# if events.get(server) == zmq.POLLIN:
# p("I: RECEIVE REQUEST: %s" % request)
# router.send_multipart([b''] + request)
# if events.get(router) == zmq.POLLIN:
# reply = router.recv_multipart()
# server.send_multipart(reply[1:])
# p("I: RETURN REPLY: %s" % reply)
# if cycle > max and port in ['5555','5556'] and randint(0, 50000000) == 0:
# p("I: Simulating CPU overload ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# time.sleep(randint(2, 6))
# if cycle > max and randint(0, 5950000) == 0:
# p("I: Simulating a crash")
# import _thread
# _thread.interrupt_main()
# break
# except:
# p("I: Interrupted!!!!!!!!!!")
# break
| UTF-8 | Python | false | false | 6,227 | py | 3 | server.py | 3 | 0.571704 | 0.553557 | 0 | 192 | 31.432292 | 125 |
Ending2015a/test_memorynet2 | 7,146,825,616,270 | 2272abe6e28400ba9c15dfa2e3d54cb07963522b | 7f05ca492a328c390316e8cea8400f64ed9c4c48 | /model.py | 80de48dd4ccdaaf1858e36204f5ad5a3209f693a | [] | no_license | https://github.com/Ending2015a/test_memorynet2 | c6294200282a30aabc12169ff097edcb7f1d09d9 | ad563866b5674e87b6346a45cd64d73315f10437 | refs/heads/master | "2021-05-14T03:46:24.701960" | "2018-01-08T03:27:05" | "2018-01-08T03:27:05" | 116,480,734 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import time
import random
import numpy as np
import tensorflow as tf
def position_encoding(sentence_size, embedding_size):
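    """Position-encoding weight matrix of shape (sentence_size, embedding_size),
    used to weight each word embedding by its position inside the sentence."""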
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (embedding_size+1)/2) * (j - (sentence_size+1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
encoding[:, -1] = 1.0
return np.transpose(encoding)
class MemNet(object):
def __init__(self, emb_map, input_size, embed_size=100, n_hop=6, memory_size=20, sentence_size=216, option_size=10,
sentence_encoding = position_encoding,
proj_initializer = tf.random_normal_initializer(stddev=0.1),
default_w2v_value=0,
decode_map=None):
self.dec_map = decode_map
self.input_size = input_size
self.embed_size = embed_size
self.n_hop = n_hop
self.memory_size = memory_size
self.sentence_size = sentence_size
self.option_size = option_size
self.sent_encoding = sentence_encoding
self.proj_initializer = proj_initializer
self._encoding = tf.constant(self.sent_encoding(self.sentence_size, self.embed_size), name='encoding') # [sentence_size, embed_size]
self.embedding = tf.constant(emb_map)
def word2vec_lookup(self, inputs):
return tf.nn.embedding_lookup(self.embedding, inputs)
def _inputs_projection(self, inputs, hop=0, reuse=tf.AUTO_REUSE):
with tf.variable_scope('embedding', reuse=reuse):
if hop==0:
A = tf.get_variable('A', [self.input_size, self.embed_size], initializer=self.proj_initializer)
else: # use adjacent weight tying A^{k+1} = C^k
A = tf.get_variable('C_{}'.format(hop-1), [self.input_size, self.embed_size], initializer=self.proj_initializer)
#x = tf.nn.embedding_lookup(A, inputs, name='input_vector')
A = tf.transpose(A, [1, 0])
shape = inputs.get_shape().as_list()
shape[0] = -1
shape[-1] = self.embed_size
x = tf.reshape(inputs, [-1, self.input_size])
x = tf.matmul(x, A, name='input_proj')
x = tf.reshape(x, shape)
return x
def _outputs_projection(self, outputs, hop=0, reuse=tf.AUTO_REUSE):
with tf.variable_scope('embedding', reuse=reuse):
C = tf.get_variable('C_{}'.format(hop), [self.input_size, self.embed_size], initializer=self.proj_initializer)
#x = tf.nn.embedding_lookup(C, outputs, name='output_vector')
#return x
C = tf.transpose(C, [1, 0])
shape = outputs.get_shape().as_list()
shape[0] = -1
shape[-1] = self.embed_size
x = tf.reshape(outputs, [-1, self.input_size])
x = tf.matmul(x, C, name='output_proj')
x = tf.reshape(x, shape)
return x
def _query_projection(self, query, reuse=tf.AUTO_REUSE):
with tf.variable_scope('embedding', reuse=reuse): # use adjacent weight tying B = A
B = tf.get_variable('A', [self.input_size, self.embed_size], initializer=self.proj_initializer)
#x = tf.nn.embedding_lookup(B, query, name='query_vector')
#return x
B = tf.transpose(B, [1, 0])
shape = query.get_shape().as_list()
shape[0] = -1
shape[-1] = self.embed_size
x = tf.reshape(query, [-1, self.input_size])
x = tf.matmul(x, B, name='query_proj')
x = tf.reshape(x, shape)
return x
def _unprojection(self, pred, reuse=tf.AUTO_REUSE):
with tf.variable_scope('embedding', reuse=reuse):
W = tf.get_variable('C_{}'.format(self.n_hop-1), [self.input_size, self.embed_size], initializer=self.proj_initializer)
WT = tf.transpose(W, [1, 0])
return tf.matmul(pred, WT)
def _fc(self, inputs, num_out, name):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
input_shape = inputs.get_shape()
feed_in = input_shape[-1].value
weights = tf.get_variable('weights', [feed_in, num_out], initializer=tf.truncated_normal_initializer(stddev=5e-2))
biases = tf.get_variable('biases', [num_out], initializer=tf.constant_initializer(0.0))
x = tf.nn.xw_plus_b(inputs, weights, biases, name=name)
return x
def build_model(self, sentences=None, query=None, options=None, answer=None):
if sentences == None:
sentences = tf.placeholder(tf.int32, [None, self.memory_size, self.sentence_size], name='sentences')
if query == None:
query = tf.placeholder(tf.int32, [None, self.sentence_size], name='query')
if answer == None:
answer = tf.placeholder(tf.int32, [None], name='answer')
if options == None:
options = tf.placeholder(tf.int32, [None, self.option_size], name='option')
e_sentences = self.word2vec_lookup(sentences)
e_query = self.word2vec_lookup(query)
e_answer = self.word2vec_lookup(answer)
e_options = self.word2vec_lookup(options)
with tf.variable_scope('MemN2N'):
emb_q = self._query_projection(e_query) # [batch_size, sentence_size, embed_size]
u = tf.reduce_sum(emb_q * self._encoding, 1) # [batch_size, embed_size]
for hop in range(self.n_hop):
emb_i = self._inputs_projection(e_sentences, hop) # [batch_size, memory_size, sentence_size, embed_size]
mem_i = tf.reduce_sum(emb_i*self._encoding, 2) # [batch_size, memory_size, embed_size]
emb_o = self._outputs_projection(e_sentences, hop) # same as emb_i
mem_o = tf.reduce_sum(emb_o*self._encoding, 2) # same as mem_i
uT = tf.transpose(tf.expand_dims(u, -1), [0, 2, 1])
# [batch_size, embed_size, 1] -> [batch_size, 1, embed_size]
p = tf.nn.softmax(tf.reduce_sum(mem_i*uT, 2)) # inner product [batch_size, memory_size]
p = tf.expand_dims(p, -1) # [batch_size, memory_size, 1]
o = tf.reduce_sum(mem_o*p, 1) # [batch_size, embed_size]
u = o + u # [batch_size, embed_size]
logits = self._unprojection(u) # [batch_size, embed_size]
e_answer = tf.reshape(e_answer, [-1, self.embed_size])
loss = tf.reduce_mean(tf.square(logits-e_answer))
logt = tf.expand_dims(logits, 1)
# mean square
sel_p = tf.reduce_mean(tf.square(logt-e_options), 2)
mse_select = tf.argmin(sel_p, 1)
# cosine similarity
sel_norm = tf.nn.l2_normalize(logt, 1)
opt_norm = tf.nn.l2_normalize(e_options, 2)
sel_p = tf.reduce_sum(sel_norm * opt_norm, 2)
cos_select = tf.argmax(sel_p, 1)
#loss = tf.reduce_mean(1 - tf.reduce_sum(logits*ans_norm, 1))
#logits = tf.nn.softmax(self._unembedding(u)) #a_hat [batch_size * option_size, vocab_size]
#cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=onehot, logits=logits)
#loss = tf.reduce_mean(cross_entropy)
class Handle(object):
pass
handle = Handle()
handle.sentences = sentences
handle.options = options
handle.query = query
handle.answer = answer
handle.selection = cos_select
handle.cos_select = cos_select
handle.mse_select = mse_select
handle.debug = loss
return handle, loss
def build_sampler(self, sentences=None, query=None, options=None):
if sentences == None:
sentences = tf.placeholder(tf.int32, [None, self.memory_size, self.sentence_size], name='sentences')
if query == None:
query = tf.placeholder(tf.int32, [None, self.sentence_size], name='query')
if options == None:
options = tf.placeholder(tf.int32, [None, self.option_size], name='option')
e_sentences = self.word2vec_lookup(sentences)
e_query = self.word2vec_lookup(query)
e_options = self.word2vec_lookup(options)
with tf.variable_scope('MemN2N'):
emb_q = self._query_projection(e_query) # [batch_size, sentence_size, embed_size]
u = tf.reduce_sum(emb_q * self._encoding, 1) # [batch_size, embed_size]
for hop in range(self.n_hop):
emb_i = self._inputs_projection(e_sentences, hop) # [batch_size, memory_size, sentence_size, embed_size]
mem_i = tf.reduce_sum(emb_i*self._encoding, 2) # [batch_size, memory_size, embed_size]
emb_o = self._outputs_projection(e_sentences, hop) # same as emb_i
mem_o = tf.reduce_sum(emb_o*self._encoding, 2) # same as mem_i
uT = tf.transpose(tf.expand_dims(u, -1), [0, 2, 1])
# [batch_size, embed_size, 1] -> [batch_size, 1, embed_size]
p = tf.nn.softmax(tf.reduce_sum(mem_i*uT, 2)) # inner product [batch_size, memory_size]
p = tf.expand_dims(p, -1) # [batch_size, memory_size, 1]
o = tf.reduce_sum(mem_o*p, 1) # [batch_size, embed_size]
u = o + u # [batch_size, embed_size]
logits = self._unprojection(u) # [batch_size, embed_size]
logt = tf.expand_dims(logits, 1)
# mean square
sel_p = tf.reduce_mean(tf.square(logt-e_options), 2)
mse_select = tf.argmin(sel_p, 1)
# cosine similarity
sel_norm = tf.nn.l2_normalize(logt, 1)
opt_norm = tf.nn.l2_normalize(e_options, 2)
sel_p = tf.reduce_sum(sel_norm * opt_norm, 2)
cos_select = tf.argmax(sel_p, 1)
#logits = tf.nn.softmax(self._unembedding(u)) #a_hat [batch_size * option_size, vocab_size]
#cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=onehot, logits=logits)
#loss = tf.reduce_mean(cross_entropy)
class Handle(object):
pass
handle = Handle()
handle.sentences = sentences
handle.query = query
handle.options = options
handle.selection = cos_select
handle.cos_select = cos_select
handle.mse_select = mse_select
return handle, cos_select
| UTF-8 | Python | false | false | 10,678 | py | 5 | model.py | 5 | 0.568084 | 0.555535 | 0 | 268 | 38.83209 | 140 |
ChloeDumit/holbertonschool-higher_level_programming | 5,617,817,234,015 | 5b266cb0633f719d8504da2aea175835562fc8ff | 85cb34fe14b55018a1109f0345068e6e022c1ba9 | /0x0A-python-inheritance/6-base_geometry.py | 61cc5471fd5979f31bf642eb8b84e7f33d65b32f | [] | no_license | https://github.com/ChloeDumit/holbertonschool-higher_level_programming | 5b5d1f86ef01653c4d00ed5035a44bd309f0850f | 5e53eb817ef0c83e8fbda259ffabed6cd319b5b8 | refs/heads/main | "2023-04-24T01:40:52.454902" | "2021-05-12T14:14:33" | "2021-05-12T14:14:33" | 319,319,941 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
"""
Defines BaseGeometry, a base class whose
area() method is not implemented
"""
class BaseGeometry:
"""
class base geometry
"""
def area(self):
""" def area"""
raise Exception("area() is not implemented")
| UTF-8 | Python | false | false | 234 | py | 89 | 6-base_geometry.py | 82 | 0.58547 | 0.581197 | 0 | 14 | 15.714286 | 52 |
dave-shawley/imbi | 4,114,578,688,078 | 0278168b2c9eb0494b9f21f4830c9ed75e80f843 | 88f7fa9c22983f2002664b22afd589931e3f77b0 | /imbi/endpoints/project/__init__.py | 1999964099cdfb62c94d423cf72402e89e8bd41b | [
"BSD-3-Clause"
] | permissive | https://github.com/dave-shawley/imbi | cbc67525991fa6fc9e93fd74b2063debce263150 | 61e2950a437c0599a365432580c3ee363b875177 | refs/heads/main | "2023-06-29T05:30:44.392994" | "2020-11-23T18:42:57" | "2020-11-23T18:42:57" | 316,947,047 | 0 | 0 | BSD-3-Clause | true | "2020-11-29T12:33:14" | "2020-11-29T12:33:13" | "2020-11-23T18:43:06" | "2020-11-23T19:23:16" | 530 | 0 | 0 | 0 | null | false | false | """
Project Related Request Handlers
"""
from tornado import web
from imbi import common
from . import (dependencies,
dependency,
inventory,
link,
links,
options,
project)
URLS = [
web.url(r'^/project/$', project.RequestHandler),
web.url(r'^/project/(?P<id>{})$'.format(common.UUID_PATTERN),
project.RequestHandler, name='project'),
web.url(r'^/project/(?P<project_id>{})/link'.format(common.UUID_PATTERN),
link.RequestHandler),
web.url(r'^/project/(?P<project_id>{})/link/'
r'(?P<link_type>[\w_-]+)$'.format(common.UUID_PATTERN),
link.RequestHandler, name='project-link'),
web.url(r'^/project/(?P<project_id>{})/links'.format(common.UUID_PATTERN),
links.RequestHandler, name='project-links'),
web.url(r'^/project/(?P<project_id>{})/dependency'.format(
common.UUID_PATTERN),
dependency.RequestHandler),
web.url(r'^/project/(?P<project_id>{})/dependency/'
r'(?P<dependency_id>{})$'.format(
common.UUID_PATTERN, common.UUID_PATTERN),
dependency.RequestHandler, name='project-dependency'),
web.url(r'^/project/(?P<project_id>{})/dependencies'.format(
common.UUID_PATTERN),
dependencies.RequestHandler, name='project-dependencies'),
web.url(r'^/project/options$', options.RequestHandler,
name='project-options'),
web.url(r'^/projects/$', inventory.RequestHandler, name='projects')
]
| UTF-8 | Python | false | false | 1,580 | py | 172 | __init__.py | 126 | 0.586709 | 0.586709 | 0 | 40 | 38.5 | 78 |
michaelplesser/counting-winks | 4,801,773,444,140 | 4ed1ce0faed0addcd5f2682a78994b1086df33cb | d8339ee1a5c22b3deb12a5d66031a58c4686e090 | /counting-winks.py | b04a0a3307d5de58f461061f8a2f0572a15b4f03 | [] | no_license | https://github.com/michaelplesser/counting-winks | b4822368f6faa0d4687dde288a7644747505b730 | 08e2375b2262bb05c35544d2c1be09d0e782240c | refs/heads/master | "2020-04-29T17:59:56.781291" | "2019-03-23T15:47:22" | "2019-03-23T15:47:22" | 176,311,392 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
#import av # This module is only needed if processing the video data.
# It can be a pain in the ass to get its dependencies working.
# For the analysis, just use the data/*.npy files, no av needed!
import os
import sys
import scipy
import argparse
import scipy.stats
import numpy as np
import matplotlib.pyplot as plt
class load_data:
def __init__(self):
self.times = np.load('data/times.npy')
self.light1_data = np.load('data/Light1_data.npy')
self.light2_data = np.load('data/Light2_data.npy')
def input_args():
parser = argparse.ArgumentParser(description="counting-winks.py is a program to analyze the frequency of two blinking lights on the Boston skyline as viewed from Mission Hill.\
To run, use 'python3 counting-winks.py'. For further options, try 'python3 counting-winks.py -h', or '--help'.")
parser.add_argument('-b', action='store_true', help='Supress graphics, display no plots')
parser.add_argument('-t', action='store', type=float, help='How many seconds to analyze')
args = parser.parse_args()
return args
def process_video():
'''
Process the video file into managable data arrays.
Takes in a .mp4 and then extracts the time of each frame,
and the normalized R(GB) pixel value for each of the two lights.
These data arrays are saved as .npy files because the processing is
somewhat time consuming, so this way it can be done just once.
This is the only section that requires the 'av' library, which can be
difficult to install due to dependancy issues, so if you have the data
files (IE from git) don't bother installing 'av' unless you really want to.
'''
print('Processing video...')
intensity_L1 = [] # Light 1 data array
intensity_L2 = [] # Light 2 data array
times = []
vid_file = 'video_data.mp4'
vid = av.open(vid_file)
for frame in vid.decode(video=0):
print('\tCurrent time being processed: %02d:%05.2f:'% (int(frame.time//60), frame.time%60), end='\r')
times.append(frame.time)
arr = np.asarray(frame.to_image()) # Generate an RGB array for each pixel in the frame
pix_val_L1 = arr[420,713][0]/255. # Pixel RGB value (only take R, it's a red light) of light 1, and normalize to max RGB value (255)
pix_val_L2 = arr[360,473][0]/255. # Pixel RGB value (only take R, it's a red light) of light 2, and normalize to max RGB value (255)
intensity_L1.append(pix_val_L1)
intensity_L2.append(pix_val_L2)
### Save data files under ./data as .npy files
if not os.path.exists('./data'): os.mkdir('./data')
np.save('data/times',times)
np.save('data/Light1_data',intensity_L1)
np.save('data/Light2_data',intensity_L2)
def FFT(x, y):
'''
Perform the Fast Fourier Transform
'''
ft = np.abs(np.fft.fft(y)) # Perform the FFT
freqs = np.linspace(0, 1/(x[1]-x[0]), len(x)) # Generate the frequency data (transformed x-values)
## This is a rather subtle point related to fourier transforms
## Since the input is real-valued, only half of the output is actual info.
ft = ft[ :len(freqs)//2]
freqs = freqs[:len(freqs)//2]
return freqs, ft
def find_fundamental_frequency(freqs,amps,args):
'''
This function finds the fundamental frequency (first harmonic) of a transform.
We could just use the first non-trivial peak's frequency, but because the lights
are so close in frequency this isn't the best we can do. Indeed we care more about
the higher harmonics because they are more sensitive.
To find the fund. freq. we first identify all peaks corresponding to a square wave.
A useful piece of information is that a pure square wave has only odd harmonics.
    The even harmonics in our distribution are ignored (but maybe I'll use them later).*** (not quite true!)
Since the peaks should come in integer multiples of the fundamental frequency,
a linear fitting of the frequency value vs. harmonic number is what we want.
The slope of the linear fit is a better estimate of the fundamental frequency.
'''
print('\tAnalyzing light data...')
peaks = find_peaks(freqs, amps) # Function defined below
try:
peak_fs, peak_as = zip(*peaks) # Unzip [(x1,y1),(x2,y2),...] -> [x1, x2, ...], [y1, y2, ...]
except ValueError:
sys.exit("\nNot enough data given, no peaks found :( \nTry a longer -t\nBye bye!\n")
harmonics = [i/peak_fs[0] for i in peak_fs] # Harmonics are integer multiples of your fundamental frequency
slope, intercept, r, p, stderr = scipy.stats.linregress(harmonics, peak_fs) # Apply a linear fit (maybe unnecessary? To be investigated...)
line = slope*np.array(harmonics)+intercept
print('\t\tPlotting frequency spectrum with "found" peaks')
plt.plot(freqs, amps) # Plot spectrum
plt.plot(peak_fs, peak_as, 'ro') # Plot peaks as points
if not args.b : plt.show()
print('\t\tPlotting frequency vs harmonic number\n')
plt.plot(harmonics, peak_fs, 'o', harmonics, line)
if not args.b : plt.show()
return peaks, slope
def find_peaks(x,y):
'''
A basic peak-finding algorithm.
Search for samples with amplitude above some threshold which are the largest sample in some range
'''
peaks = []
for i, d in enumerate(zip(x, y)):
### These two parameters help define the peak search.
### They're manually tuned, but auto-tuning is TBD in the future
buf = 50 # A given y must be the largest in +-(buf) samples to be a "peak"
threshold = 100 # It also must be above a threshold value
if i<buf or i>(len(x)-buf): continue # Avoid index out of bound errors, we don't mind ignoring these regions in the peak hunt
if d[1] == max(y[i-buf:i+buf]) and d[1]>threshold: # Peak-finding logic
peaks.append([d[0], d[1]]) # Add the point as a peak
return peaks
def main():
args = input_args()
print('Beginning analysis...\n')
### Load the data produced in process_video()
try:
data = load_data()
except FileNotFoundError:
print('\tData files not found. Running process_video()')
process_video() # Process the video file to obtain pixel data, times, etc...
data = load_data() # Load the data
### As a fun way to play with time-frequency uncertainty, the -t <int> flag shortens the amount of data transformed (and gives worse results!)
if args.t:
data.times = [ti for ti in data.times if ti<args.t]
data.light1_data = data.light1_data[:len(data.times)]
data.light2_data = data.light2_data[:len(data.times)]
### Plot 10 seconds of light1's waveform for reference.
print('\tPlotting 10 seconds of light1 waveform\n')
plt.plot(data.times, data.light1_data) # Plot the waveform in the time domain
plt.gca().set_xlim([0,10]) # Set an axis range for 0,10 seconds on the x-axis
if not args.b: plt.show()
### Perform the fourier transform
freqs_1, ft_1 = FFT(data.times, data.light1_data)
freqs_2, ft_2 = FFT(data.times, data.light2_data)
### Analyze the data to find the fundamental frequency
peaks_1, f0_1 = find_fundamental_frequency(freqs_1, ft_1, args)
peaks_2, f0_2 = find_fundamental_frequency(freqs_2, ft_2, args)
#print(f0_1, f0_2)
period = 1. / abs(f0_1 - f0_2) / 60. # Beat period (in minutes)
print('Analysis complete\n')
print('\tLight 1 was found to have a frequency of {0:.4f} Hz and Light 2 had a frequency of {1:.4f} Hz'.format(f0_1,f0_2))
print('\tThe beat period (time for a full phase-cycle) is {0:2.2f} minutes.\n'.format(period))
return
if __name__=="__main__":
main()
| UTF-8 | Python | false | false | 8,490 | py | 2 | counting-winks.py | 1 | 0.605889 | 0.589164 | 0 | 187 | 44.40107 | 180 |
AmigoCap/PredicTrajec | 9,577,777,082,356 | 24a2b4b6d08cbde2427998774ec241d602e1b82d | 2be2c928215e58018e8f822fb037b8141ce3496b | /ressource python/speedClassification.py | d16153229cb93660abd99ff2a4784337abe6e16c | [] | no_license | https://github.com/AmigoCap/PredicTrajec | 54425a6eeb006060aacfc774763b6c1d603fc7bb | 40f586e29365116aefc40bcdca3a3badacec368c | refs/heads/master | "2021-01-25T13:28:33.668628" | "2018-06-18T15:18:01" | "2018-06-18T15:18:01" | 123,574,278 | 0 | 5 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scipy.cluster.vq import vq,kmeans,whiten
import numpy as np
import distance
def applyKMeans(seg,k):
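    """Run k-means (on whitened features) over the segment's (index, velocity)
    pairs; return the sorted index-coordinates of the cluster centres, in
    whitened units, together with the whitened feature array."""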
lV=[]
for ii, v in enumerate(seg['velocity']):
lV.append([float(ii),v])
features = np.array(lV)
whitened = whiten(features)
k=kmeans(whitened,k)
lK=[]
for ii in range(len(k[0])):
lK.append(k[0][ii][0])
return (sorted(lK),whitened)
def getBoundiaries(lK):
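    """Build interval boundaries: 0, the midpoints between consecutive cluster
    centres, and a large sentinel upper bound."""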
lBoundiaries=[0]
for ii in range (len(lK)-1):
lBoundiaries.append((lK[ii]+lK[ii+1])/2)
lBoundiaries.append(1000)
return lBoundiaries
def calcFirstSegmentation(lBoundiaries,whitened,bPadd):
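    """Split the whitened samples into one velocity list per boundary interval;
    when bPadd is set, pad the later segments with -1.0 placeholders."""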
lFirstSpeedSegmentation=[[] for ii in range (len(lBoundiaries)-1)]
for ii in range(len(whitened)):
for jj in range(len(lBoundiaries)-1):
if whitened[ii][0]>=lBoundiaries[jj] and whitened[ii][0]<lBoundiaries[jj+1]:
lFirstSpeedSegmentation[jj].append(whitened[ii][1])
if bPadd:
for kk in range(jj+1,len(lFirstSpeedSegmentation)):
lFirstSpeedSegmentation[kk].append(-1.0)
return lFirstSpeedSegmentation
def calcSpeedTrend(i,lowThreshold=0.2,highThreshold=1.2):
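    """Classify a (mean) speed as 0 = slow, 1 = medium or 2 = fast using the
    two thresholds."""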
if i < lowThreshold:
return 0
elif i>highThreshold:
return 2
else:
return 1
def setToZero(i):
if i<=0:
return 0
else:
return i
def concacatenateLists(speedTrend,lS):
lSf=[lS[0]]
a=[speedTrend[0]]
for ii in range(0,len(speedTrend)-1):
if speedTrend[ii]==speedTrend[ii+1]:
lSf[-1]=lSf[-1]+list(filter(lambda x : x>=0 ,lS[ii+1]))
else:
lSf.append(list(map(setToZero,lS[ii+1])))
a.append(speedTrend[ii+1])
return lSf,a
def calcMean(lFirstSpeedSegmentation):
return([np.mean(list(filter(lambda x: x > 0, segment))) for segment in lFirstSpeedSegmentation])
def calcMedian(lFirstSpeedSegmentation):
return([np.median(list(filter(lambda x: x > 0, segment))) for segment in lFirstSpeedSegmentation])
def agglomerateSpeedSegments(lFirstSpeedSegmentation,lowThreshold,highThreshold,bMedian):
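    """Merge adjacent speed segments that share the same speed trend, where the
    trend of each segment is computed from its mean (or median) speed."""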
if bMedian:
lMeans=calcMedian(lFirstSpeedSegmentation)
else:
lMeans=calcMean(lFirstSpeedSegmentation)
speedTrend=[calcSpeedTrend(meanSpeed,lowThreshold,highThreshold) for meanSpeed in lMeans ]
(l,a)=(concacatenateLists(speedTrend,lFirstSpeedSegmentation))
return (l,a)
def initSpeedClass(df) :
size = df['velocity'].size
accelerations = []
for i in range(size - 1):
accelerations.append(-1)
accelerations.append(-1)
return accelerations
def cancelWhithen(lFirstSpeedSegmentation,segment_mouvement):
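    """Map the whitened segmentation back onto the original (un-whitened)
    velocities of the movement segment."""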
nwSeg=[]
c=0
for wSeg in lFirstSpeedSegmentation:
nwSeg.append([])
for ele in wSeg:
nwSeg[-1].append(segment_mouvement.velocity[c])
c+=1
return nwSeg
| UTF-8 | Python | false | false | 2,990 | py | 16 | speedClassification.py | 10 | 0.628094 | 0.61204 | 0 | 98 | 28.510204 | 102 |
hmlinaric/AlphaBot | 12,670,153,523,767 | 017a26a548d7dfaff536f8aee8ae0ceeed76242d | 7be748c1f08848314f4caec780515f14df2915fd | /rpi/AlphaBotRestlet.py | 3e6a58215cb74b6bc3e69ff3937a9284c4705023 | [] | no_license | https://github.com/hmlinaric/AlphaBot | e3893f8cbe9bf5e54463fb4534334ca00a921397 | 88c24a051c49ab6012a48e96833a1ae6e3bcc0b4 | refs/heads/master | "2021-01-25T04:48:19.535541" | "2017-06-08T16:41:25" | "2017-06-08T16:41:25" | 93,478,673 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask, request
from flask_restful import Resource, Api
from flask_restful import reqparse
from AlphaBot import AlphaBot
import sys
#import pyhsm
debug = True
app = Flask(__name__)
api = Api(app)
# to avoid CORS problems
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
return response
Ab = AlphaBot()
##
## CLASS RESTlet
##
class AlphaBot_Motor(Resource):
def post(self):
print "POST_ENCODING"
parser = reqparse.RequestParser()
parser.add_argument('left' , required=True, type=int, help='Left speed motor missing!!!')
parser.add_argument('right', required=True, type=int, help='Right speed motor missing!!!')
args = parser.parse_args()
left = args['left']
right = args['right']
if(left > 100):
left=100
elif (left < -100):
left= -100
if(right > 100):
right=100
elif (right < -100):
right= -100
print "Left motor speed::" + str(left)
print "Right motor speed::" + str(right)
Ab.setMotor(left,right)
return {"ret": "OK"}
class AlphaBot_InfraRed(Resource):
def post(self):
dl,dr = Ab.getInfrared()
return {"ret": "OK", "left":dl, "right":dr}
class AlphaBot_lt_calibrate(Resource):
def post(self):
print "Start Calibrate"
for i in range(0,50):
Ab.LT_calibrate()
print "Finish Calibrate"
return {"ret": "OK"}
class AlphaBot_lt_read(Resource):
def post(self):
ret_val = Ab.LT_readLine()
return {"ret": "OK", "value":ret_val}
class AlphaBot_motorcount(Resource):
def post(self):
cleft,cright = Ab.USD_GetSpeedCounter()
return {"ret": "OK", "left":cleft, "right":cright}
api.add_resource(AlphaBot_Motor, '/AlphaBot/motor')
api.add_resource(AlphaBot_InfraRed, '/AlphaBot/infrared')
api.add_resource(AlphaBot_lt_calibrate, '/AlphaBot/lt_calibrate')
api.add_resource(AlphaBot_lt_read, '/AlphaBot/lt_read')
api.add_resource(AlphaBot_motorcount, '/AlphaBot/motorcount')
if __name__ == '__main__':
Ab.stop()
app.run(host="192.168.2.141",port=9999, debug=debug)
| UTF-8 | Python | false | false | 2,227 | py | 4 | AlphaBotRestlet.py | 3 | 0.680287 | 0.661877 | 0 | 88 | 24.261364 | 92 |
jmny-cell/School | 14,980,845,949,246 | 6596d6757287d23699def25e7db1be631ae3bf53 | 58a961432d93c042e31c2b3992be9bb42ea2e4b9 | /COMETPY (Computational Methods in Physics)/Project/MANAY_COMETPY_PROJECT.py | fc0d12e63cf82c2d660c31aa21dd705c27d32c04 | [] | no_license | https://github.com/jmny-cell/School | ebab0df69ad14b0bf9fd3335870b032a023f1375 | ec18011216df7f78556d2d4d6faf4afe743678ec | refs/heads/master | "2020-12-27T06:41:40.654989" | "2020-02-03T01:55:47" | "2020-02-03T01:55:47" | 237,798,289 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import random
import matplotlib.pyplot as plt
# Initialize values
m_init = 100 # initial wealth
num_agent = 500 # number of agents
num_transact = 10000 # number of transactions
num_sim = 1000 # number of Monte Carlo simulations
save_rate_list = [0.25, 0.50, 0.9]
# NOTE: So that the program runs decently quickly, num_sim = 1000 and num_transact = 10000, which
# will only approximate the equilibrium distribution. Larger values will make the curve smoother, but
# will result in the same shape.
# Define function that runs each simulation
def all_simulate(m_init, num_agent, num_transact, num_sim, save_rate):
agent_money = np.zeros(num_agent) # amount of money per agent
wealth_distrib = np.zeros(num_agent) # wealth distribution
for i in range(num_sim):
agent_money.fill(m_init) # same amount of initial wealth m_init per agent
agent_money = simulate_trade(num_agent, num_transact, agent_money, save_rate)
wealth_distrib += np.sort(agent_money) # cumulative wealth distribution after simulation
average_wealth_distrib = wealth_distrib / num_sim # compute average wealth distribution
return average_wealth_distrib
# Define function simulating trading between agents
def simulate_trade(num_agent, num_transact, agent_money, save_rate):
for i in range(num_transact):
agent_i = 1
agent_j = 1
while agent_i == agent_j: # Prevent agent from trading with himself
agent_i = random.randint(0, num_agent - 1)
agent_j = random.randint(0, num_agent - 1)
m_i = agent_money[agent_i]
m_j = agent_money[agent_j]
epsilon = random.random() # Choose from uniform (0, 1) distribution
# The following three are based on formulas in the problem
delta_m = (1 - save_rate) * (epsilon * m_j - (1 - epsilon) * m_i)
agent_money[agent_i] = m_i + delta_m
agent_money[agent_j] = m_j - delta_m
return agent_money
# Plot histogram and semilog plot when lambda = 0
result = all_simulate(m_init, num_agent, num_transact, num_sim, 0)
result = [money if money > 0.01 else 0.01 for money in result] # Replace all small values with 0.01 to smooth out log function
bin_size = 20 # Set bin size
num_bins = int(max(result) / bin_size) # Compute for number of bins
result_hist, bin_edges = np.histogram(result, bins = num_bins) # Get bin edges of histogram
bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1]) # Center each bin
fig = plt.figure(figsize=(10,7))
ax1 = fig.add_subplot(1, 1, 1)
ax1.plot(bin_centers, result_hist / num_agent)
ax1.set_title("Wealth Distribution, $\\lambda = 0$")
ax1.set_xlabel("wealth")
ax1.set_ylabel("number of agents")
mean_wealth = np.mean(result)
beta = 1 / mean_wealth
fig = plt.figure(figsize=(10,7))
ax2 = fig.add_subplot(1, 1, 1)
ax2.semilogy(bin_centers, result_hist / num_agent, label = "log of wealth distribution")
ax2.semilogy(bin_centers, beta * np.exp(-beta * bin_centers), label = "Gibbs distribution")
ax2.set_title("Log of Wealth Distribution, $\\lambda = 0$ and Gibbs Distribution")
ax2.set_xlabel("wealth")
ax2.set_ylabel("log of number of agents")
ax2.set_xlim(0, 300) # Limit x-values to part where plot is linear
ax2.legend()
for rate in save_rate_list:
result = all_simulate(m_init, num_agent, num_transact, num_sim, rate)
result = [money if money > 0.01 else 0.01 for money in result] # Replace all small values with 0.01 to smooth out log function
bin_size = 20 # Set bin size
num_bins = int(max(result) / bin_size) # Compute for number of bins
result_hist, bin_edges = np.histogram(result, bins = num_bins) # Get bin edges of histogram
bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1]) # Center each bin
fig = plt.figure(3, figsize=(10,7))
ax3 = fig.add_subplot(1, 1, 1)
ax3.plot(bin_centers, result_hist / num_agent, label = "$\lambda = %s$" % rate)
ax3.set_title("Wealth Distribution")
ax3.set_xlabel("wealth")
ax3.set_ylabel("number of agents")
mean_wealth = np.mean(result)
beta = 1 / mean_wealth
fig = plt.figure(4, figsize=(10,7))
ax4 = fig.add_subplot(1, 1, 1)
ax4.semilogy(bin_centers, result_hist / num_agent, label = "log of wealth distribution, $\lambda = %s$" % rate)
ax4.semilogy(bin_centers, beta * np.exp(-beta * bin_centers), label = "Gibbs distribution, $\lambda = %s$" % rate)
ax4.set_title("Log of Wealth Distribution and Gibbs Distribution")
ax4.set_xlabel("wealth")
ax4.set_ylabel("log of number of agents")
ax4.set_xlim(0, 300) # Limit x-values to part where plot is linear
ax3.legend()
ax4.legend()
"""
COMMENTS:
1) When we do not factor in saving (lambda = 0), the linear part of the semilog plot resembles
that of a Gibbs distribution with some constant added to it.
2) More income equality associated with a higher saving parameter lambda
3) When we factor in saving, the equilibrium income distributions do not resemble a Gibbs distribution
anymore. Perhaps a more conventional distribution for income distribution (e.g., Pareto) may be more
appropriate?
""" | UTF-8 | Python | false | false | 5,363 | py | 79 | MANAY_COMETPY_PROJECT.py | 17 | 0.656536 | 0.629871 | 0 | 118 | 44.457627 | 131 |
jlgrady1/mediamanager | 1,537,598,311,956 | ea3f34727509e3bb5a1dfc0cf3bbe629810e783c | 9dd97bf7f5bab93211772942eb19048935ba0146 | /sorter/migrations/0001_initial.py | 494530b3c7c2c02974f119d66b3bbeaf0a1476ef | [] | no_license | https://github.com/jlgrady1/mediamanager | 8c716885b831a63d64e57a05fab3335340a536ae | d92cb46aa0a1e78b9bb4bc0f43342f351e51c9d2 | refs/heads/master | "2020-05-24T14:03:32.584805" | "2015-03-20T14:45:58" | "2015-03-20T14:45:58" | 10,370,721 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Type'
db.create_table(u'sorter_type', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'sorter', ['Type'])
# Adding model 'Status'
db.create_table(u'sorter_status', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'sorter', ['Status'])
# Adding model 'MediaFile'
db.create_table(u'sorter_mediafile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sorter.Type'])),
('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sorter.Status'])),
('filepath', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal(u'sorter', ['MediaFile'])
# Adding model 'Action'
db.create_table(u'sorter_action', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('date_started', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('date_completed', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('mediafile', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sorter.MediaFile'])),
('command', self.gf('django.db.models.fields.CharField')(max_length=255)),
('completion', self.gf('django.db.models.fields.IntegerField')()),
('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('acknowledged', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'sorter', ['Action'])
# Adding model 'MediaFolder'
db.create_table(u'sorter_mediafolder', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('folder', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sorter.MediaFolder'], null=True)),
))
db.send_create_signal(u'sorter', ['MediaFolder'])
# Adding model 'Configuration'
db.create_table(u'sorter_configuration', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sorter.Type'])),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'sorter', ['Configuration'])
def backwards(self, orm):
# Deleting model 'Type'
db.delete_table(u'sorter_type')
# Deleting model 'Status'
db.delete_table(u'sorter_status')
# Deleting model 'MediaFile'
db.delete_table(u'sorter_mediafile')
# Deleting model 'Action'
db.delete_table(u'sorter_action')
# Deleting model 'MediaFolder'
db.delete_table(u'sorter_mediafolder')
# Deleting model 'Configuration'
db.delete_table(u'sorter_configuration')
models = {
u'sorter.action': {
'Meta': {'object_name': 'Action'},
'acknowledged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'completion': ('django.db.models.fields.IntegerField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sorter.MediaFile']"})
},
u'sorter.configuration': {
'Meta': {'object_name': 'Configuration'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sorter.Type']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sorter.mediafile': {
'Meta': {'object_name': 'MediaFile'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'filepath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sorter.Status']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sorter.Type']"})
},
u'sorter.mediafolder': {
'Meta': {'object_name': 'MediaFolder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sorter.MediaFolder']", 'null': 'True'})
},
u'sorter.status': {
'Meta': {'object_name': 'Status'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sorter.type': {
'Meta': {'object_name': 'Type'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['sorter'] | UTF-8 | Python | false | false | 9,566 | py | 20 | 0001_initial.py | 9 | 0.583839 | 0.578612 | 0 | 157 | 59.936306 | 126 |
gabriel1898/auladeprogramacao | 4,363,686,802,840 | f3a535a4e6cfe8b6add36af9d62b4161acd2b456 | dc32ede9f7d9e81a5636655fec4107d37cae9c05 | /estruturaDecisao/exercicio11.py | 6e3ce78ff974ac2fb31c584f49b8b8375f859a40 | [] | no_license | https://github.com/gabriel1898/auladeprogramacao | d4abbfd296fec9b21d69ba4bfc44c530a7165faf | 743261defce20c9dfc0d9bbeaf2a8acdc0e0c2b5 | refs/heads/master | "2020-10-01T04:34:07.247053" | "2020-01-16T21:04:48" | "2020-01-16T21:04:48" | 227,456,434 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def exercicio11():
s = 750
if (700 < s < 1500):
a = s*1/10
print (a)
print ("10%")
print ("750")
print ("825") | UTF-8 | Python | false | false | 128 | py | 74 | exercicio11.py | 74 | 0.484375 | 0.304688 | 0 | 8 | 15.125 | 22 |
cbporch/perceptron | 12,197,707,141,339 | 806ba17b9a5add18daa95f17690235a79d475215 | c4d33fb436d951a2317d4500ffb2ddd104d3e7ca | /perceptron.py | 477b13bddcb021b4bb8060f5f9d25d96c38a69a8 | [
"MIT"
] | permissive | https://github.com/cbporch/perceptron | b71587513752abd01ad219361f415402fca17aad | f90379038ef5a2b7b37c3a3fca5188f63da6d426 | refs/heads/master | "2021-01-22T01:51:29.094071" | "2017-02-05T19:03:53" | "2017-02-05T19:03:53" | 81,014,776 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Implementation of a Perceptron Learning Algorithm, that can be abstracted to
various input sizes (NUM) or dimensions (DIM). Displays using pyplot.
"""
from matplotlib import pyplot as plt
import numpy as np
def f(x): # Target Function
return 0.5 * x + 1.25
NUM = 20
DIM = 3
PLOT = True if DIM == 3 else False
w = np.random.rand(DIM) * 5
matrix = np.random.rand(NUM, DIM) * 5
y = [1] * NUM
for i in range(NUM):
matrix[i][0] = 1
if (f(matrix[i][1])) > matrix[i][2]:
y[i] = -1
if PLOT:
for x in range(NUM):
if y[x] == 1:
plt.plot(matrix[x][1], matrix[x][2], 'Dg')
else:
plt.plot(matrix[x][1], matrix[x][2], 'ro')
plt.plot([0,5], [f(0), f(5)], label='F') # plot Target function f(x)
plt.ylabel('Y Axis')
plt.xlabel('X Axis')
def next_w(w, y, x):
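    # Perceptron update rule: w <- w + y * x, applied per dimension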
r = []
for i in range(DIM):
r.append(w[i] + y*x[i])
return r
def sign(k):
return 1 if float(k) > 0 else -1
def perceptron(w, x):
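    # Hypothesis h(x) = sign(w . x)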
return sign(sum(w[i] * x[i] for i in range(DIM)))
def check(y=y, m=matrix):
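    # Return the index of a misclassified point, or -1 if all points are classified correctly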
for n in range(NUM):
if y[n] != perceptron(w, m[n]):
return n
return -1
t = 0
c = True
while c:
n = check()
    if n == -1:
        c = False
    else:
        w = next_w(w, y[n], matrix[n])
t += 1
print("t: {0}, w: {1}".format(t,w))
def g(x):
return ((-w[1]/w[2]) * x) + (-w[0]/w[2])
if PLOT:
plt.plot([0, 5], [g(0), g(5)])
plt.show()
| UTF-8 | Python | false | false | 1,442 | py | 1 | perceptron.py | 1 | 0.531207 | 0.501387 | 0 | 69 | 19.898551 | 76 |
shamssami/Selenium-Tutorial | 6,416,681,174,773 | 491d37736c0f705f21924b1fe73c96a65044735a | 92a7a80336e4c0547f68ab8f4071ccafd03360fe | /Tests/test_table_search.py | 5fee79cd11a73d85be6aea9eb69d1c12383dc313 | [] | no_license | https://github.com/shamssami/Selenium-Tutorial | 07e4626ba762ee40be377cfcd6a6318d51cbc353 | 13e8ad1e73def3f03ef93e6f40921e2935b0e920 | refs/heads/main | "2023-06-26T01:29:56.943819" | "2021-07-31T20:59:11" | "2021-07-31T20:59:11" | 384,167,996 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import allure
from allure_commons.types import AttachmentType
from selenium import webdriver
from Pages.table_filter_search import TableSearch
from Utils.Logger import Logging
from Utils.locators import TableSearchLocators
from Utils.test_data import TableSearchData
import time
@allure.severity(allure.severity_level.NORMAL)
class TestTableSearch:
logger = Logging.loggen()
driver = webdriver.Chrome(executable_path="C:\\selenium\\chromedriver_win32\\chromedriver.exe")
obj = TableSearch(driver)
@allure.severity(allure.severity_level.BLOCKER)
def test_table_search(self):
self.logger.info("*************** Test_001_Table_Search *****************")
self.logger.info("*************** Test Table Search Started *****************")
self.obj.open(TableSearchLocators.TableSearchUrl)
time.sleep(3)
# test form
self.obj.filter_table(TableSearchData.get_task())
time.sleep(2)
self.obj.clear_text_field()
self.obj.filter_table(TableSearchData.get_assignee())
time.sleep(2)
self.obj.clear_text_field()
self.obj.filter_table(TableSearchData.get_status())
time.sleep(2)
self.obj.clear_text_field()
time.sleep(3)
self.logger.info("*************** Test Table Search Finished *****************")
self.driver.save_screenshot(".\\screenshots\\" + "table_search.png")
allure.attach(self.driver.get_screenshot_as_png(), name="table_search", attachment_type=AttachmentType.PNG)
@allure.severity(allure.severity_level.BLOCKER)
def test_table_filter(self):
self.logger.info("*************** Test_002_Table_Filter *****************")
self.logger.info("*************** Test Table Filter Activation Started *****************")
time.sleep(3)
# check filter icon activation
disabled_value = self.obj.is_enabled_filter()
self.obj.click_filter_button()
enabled_value = self.obj.is_enabled_filter()
time.sleep(2)
# filter table should be disabled before clicking on filter icon
if disabled_value is False:
self.logger.info("Passed, Filter Icon Is Disabled")
else:
self.logger.error("Test Failed, The field should be disabled")
# filter table should be enabled after clicking on filter icon
if enabled_value is True:
self.logger.info("Passed, Filter Icon Is Enabled")
else:
self.logger.error("Test Failed, The field should be enabled")
# close browser
self.logger.info("*************** Test Table Filter Activation Buttons Finished *****************")
self.driver.save_screenshot(".\\screenshots\\" + "table_filter.png")
allure.attach(self.driver.get_screenshot_as_png(), name="table_filter",
attachment_type=AttachmentType.PNG)
self.driver.close()
# pytest -v -s --alluredir=".\AllureReports\TableSearch" Tests\test_table_search.py
# pytest -v --html=PytestReports\table_search.html Tests\test_table_search.py
| UTF-8 | Python | false | false | 3,103 | py | 96 | test_table_search.py | 25 | 0.63068 | 0.625846 | 0 | 78 | 38.782051 | 115 |
bgoonz/UsefulResourceRepo2.0 | 10,943,576,716,620 | 9b588d87826e6839b482b2cecefa13827bfc38ce | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_MY_ORGS/Web-Dev-Collaborative/blog-research/Data-Structures/HTML-only/DS-and-Algorithms-Prac/DS-n-Algos/Arrays/python/sum-avg/avg.py | 3dd28b1c461c41ab11ba562de496378c512c4459 | [
"MIT"
] | permissive | https://github.com/bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | "2023-03-17T01:22:05.254751" | "2022-08-11T03:18:22" | "2022-08-11T03:18:22" | 382,628,698 | 10 | 12 | MIT | false | "2022-10-10T14:13:54" | "2021-07-03T13:58:52" | "2022-08-23T18:47:59" | "2022-08-11T03:18:48" | 10,239,100 | 5 | 11 | 2 | null | false | false | # average of sum of lists
m = [1,43,656,8,54,908,4,5,23,78,435,89,45,476,89]
n = [234,56,90,675,56,786,90,564,8,657,87,64,354,2,75]
q = [34,76,76,564,34,32,16,67,25,98,90,345,235,64,134,76]
def avgSums(a,b,c):
summingUp = sum(a) + sum(b) + sum(c)
summed = summingUp / 3
return(summed)
print(avgSums(m,n,q))
# [Running] python -u "c:\0-a-A-October\00-weeks\08-my-website\Stable\Public\2-content\Data-Structures\DS-and-Algorithms-Prac\DS-n-Algos\Arrays\python\sum-avg\avg.py"
# 2892.6666666666665
#
# [Done] exited with code=0 in 0.186 seconds
| UTF-8 | Python | false | false | 546 | py | 107,966 | avg.py | 49,173 | 0.686813 | 0.450549 | 0 | 13 | 41 | 166 |
mkm-dz/punchoutAI | 549,755,816,363 | b3bce94483161fb77726e3b82191282caf2cdd02 | 5715d31e23089cb0bab4f2544205ad2a030d69a6 | /punchout-ai/classes/punchUtils.py | c71a5c2cd45ba15b4c4432e42b4d20ba7a84bbae | [
"MIT"
] | permissive | https://github.com/mkm-dz/punchoutAI | 4a5f6d5ff2984e137f4943f9da787b65d3845f1c | ab6cc41ffdc2011aab007a457daf9f9521391183 | refs/heads/master | "2023-03-04T05:47:17.970209" | "2023-02-24T17:49:28" | "2023-02-24T17:49:28" | 158,609,266 | 2 | 0 | MIT | false | "2023-02-24T17:49:29" | "2018-11-21T21:39:51" | "2022-02-13T02:54:42" | "2023-02-24T17:49:28" | 117 | 1 | 0 | 8 | Python | false | false | from classes.bizhawkClient import BizHawkClient
from gym import error, spaces
import numpy as np
from tensorflow.keras.utils import to_categorical
class punchUtils():
def __init__(self):
# We have an output space of 60 possible actions (moves)
# we map each one to a controller action
        # First digit = timing: 0-Low 1-Medium 2-High
# second digit = buttons 0-None 1-A 2-B 3-Start
# third digit = direction 0-None 1-Up 2-Right 3-Down 4-Left
self.actionMap={}
self.actionMap[0] = '000'
self.actionMap[1] = '001'
self.actionMap[2] = '002'
self.actionMap[3] = '003'
self.actionMap[4] = '004'
self.actionMap[5] = '010'
self.actionMap[6] = '011'
self.actionMap[7] = '012'
self.actionMap[8] = '013'
self.actionMap[9] = '014'
self.actionMap[10] = '020'
self.actionMap[11] = '021'
self.actionMap[12] = '022'
self.actionMap[13] = '023'
self.actionMap[14] = '024'
self.actionMap[15] = '030'
self.actionMap[16] = '031'
self.actionMap[17] = '032'
self.actionMap[18] = '033'
self.actionMap[19] = '034'
self.actionMap[20] = '100'
self.actionMap[21] = '101'
self.actionMap[22] = '102'
self.actionMap[23] = '103'
self.actionMap[24] = '104'
self.actionMap[25] = '110'
self.actionMap[26] = '111'
self.actionMap[27] = '112'
self.actionMap[28] = '113'
self.actionMap[29] = '114'
self.actionMap[30] = '120'
self.actionMap[31] = '121'
self.actionMap[32] = '122'
self.actionMap[33] = '123'
self.actionMap[34] = '124'
self.actionMap[35] = '130'
self.actionMap[36] = '131'
self.actionMap[37] = '132'
self.actionMap[38] = '133'
self.actionMap[39] = '134'
self.actionMap[40] = '200'
self.actionMap[41] = '201'
self.actionMap[42] = '202'
self.actionMap[43] = '203'
self.actionMap[44] = '204'
self.actionMap[45] = '210'
self.actionMap[46] = '211'
self.actionMap[47] = '212'
self.actionMap[48] = '213'
self.actionMap[49] = '214'
self.actionMap[50] = '220'
self.actionMap[51] = '221'
self.actionMap[52] = '222'
self.actionMap[53] = '223'
self.actionMap[54] = '224'
self.actionMap[55] = '230'
self.actionMap[56] = '231'
self.actionMap[57] = '232'
self.actionMap[58] = '233'
self.actionMap[59] = '234'
def sendCommand(self, command: str, buttons=None):
client = BizHawkClient()
if(buttons == None):
client.buttons = self.SetButtons(
False, False, False, False, False, False, False,'Low')
else:
client.buttons = buttons
client.Send(command)
def SetButtons(self, up, down, left, right, a, b, start, timing):
buttons = {'Up': up,
'Down': down,
'Left': left,
'Right': right,
'A': a,
'B': b,
'Start': start,
'Timing': timing}
return buttons
def castAgentActionToEmuAction(self, agentAction):
tempCommands = self.SetButtons(False,False,False,False,False,False,False,'Low')
if(agentAction[0] == 1):
tempCommands['Timing'] = 'Medium'
elif(agentAction[0] == 2):
tempCommands['Timing'] = 'High'
if(agentAction[1] == 1):
tempCommands['A'] = True
elif(agentAction[1] == 2):
tempCommands['B'] = True
elif(agentAction[1] == 3):
tempCommands['Start'] = True
if(agentAction[2] == 1):
tempCommands['Up'] = True
elif(agentAction[2] == 2):
tempCommands['Right'] = True
elif(agentAction[2] == 3):
tempCommands['Down'] = True
elif(agentAction[2] == 4):
tempCommands['Left'] = True
return tempCommands
def castEmuStateToObservation(self, state, state_shape):
castedSpaces = spaces.Dict({
'opponent_id': state.p2['character'],
'opponent_action': state.p2['action'],
'opponentTimer': state.p2['actionTimer'],
'hearts': state.p1['hearts'],
'stars': state.p1['stars'],
'blinkingPink': state.p1['blinkingPink'],
'bersekerAction': state.p1['bersekerAction']
})
# Each observation will be represented as a keras categorical value: a n
# bits number with a single "1" that represents the category, where n is
# the length of the dimension as specified in the spaces. We then flatten
# the result array to get a single binary string that represents the full
# state.
result_array = []
for item in castedSpaces.spaces.keys():
classes = state_shape.spaces[item]
result_array.append(to_categorical(np.unique(castedSpaces.spaces[item])[0], num_classes = np.unique(classes)[0].n, dtype ="int32"))
flattened_spaces = [item for sublist in result_array for item in sublist]
return flattened_spaces
def calculateActionFromIndex(self, index):
result ={}
semiAction=self.actionMap[index]
result[0]=int(semiAction[0])
result[1]=int(semiAction[1])
result[2]=int(semiAction[2])
return result
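# Example (not part of the original module): a minimal sketch of how the pieces above fit
# together -- an agent's action index is expanded into the three-digit action code and then
# into the button dictionary sent to the emulator. The index 27 and the variable names below
# are illustrative assumptions, not values used by the project.
if __name__ == '__main__':
    utils = punchUtils()
    agent_action = utils.calculateActionFromIndex(27)   # '112' -> {0: 1, 1: 1, 2: 2}: Medium timing, A, Right
    buttons = utils.castAgentActionToEmuAction(agent_action)
    print(agent_action, buttons)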
| UTF-8 | Python | false | false | 5,554 | py | 12 | punchUtils.py | 8 | 0.548614 | 0.487396 | 0 | 152 | 35.539474 | 143 |
open222333/PythonCode | 13,013,750,923,112 | a823b10f55b6135d08adb09a061d7b4407bf4f77 | 3ff6b318db4d0fdda60c763f475b7dafa4f0c9ba | /Kingreturn_Algorithm/ch19-Meeting/ch19-8.py | 702501e7d56ffb4ac2d8a24850a43ecfb5e5bc4c | [] | no_license | https://github.com/open222333/PythonCode | 493124dcbbb4d4ab18be3090bc35224e20959780 | b9baffd2598b2b1e131fdd6bb38536c685b633e5 | refs/heads/master | "2023-08-28T14:51:52.004948" | "2023-06-20T15:45:05" | "2023-06-20T15:45:05" | 356,302,028 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # ๆ้็คฆๅ้ก ๅๆ
# (gold mine problem, solved with dynamic programming)
def gold(W, wt, val):
    '''Dynamic programming algorithm.'''
    n = len(val)
    table = [[0 for x in range(W + 1)] for x in range(n + 1)]      # initialize the DP table
    for r in range(n + 1):                                             # fill in table rows
        for c in range(W + 1):                                         # fill in table columns
            if r == 0 or c == 0:
                table[r][c] = 0
            elif wt[r - 1] <= c:
                table[r][c] = max(val[r - 1] + table[r - 1][c - wt[r - 1]], table[r - 1][c])
            else:
                table[r][c] = table[r - 1][c]
    return table[n][W]
value = [10, 16, 20, 22, 25]        # value produced by each gold mine
weight = [3, 4, 3, 5, 5]            # workers needed to mine each gold mine
gold_weight = 10                    # total workers available
print('Maximum value = {}'.format(gold(gold_weight, weight, value)))
| UTF-8 | Python | false | false | 774 | py | 1,002 | ch19-8.py | 926 | 0.458084 | 0.411677 | 0 | 21 | 30.809524 | 92 |
staguchi0703/ABC166 | 4,209,067,969,456 | 7a7cf7f5834c6f16068c89242fc8c3d5021ba210 | ad0af348da7b97f21559697103bbf242db526b86 | /E/resolve.py | 27d63309082f85774dddd484c57c2b634dfd005c | [
"MIT"
] | permissive | https://github.com/staguchi0703/ABC166 | ef923346c887e96d7e7a8739f4dad7cc3f7f9404 | 8913a37525d0ecdd58523117bd9a61f89a781bde | refs/heads/master | "2022-05-27T20:21:16.164872" | "2020-05-03T13:45:14" | "2020-05-03T13:45:14" | 260,901,341 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def resolve():
'''
code here
'''
N = int(input())
A_list = [int(item) for item in input().split()]
dp = [0 for _ in range(N+1)]
if __name__ == "__main__":
resolve()
| UTF-8 | Python | false | false | 195 | py | 4 | resolve.py | 4 | 0.461538 | 0.451282 | 0 | 11 | 16.727273 | 52 |
ArhinZi/Rubina | 16,071,767,639,096 | f7ba7ed3837176d4729337872d4eec9ebd968136 | e01bdfb8cf82550c36db97af33ad3d08e084169a | /adm/admin.py | e357746543b678e04a5fdf29e5a48d98f6136555 | [] | no_license | https://github.com/ArhinZi/Rubina | e4862f67bab7c6aafecddd61d7a8c445dd516c7b | 89c69d0de010ee105503f8d3ecd24b563ce5fc22 | refs/heads/master | "2018-10-11T23:48:40.762153" | "2018-06-14T15:06:47" | "2018-06-14T15:06:47" | 121,873,359 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Video
from .models import News
from .models import Static
from .models import Recalls
admin.site.register(Video)
admin.site.register(News)
admin.site.register(Static)
admin.site.register(Recalls)
| UTF-8 | Python | false | false | 280 | py | 18 | admin.py | 11 | 0.807143 | 0.807143 | 0 | 12 | 22.333333 | 32 |
TheChouzanOne/ProjectEuler | 6,846,177,877,255 | 4cd012b7c8e80c68b1536bc6a811fab9b8b8dc28 | 5e6bdead9022b93f0b050358239a92790d5040be | /P015/solution.py | 2575118ed1c395ab9b86bbfbd7728e6b787e51cc | [] | no_license | https://github.com/TheChouzanOne/ProjectEuler | d1e5603e466b2e7275e94e0f01d8f5d30a567ef0 | 11aac516616bd00080925eaab90057a961fe28d2 | refs/heads/master | "2020-05-16T00:02:26.025339" | "2019-04-22T09:10:11" | "2019-04-22T09:10:11" | 182,569,309 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
t=time()
ans = 1
for i in range(21,41):
ans *= i
for i in range(1,21):
ans /= i
print(ans)
print("Time: %s"%(time()-t)) | UTF-8 | Python | false | false | 151 | py | 26 | solution.py | 22 | 0.576159 | 0.523179 | 0 | 11 | 12.818182 | 28 |
Aasthaengg/IBMdataset | 18,923,625,922,208 | 1b34e2e65d058a1ce8825b747d700f4757090ea7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02416/s574642041.py | 9143d753a53fc50a3b0a02446871b1cf3ddffbca | [] | no_license | https://github.com/Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | "2023-04-22T10:22:44.763102" | "2021-05-13T17:27:22" | "2021-05-13T17:27:22" | 367,112,348 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | go = 1
while go == 1:
a = str(input())
alist = list(a)
if a == "0":
go = 0
        break  # stop reading input once the terminating "0" is seen
sum = 0
for i in alist:
i = int(i)
sum += i
if go == 1:
print(sum)
| UTF-8 | Python | false | false | 211 | py | 202,060 | s574642041.py | 202,055 | 0.369668 | 0.341232 | 0 | 13 | 15.230769 | 20 |
zzmjohn/deep_trading_notebook | 6,536,940,252,687 | b97ced9e054f61b1f264c7611f1e443710e2b1d6 | ea1ff148253a759c57fa970d758c5e21d86318ea | /pandas_ta.py | cd2a8f551606dd90b685755b73a65df392ca2124 | [
"MIT"
] | permissive | https://github.com/zzmjohn/deep_trading_notebook | 525ebab350297e9d278aaca755aca7d3bd3ee572 | 95d82cb399df57fe17e80527c21d9e3c1a2af015 | refs/heads/master | "2021-08-15T07:57:17.069207" | "2017-11-17T09:24:57" | "2017-11-17T09:24:57" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
def EMA(df, n, column='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(df[column].ewm(span=n,min_periods=n - 1,adjust=True,ignore_na=False).mean(), name='EMA_' + str(n))
return df.join(result)
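# Usage sketch (not in the original file): EMA() returns a copy of the input frame with an
# 'EMA_<n>' column joined on. The DataFrame below is synthetic example data, purely an
# assumption for illustration.
if __name__ == '__main__':
    df = pd.DataFrame({'Close': [10.0, 10.5, 10.2, 10.8, 11.1, 10.9, 11.4, 11.8, 12.0, 12.3]})
    print(EMA(df, 5).tail())   # the new column is named 'EMA_5'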
| UTF-8 | Python | false | false | 248 | py | 13 | pandas_ta.py | 7 | 0.637097 | 0.633065 | 0 | 8 | 29.875 | 119 |
dBounde13/Hangman | 16,355,235,471,349 | 0c32ae56a1ae8a4f97d6db3347a213ae4f6a9e62 | 7c699473b65883cf25dd7bba5a5b2425dd526640 | /Hangman/task/hangman/hangman.py | ada340a949354a66ca76e2df8ee4d570551d0c2d | [] | no_license | https://github.com/dBounde13/Hangman | d9488b9f7dea721fedee1daa67e8f2b49ea649e1 | 861c38cdd8f9a90fb02fb98eeafec3be5a635173 | refs/heads/master | "2023-06-15T19:35:55.459088" | "2021-07-13T10:07:44" | "2021-07-13T10:07:44" | 385,559,065 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
print('H A N G M A N\n')
user_type = input(f'Type "play" to play the game, "exit" to quit: ')
print()
if user_type == 'exit':
    exit()  # quit without starting the game
words_list = ['python', 'java', 'kotlin', 'javascript']
word = list(random.choice(words_list))
guesses = list('-' * len(word))
print(''.join(guesses))
count = 8
tried_letter = []
while count > 0:
print()
print(''.join(guesses))
user_letter = input('Input a letter: ')
if len(user_letter) != 1:
print("You should input a single letter")
tried_letter.append(user_letter)
continue
elif not user_letter.islower() or not user_letter.isalpha():
print("Please enter a lowercase English letter")
tried_letter.append(user_letter)
continue
elif user_letter in guesses or user_letter in tried_letter:
print("You've already guessed this letter")
continue
elif user_letter in word and user_letter not in guesses:
for i in range(len(word)):
if user_letter == word[i]:
guesses[i] = user_letter
if "-" not in guesses:
print("You guessed the word!")
print("You survived!")
exit()
else:
print("That letter doesn't appear in the word")
tried_letter.append(user_letter)
        count -= 1  # only a wrong guess costs one of the 8 tries
if count == 0:
print("You lost!")
| UTF-8 | Python | false | false | 1,403 | py | 24 | hangman.py | 12 | 0.578047 | 0.574483 | 0 | 44 | 30.886364 | 68 |
Ericonaldo/pysc2_minimap_agents | 5,815,385,767,698 | e654d1fd21076fe7338e599c13fca3077d0225c6 | 1af3088cfc2aca00e497722ba63c50bd5ff20bfe | /build_marines_agent_v3-dqn.py | 995aea39a97f06069ee1889cb0aba10c3bda1c32 | [] | no_license | https://github.com/Ericonaldo/pysc2_minimap_agents | e052886382f1f94621880b9f41eadec877ed13fc | 9e33f371bdfa2786ee492eb886f49eee01dedf48 | refs/heads/master | "2020-03-26T19:40:39.953087" | "2018-08-19T07:50:57" | "2018-08-19T07:50:57" | 145,279,578 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Chinese is noted by talor, eric
import random
import math
import os.path
import numpy as np
import pandas as pd
import tensorflow as tf
import collections
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id
_TRAIN_SCV = actions.FUNCTIONS.Train_SCV_quick.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_ARMY_SUPPLY = 5
_SCV_SUPPLY = 6
_PLAYER_BACKGROUND = 0
# added by taylor
_COLLECTED_MINERALS = 8  # minerals collected so far
_FOOD_USED = 3  # current supply used
_FOOD_CAP = 4  # current supply cap
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_NEUTRAL_MINERAL_FIELD = 341 # for detecting the location of mineral patches
_NOT_QUEUED = [0]  # execute the action immediately
_QUEUED = [1]  # queue the action (execute later)
_SELECT_ALL = [2]
DATA_FILE = 'sparse_agent_data_dqn_v3'
LOG_DIR = 'logs/'
WEIGHT_DIR = 'weights/'
ACTION_DO_NOTHING = 0 # 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 1 # 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 2 # 'buildbarracks'
ACTION_BUILD_MARINE = 3 # 'buildmarine'
ACTION_BUILD_SCV = 4 # 'buildscv'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
ACTION_BUILD_SCV,
]
MAX_SCV_NUM = 17  # maximum number of SCVs
POPULATION_THRESHOLD_ENABLE_BUILD_SUPPLY_DEPOT = 15  # building a supply depot is only allowed when free supply is below this threshold
# added by eric
STATE = np.zeros(7)  # the state is 7-dimensional
MEMORY_SIZE = 10000  # replay memory size
BATCH_SIZE = 32
UPDATE_PERIOD = 200  # target-network update frequency
DECAY_EPS = 200  # epsilon decay frequency
class DeepQNetwork():
def __init__(self, sess=None, gamma = 0.8, epsilon=0.9):
self.gamma = gamma
self.epsilon = epsilon
self.action_dim = len(smart_actions)
self.state_dim = len(STATE)
self.network()
self.step = tf.Variable(0, trainable=False)
self.sess = sess
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
if os.path.isdir(DATA_FILE+'/'+WEIGHT_DIR):
self.saver.restore(self.sess, tf.train.latest_checkpoint(DATA_FILE+'/'+WEIGHT_DIR))
self.summary_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(DATA_FILE+'/'+LOG_DIR, graph=sess.graph)
self.summary_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), sess.run(self.step))
def net_frame(self, scope, collections_name, num_actions, inputs):
"basic net frame"
weights_init = tf.truncated_normal_initializer(0, 0.3)
bias_init = tf.constant_initializer(0.1)
with tf.variable_scope(scope):
with tf.variable_scope("layer1"):
weights1 = tf.get_variable(name = "weights", dtype=tf.float32, shape=[self.state_dim, 64], initializer=weights_init, collections=collections_name)
bias1 = tf.get_variable(name = "bias", dtype=tf.float32, shape=[64], initializer=bias_init, collections=collections_name)
wx_b = tf.matmul(inputs, weights1) + bias1
h1 = tf.nn.relu(wx_b)
with tf.variable_scope("layer2"):
weights2 = tf.get_variable(name = "weights", dtype=tf.float32, shape=[64, 64], initializer=weights_init, collections=collections_name)
bias2 = tf.get_variable(name = "bias", dtype=tf.float32, shape=[64], initializer=bias_init, collections=collections_name)
wx_b = tf.matmul(h1, weights2) + bias2
h2 = tf.nn.relu(wx_b)
with tf.variable_scope("layer3"):
weights3 = tf.get_variable(name = "weights", dtype=tf.float32, shape=[64, num_actions], initializer=weights_init, collections=collections_name)
bias3 = tf.get_variable(name = "bias", dtype=tf.float32, shape=[num_actions], initializer=bias_init, collections=collections_name)
q_out = tf.matmul(h2, weights3) + bias3
return q_out
def network(self):
"networks"
# q_network
self.inputs_q = tf.placeholder(dtype = tf.float32, shape = [None, self.state_dim], name = "inputs_q")
scope_var = "q_network"
clt_name_var = ["q_net_prmt", tf.GraphKeys.GLOBAL_VARIABLES] # ๅฎไนไบcollections
self.q_value = self.net_frame(scope_var, clt_name_var, self.action_dim, self.inputs_q)
# target_network
self.inputs_target = tf.placeholder(dtype = tf.float32, shape = [None, self.state_dim], name = "inputs_target")
scope_var = "target_network"
clt_name_var = ["target_net_prmt", tf.GraphKeys.GLOBAL_VARIABLES] # ๅฎไนไบcollections
self.q_target = self.net_frame(scope_var, clt_name_var, self.action_dim, self.inputs_target)
with tf.variable_scope("loss"):
self.target = tf.placeholder(dtype = tf.float32, shape = [None, self.action_dim], name="target")
self.loss = tf.reduce_mean(tf.square(self.q_value - self.target))
with tf.variable_scope("train"):
self.train_op = tf.train.RMSPropOptimizer(0.01).minimize(self.loss)
def learn(self, state, action, reward, state_next, done, step):
"train process"
if step % 500 == 0:
self.saver.save(self.sess, DATA_FILE+'/'+WEIGHT_DIR, global_step=self.step)
q, q_target = self.sess.run([self.q_value, self.q_target], feed_dict={self.inputs_q: state, self.inputs_target: state_next})
target = reward + self.gamma * np.max(q_target, axis=1)
self.reform_target = q.copy()
batch_index = np.arange(BATCH_SIZE, dtype = np.int32)
self.reform_target[batch_index, action] = target
loss, _ = self.sess.run([self.loss, self.train_op], feed_dict={self.inputs_q:state, self.target:self.reform_target})
def update_prmt(self):
"update target network parameters"
q_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "q_network")
target_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "target_network")
        self.sess.run([tf.assign(t, q) for t,q in zip(target_prmts, q_prmts)])  # copy the Q-network parameters into the target network
print("updating target-network parameters...")
def choose_action(self, current_state):
current_state = current_state[np.newaxis, :]
# array dim : (xx, ) --> (1, xx)
q = self.sess.run(self.q_value, feed_dict={self.inputs_q: current_state})
# e-greedy
if np.random.random() < self.epsilon:
action_chosen = np.random.randint(0, self.action_dim)
else:
action_chosen = np.argmax(q)
return action_chosen
def decay_epsilon(self):
pass
# if self.epsilon > 0.03:
# self.epsilon -= 0.02
Transition = collections.namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
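# Illustrative helper (not part of the original agent): a sketch of the replay-memory pattern used
# inside SparseAgent.step() below -- transitions are appended to a bounded list and random
# minibatches are drawn for DQN.learn(). The function name and arguments are assumptions made
# only for illustration.
def _replay_sketch(memory, transition, dqn, global_step):
    if len(memory) > MEMORY_SIZE:          # drop the oldest experience once the buffer is full
        memory.pop(0)
    memory.append(transition)              # transition is a Transition(state, action, reward, next_state, done)
    if len(memory) > BATCH_SIZE * 4:       # only start learning once enough experience is collected
        batch = random.sample(memory, BATCH_SIZE)
        states, actions, rewards, next_states, dones = map(np.array, zip(*batch))
        dqn.learn(states, actions, rewards, next_states, dones, global_step)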
class SparseAgent(base_agent.BaseAgent):
def __init__(self):
super(SparseAgent, self).__init__()
self.memory = []#memory for memory replay
self.global_step = 0
self.sess = tf.Session()
self.DQN = DeepQNetwork(self.sess)
self.previous_action = None
self.previous_state = None
# keep track of the command centre location
self.cc_y = None
self.cc_x = None
# track the sequence position within a multi-step action
self.move_number = 0
def __del__(self):
self.sess.close()
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def step(self, obs):
super(SparseAgent, self).step(obs)
if obs.last(): # done
reward = obs.reward
'''
#--------train
if len(self.memory) > MEMORY_SIZE:
self.memory.pop(0)
self.memory.append(Transition(self.previous_state, self.previous_action, reward, [-1], float(obs.last())))
if len(self.memory) > BATCH_SIZE * 4:
batch_trasition = random.sample(self.memory, BATCH_SIZE)
batch_state, batch_action, batch_reward, batch_next_state, batch_done = map(np.array, zip(*batch_trasition))
self.DQN.learn(batch_state, batch_action, batch_reward, batch_next_state, batch_done, self.global_step)
self.global_step += 1
print("trained",self.global_step)
if self.global_step % UPDATE_PERIOD == 0:
self.DQN.update_prmt()
if self.global_step % DECAY_EPS == 0:
self.DQN.decay_epsilon()
#--------train
'''
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FunctionCall(_NO_OP, [])
unit_type = obs.observation['screen'][_UNIT_TYPE]
if obs.first(): # first state = reset
player_y, player_x = (obs.observation['minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
            self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0  # whether our base is in the upper part of the map
            self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()  # Terran command center
cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_count = 1 if cc_y.any() else 0
        depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()  # Terran supply depots
        supply_depot_count = int(round(len(depot_y) / 69))  # 69 should be the pixel footprint of the building
        barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()  # Terran barracks
        barracks_count = int(round(len(barracks_y) / 137))  # 137 is also the pixel footprint of the building
        # move_number is the index within a multi-step action sequence
        if self.move_number == 0:  # time to perform the first step of the action
self.move_number += 1
# current_state = np.zeros(8)
            current_state = STATE.copy()  # copy so the stored previous_state is not aliased to the shared STATE buffer
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = obs.observation['player'][_ARMY_SUPPLY]
            # TODO(by taylor) - add to the state: current supply used / supply cap, mineral count
            current_state[4] = obs.observation['score_cumulative'][_COLLECTED_MINERALS]  # minerals collected so far
            # print("minerals:",current_state[4])
            current_state[5] = obs.observation['player'][_FOOD_USED]  # current supply used
            current_state[6] = obs.observation['player'][_FOOD_CAP]  # current supply cap
if self.previous_action is not None:
#--------train
if len(self.memory) > MEMORY_SIZE:
self.memory.pop(0)
self.memory.append(Transition(self.previous_state, self.previous_action, 0, current_state, float(obs.last())))
if len(self.memory) > BATCH_SIZE * 4:
batch_trasition = random.sample(self.memory, BATCH_SIZE)
batch_state, batch_action, batch_reward, batch_next_state, batch_done = map(np.array, zip(*batch_trasition))
self.DQN.learn(batch_state, batch_action, batch_reward, batch_next_state, batch_done, self.global_step)
self.global_step += 1
if self.global_step and self.global_step % UPDATE_PERIOD == 0:
self.DQN.update_prmt()
if self.global_step and self.global_step % DECAY_EPS == 0:
self.DQN.decay_epsilon()
#--------train
            rl_action = self.DQN.choose_action(current_state)  # choose an action
self.previous_state = current_state
self.previous_action = rl_action
# smart_action, x, y = self.splitAction(self.previous_action)
smart_action = rl_action
# print("action:", smart_action)
            if smart_action == ACTION_BUILD_BARRACKS:  # build barracks: step 1, select an SCV first
                unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
                if unit_y.any():
                    i = random.randint(0, len(unit_y) - 1)
                    target = [unit_x[i], unit_y[i]]
                    return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])  # select the SCV
            elif smart_action == ACTION_BUILD_SUPPLY_DEPOT and obs.observation['player'][_FOOD_CAP] - obs.observation['player'][_FOOD_USED] < POPULATION_THRESHOLD_ENABLE_BUILD_SUPPLY_DEPOT:
                # build supply depot: step 1, select an SCV first
                unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
                if unit_y.any():  # there is at least one SCV
                    i = random.randint(0, len(unit_y) - 1)  # pick a random SCV
                    target = [unit_x[i], unit_y[i]]
                    return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
            elif smart_action == ACTION_BUILD_MARINE:  # train marine: step 1, select a barracks first
                if barracks_y.any():
                    i = random.randint(0, len(barracks_y) - 1)  # position of a random barracks
                    target = [barracks_x[i], barracks_y[i]]
                    return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])
            elif smart_action == ACTION_BUILD_SCV and obs.observation['player'][_SCV_SUPPLY] < MAX_SCV_NUM:  # train SCV: keep at most 17 SCVs
                target = [round(self.cc_x.mean()), round(self.cc_y.mean())]  # location of the command center
                return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
        elif self.move_number == 1:  # time for the second step; no need to pick a new action
self.move_number += 1
# smart_action, x, y = self.splitAction(self.previous_action)
smart_action = self.previous_action
if smart_action == ACTION_BUILD_SUPPLY_DEPOT and obs.observation['player'][_FOOD_CAP] - obs.observation['player'][_FOOD_USED] < POPULATION_THRESHOLD_ENABLE_BUILD_SUPPLY_DEPOT:
                # build supply depot: step 2, place the supply depot
                if _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:
                    blank_y, blank_x = (obs.observation['minimap'][_PLAYER_RELATIVE] == _PLAYER_BACKGROUND).nonzero()  # pick a random empty spot
if blank_y.any():
i = random.randint(0, len(blank_y) - 1)
m_x = blank_x[i]
m_y = blank_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
            elif smart_action == ACTION_BUILD_BARRACKS:  # build barracks: step 2, place the barracks
if _BUILD_BARRACKS in obs.observation['available_actions']:
blank_y, blank_x = (obs.observation['minimap'][_PLAYER_RELATIVE] == _PLAYER_BACKGROUND).nonzero()
if blank_y.any():
i = random.randint(0, len(blank_y) - 1)
m_x = blank_x[i]
m_y = blank_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
            elif smart_action == ACTION_BUILD_MARINE:  # train marine: step 2, queue the marine
if _TRAIN_MARINE in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
            elif smart_action == ACTION_BUILD_SCV and obs.observation['player'][_SCV_SUPPLY] < MAX_SCV_NUM:  # train SCV: step 2, queue the SCV
if _TRAIN_SCV in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_SCV, [_QUEUED])
        elif self.move_number == 2:  # time for the third step; no need to re-pick the action
            self.move_number = 0  # reset the step counter first
# smart_action, x, y = self.splitAction(self.previous_action)
smart_action = self.previous_action
            if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT or (smart_action == ACTION_BUILD_SCV and obs.observation['player'][_SCV_SUPPLY] < MAX_SCV_NUM):  # after building a barracks or supply depot: step 3, send the SCV back to mining
if _HARVEST_GATHER in obs.observation['available_actions']:
unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
m_x = unit_x[i]
m_y = unit_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])
        return actions.FunctionCall(_NO_OP, [])  # otherwise do nothing
| UTF-8 | Python | false | false | 18,578 | py | 2 | build_marines_agent_v3-dqn.py | 1 | 0.569862 | 0.559067 | 0 | 410 | 42.604878 | 216 |
defbobo/pytest-testmon | 14,027,363,202,087 | f8b717581f5ec62fdd4379c7bb45704284bce7b8 | 377489abd7886ca652f768ca77f508efe26623f2 | /test/test_process_code.py | c564e81fc1bed5281c5ec083ccc01b2dbbc2e36e | [
"MIT"
] | permissive | https://github.com/defbobo/pytest-testmon | cbc92ce9b2c2b834e15d820a6a4a0ca2239dd70d | 6964c04ed4c8979cce006240c0de49d22bde5340 | refs/heads/master | "2021-05-13T20:50:14.424205" | "2018-01-06T09:18:07" | "2018-01-06T09:18:07" | 116,920,371 | 1 | 0 | null | true | "2018-01-10T06:57:36" | "2018-01-10T06:57:35" | "2018-01-10T06:57:25" | "2018-01-06T09:23:09" | 247 | 0 | 0 | 0 | null | false | null | # -- coding:utf8 --
from test.coveragepy.coveragetest import CoverageTest
import pytest
from testmon.process_code import Block, Module, checksum_coverage, read_file_with_checksum, process_encoding
try:
from StringIO import StringIO as MemFile
except ImportError:
from io import BytesIO as MemFile
def parse(source_code, file_name='a.py'):
return Module(source_code=source_code, file_name=file_name).blocks
def test_detect_encoding1():
lines = []
output = MemFile(b'#first comment\n# -- coding: abcd --')
assert process_encoding(lines, output) == None
assert lines == [b'#first comment\n']
assert process_encoding(lines, output) == 'abcd'
assert lines == [b'#first comment\n']
def test_detect_encoding2():
lines = []
output = MemFile(b'1\n2\n')
assert process_encoding(lines, output) == None
assert lines == [b'1\n']
assert process_encoding(lines, output) == None
assert lines == [b'1\n', b'2\n']
def test_detect_encoding3():
    with open('test/samples/print1250r.py', 'rb') as f:
        lines = []
        assert process_encoding(lines, f) == 'cp1250'
        assert lines == []
def test_read_file_with_checksum():
assert u'ลก' in read_file_with_checksum('test/samples/print1250r.py')[0]
def test_read_empty_file_with_checksum():
assert read_file_with_checksum('test/samples/empty.py')[0] == ''
def test_read_2lines_file_with_checksum():
assert read_file_with_checksum('test/samples/2lines.py')[0] == '#2ndline'
def test_module_with_1250():
code_repr = Module(None, 'test/samples/print1250r.py').blocks[0].code
assert "Str('\\xc5\\xa1')" in code_repr or "Str('ลก')" in Module(None, 'test/samples/print1250r.py').blocks[0].code
class TestSourceIntoBlocks(object):
def test_empty(self):
assert parse(source_code="") == []
def test_syntax_error(self):
parse(source_code="(")
def test_simple(self):
blocks = parse("""print('high')\nprint('low')""")
assert len(blocks) == 1
assert blocks[0].start == 1
assert blocks[0].end == 2
def test_2_blocks(self):
blocks = parse(
"""
print('left')
def a():
print('right') """
)
assert len(blocks) == 2
assert blocks[0].start == 4
assert blocks[0].end == 4
assert blocks[1].start == 2
assert blocks[1].end == 4
def test_change_one(self):
orig = parse("""
print('left')
def a():
print('right') """)
changed = parse("""
print('left')
def a():
print('left') """)
assert (orig[0].start,
orig[0].end,
orig[0].checksum) != (changed[0].start,
changed[0].end,
changed[0].checksum)
assert (orig[1].start,
orig[1].end,
orig[1].checksum) == (changed[1].start,
changed[1].end,
changed[1].checksum)
def test_same_even_names_but_different_blocks(self):
blocks = parse("""
print('left')
def a():
print(1)
def a():
print(1) """)
assert len(set([block.checksum for block in blocks])) == len(blocks)
def test_same_but_different_blocks(self):
blocks = parse("""
print('left')
def a():
print(1)
def b():
print(1) """)
assert len(set([block.checksum for block in blocks])) == len(blocks)
GLOBAL_BLOCK = Block(1, 8, 1000)
class TestchecksumCoverage(object):
def test_miss_before(self):
assert checksum_coverage([Block(2, 3, 101), GLOBAL_BLOCK, ], [1]) == [1000, ]
def test_hit_first(self):
assert checksum_coverage([Block(2, 3, 102), GLOBAL_BLOCK], [2]) == [1000, 102]
def test_hit_first2(self):
assert checksum_coverage([Block(2, 3, 102), Block(6, 7, 103), GLOBAL_BLOCK], [2]) == [1000, 102]
def test_hit_first3(self):
assert checksum_coverage([Block(2, 3, 102), Block(6, 7, 103), GLOBAL_BLOCK], [6]) == [1000, 103]
def test_miss_after(self):
assert checksum_coverage([GLOBAL_BLOCK, Block(1, 2, 103)], [3]) == [1000, ]
def test_hit_second(self):
assert checksum_coverage([GLOBAL_BLOCK, Block(2, 3, 101), Block(5, 6, 102)], [5]) == [1000, 102]
def test_hit_second_twice(self):
assert checksum_coverage([GLOBAL_BLOCK, Block(2, 3, 101), Block(4, 7, 102)], [5, 6]) == [1000, 102]
@pytest.mark.parametrize("lines", [[3, 5], [5, 3]])
def test_hit_both(self, lines):
assert checksum_coverage([GLOBAL_BLOCK, Block(2, 3, 101), Block(5, 6, 102)], lines) == [1000, 101, 102]
@pytest.mark.parametrize("lines", [[4, 7], [7, 4]])
def test_miss_both(self, lines):
assert checksum_coverage([GLOBAL_BLOCK, Block(2, 3, 101), Block(5, 6, 102)], lines) == [1000, ]
class CodeSample():
def __init__(self, source_code, expected_coverage=None, possible_lines=None):
self.source_code = source_code
self.expected_coverage = expected_coverage or {}
self.possible_lines = possible_lines or []
code_samples = {
1: CodeSample("""\
def add(a, b):
return a + b
assert add(1, 2) == 3
""",
[1, 2, 4]),
2: CodeSample("""\
def add(a, b):
return a + b
def subtract(a, b):
return a - b
assert add(1, 2) == 3
""",
[1, 2, 4, 7]),
'3': CodeSample("""\
class A(object):
def add(self, a, b):
return a + b
""",
[1, 2]),
'3b': CodeSample("""\
class A(object):
def add(self, a, b):
return a - b
""",
[1, 2]),
'classes': CodeSample("""\
class A(object):
def add(self, a, b):
return a + b
def subtract(self, a, b):
return a - b
""",
[1, 2, 4]),
'classes_b': CodeSample("""\
class A(object):
def add(self, a, b):
return a + b
def subtract(self, a, b):
return a - b - 1
""",
[1, 2, 4]),
'classes_c': CodeSample("""\
class A(object):
def add1(self, a, b):
return a + b
def subtract(self, a, b):
return a - b
""",
[1, 2, 4]),
}
class TestModule(object):
def test_base_diff(self):
blocks1 = parse("""\
a = 1
def identity(ob):
return ob
@identity
def method(st):
return 1
class Klass(object):
pass
for i in range(1):
pass """)
blocks2 = parse("""\
a = 1
def identity(ob):
return ob
@identity
def method(st):
return 5
class Klass(object):
pass
for i in range(1):
pass """)
assert blocks1[0] == blocks2[0]
assert blocks1[2] == blocks2[2]
assert blocks1[1] != blocks2[1]
def test_covdata_intersects_deps(self):
def checksum(code_sample):
module = Module(code_sample.source_code, 'a.py')
covdata = code_sample.expected_coverage
return checksum_coverage(module.blocks, covdata)
assert checksum(code_samples[1])[1] == checksum(code_samples[2])[1]
def test_3(self):
module1 = Module(code_samples['3'].source_code)
module2 = Module(code_samples['3b'].source_code)
assert len(module1.blocks) == len(module2.blocks) == 2
assert module1.blocks[0] != module2.blocks[0]
assert module1.blocks[1] == module2.blocks[1]
def test_classes(self):
module1 = Module(code_samples['classes'].source_code)
module2 = Module(code_samples['classes_b'].source_code)
assert len(module1.blocks) == len(module2.blocks) == 3
assert module1.blocks[0] == module2.blocks[0]
assert module1.blocks[1] != module2.blocks[1]
assert module1.blocks[2] == module2.blocks[2]
def test_classes_header(self):
module1 = Module(code_samples['classes'].source_code)
module2 = Module(code_samples['classes_c'].source_code)
assert len(module1.blocks) == len(module2.blocks) == 3
b1 = module1.blocks[0]
b2 = module2.blocks[0]
assert (b1.start,
b1.end,
b1.checksum) == (b2.start,
b2.end,
b2.checksum)
assert (b1.name) != (b2.name)
assert module1.blocks[1] == module2.blocks[1]
assert module1.blocks[2] != module2.blocks[2]
class TestCoverageAssumptions(CoverageTest):
def test_easy(self):
for name, mod_cov in code_samples.items():
if mod_cov.expected_coverage:
self.check_coverage(mod_cov.source_code,
cov_data=mod_cov.expected_coverage,
msg="This is for code_sample['{}']".format(name))
| UTF-8 | Python | false | false | 9,721 | py | 12 | test_process_code.py | 8 | 0.503138 | 0.467538 | 0.000206 | 328 | 28.631098 | 118 |
mcdavid109/dpf-nets | 2,456,721,295,769 | 74a46f5ca7100dea38324894043a87a22f90b875 | 74dcaf70e91d8153912bbfed465732d773c7d318 | /lib/datasets/cloud_transformations.py | ed2767b2cd4b7d5fd2eedf6b4b9aa482f26826aa | [] | no_license | https://github.com/mcdavid109/dpf-nets | e5905791ae20d9ef2824580bdb8e3512e4b1e31a | 386b7fbd5e4b1c57aeafafae28914473402d44a9 | refs/heads/master | "2023-01-07T00:20:06.952183" | "2020-10-30T02:33:33" | "2020-10-30T02:33:33" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from torchvision.transforms import Compose
class Scale2OrigCloud(object):
def __init__(self, **kwargs):
self.do_rescale = kwargs['cloud_rescale2orig']
self.do_recenter = kwargs['cloud_recenter2orig']
def __call__(self, sample):
if self.do_rescale:
sample['cloud'] = sample['orig_s'] * sample['cloud']
if 'eval_cloud' in sample:
sample['eval_cloud'] = sample['orig_s'] * sample['eval_cloud']
if self.do_recenter:
sample['cloud'] = sample['cloud'] + sample['orig_c'].reshape(-1, 1)
if 'eval_cloud' in sample:
sample['eval_cloud'] = sample['eval_cloud'] + sample['orig_c'].reshape(-1, 1)
return sample
class TranslateCloud(object):
def __init__(self, **kwargs):
self.shift = np.array(kwargs['cloud_translate_shift'], dtype=np.float32).reshape(-1, 1)
def __call__(self, sample):
sample['cloud'] -= self.shift
if 'eval_cloud' in sample:
sample['eval_cloud'] -= self.shift
return sample
class ScaleCloud(object):
def __init__(self, **kwargs):
self.scale = np.float32(kwargs.get('cloud_scale_scale'))
def __call__(self, sample):
sample['cloud'] /= self.scale
if 'eval_cloud' in sample:
sample['eval_cloud'] /= self.scale
return sample
class AddNoise2Cloud(object):
def __init__(self, **kwargs):
self.scale = np.float32(kwargs.get('cloud_noise_scale'))
def __call__(self, sample):
sample['cloud'] += np.random.normal(scale=self.scale, size=sample['cloud'].shape).astype(np.float32)
if 'eval_cloud' in sample:
sample['eval_cloud'] += np.random.normal(scale=self.scale, size=sample['eval_cloud'].shape).astype(np.float32)
return sample
class CenterCloud(object):
def __init__(self):
pass
def __call__(self, sample):
sample['cloud'] -= sample['cloud'].mean(axis=1, keepdims=True)
if 'eval_cloud' in sample:
sample['eval_cloud'] -= sample['eval_cloud'].mean(axis=1, keepdims=True)
return sample
def ComposeCloudTransformation(**kwargs):
cloud_transformation = []
if kwargs.get('cloud_rescale2orig') or kwargs.get('cloud_recenter2orig'):
cloud_transformation.append(Scale2OrigCloud(**kwargs))
if kwargs.get('cloud_translate'):
cloud_transformation.append(TranslateCloud(**kwargs))
if kwargs.get('cloud_scale'):
cloud_transformation.append(ScaleCloud(**kwargs))
if kwargs.get('cloud_noise'):
cloud_transformation.append(AddNoise2Cloud(**kwargs))
if kwargs.get('cloud_center'):
cloud_transformation.append(CenterCloud())
if len(cloud_transformation) == 0:
return None
else:
return Compose(cloud_transformation)
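# Usage sketch (not part of the original module): ComposeCloudTransformation() builds a
# torchvision Compose from keyword flags; each transform then operates on a sample dict with a
# 'cloud' array of shape (3, N). The flags and the random cloud below are assumptions made only
# for illustration.
if __name__ == '__main__':
    transform = ComposeCloudTransformation(cloud_scale=True, cloud_scale_scale=2.0,
                                           cloud_center=True)
    sample = {'cloud': np.random.rand(3, 1024).astype(np.float32)}
    sample = transform(sample)
    print(sample['cloud'].shape, sample['cloud'].mean(axis=1))  # centered, rescaled cloud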
| UTF-8 | Python | false | false | 2,858 | py | 27 | cloud_transformations.py | 25 | 0.614066 | 0.604619 | 0 | 83 | 33.433735 | 122 |
janetzki/fact_extraction | 3,813,930,991,005 | 1d96d5699cf17f404c9f3b1374c65a1efdc95eee | d93b22136858c58b5f6f55e7dd9bee1bef17cc8a | /pattern_learning/pattern_cleaner.py | d589c11a2f25a4b8b07b85da60ddf2815750aa27 | [] | no_license | https://github.com/janetzki/fact_extraction | 23756b81c1ba28f1d28d84b20899ab00c81d6104 | 187023f93937985e10f593b032ea7f48c1d61060 | refs/heads/master | "2023-07-16T19:41:50.462284" | "2021-08-28T14:19:34" | "2021-08-28T14:19:34" | 73,001,582 | 5 | 0 | null | false | "2021-08-28T14:19:34" | "2016-11-06T15:57:58" | "2020-07-16T02:58:16" | "2021-08-28T14:19:34" | 119,514 | 6 | 0 | 16 | Python | false | false | from tqdm import tqdm
from storing_tools import PatternTool
from pattern_extraction import Pattern
import os
dir_path = os.path.dirname(os.path.abspath(__file__)) + '/'
class PatternCleaner(PatternTool):
def __init__(self, least_threshold_types, least_threshold_words,
patterns_input_path=dir_path + '../data/patterns_raw.pkl',
patterns_output_path=dir_path + '../data/patterns_cleaned.pkl'):
super(PatternCleaner, self).__init__(patterns_input_path, patterns_output_path)
self.least_threshold_types = least_threshold_types
self.least_threshold_words = least_threshold_words
@classmethod
def from_config_file(cls):
config_parser = cls.get_config_parser()
section = 'pattern_cleaner'
least_threshold_types = config_parser.getfloat(section, 'least_threshold_types')
least_threshold_words = config_parser.getfloat(section, 'least_threshold_words')
return cls(least_threshold_types, least_threshold_words)
def clean_patterns(self):
self.logger.print_info('Pattern cleaning...')
for relation, pattern in tqdm(self.relation_type_patterns.iteritems()):
self.relation_type_patterns[relation] = Pattern.clean_pattern(pattern,
self.least_threshold_words,
self.least_threshold_types)
self.relation_type_patterns = dict(
filter(lambda (rel, pat): pat is not None, self.relation_type_patterns.iteritems()))
self.logger.print_done('Pattern cleaning completed.')
if __name__ == '__main__':
pattern_cleaner = PatternCleaner.from_config_file()
pattern_cleaner.clean_patterns()
pattern_cleaner.save_patterns()
| UTF-8 | Python | false | false | 1,829 | py | 63 | pattern_cleaner.py | 57 | 0.632586 | 0.632586 | 0 | 39 | 45.897436 | 101 |
Chamalie-UOM/BackEnd | 566,935,707,293 | dde84e81a1278696f390ce69327da983ed4a0467 | 923759b372d7367ae07e5cca91d3df1709b8c276 | /phyloGenie/ml.py | 72e419763853ea3851cdf8d09928cf8bda9fa3b3 | [] | no_license | https://github.com/Chamalie-UOM/BackEnd | fbb74a0344da943df1741553a8aa5bdffc5c609e | 9f67cf00cbcea13778700b4512efd89fa434e383 | refs/heads/master | "2020-07-07T17:46:51.195757" | "2019-11-20T13:16:22" | "2019-11-20T13:16:22" | 203,426,932 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import sys
from Bio import Alphabet
from Bio.Phylo.Applications import PhymlCommandline
from io import StringIO
from Bio import AlignIO, SeqIO
from Bio import SeqIO
from Bio.Alphabet import IUPAC
class MlTreeConstructor:
'''def formatConversion(self, data_type, file):
temp = "{}.phylip".format(os.path.splitext(file)[0])
# file_Handler = open(temp, "wb")
# file_Handler.write(byte_file.read())
# file_Handler.close()
if data_type == 'DNA':
AlignIO.convert(file, "fasta", temp, "phylip", alphabet=Alphabet.generic_nucleotide)
else:
AlignIO.convert(file, "fasta", temp, "phylip", alphabet=Alphabet.generic_protein)
return temp '''
def converter(self, file, data_type):
base = os.path.splitext(file)[0]
if data_type == 'DNA':
return SeqIO.convert(file, "fasta",
base + ".phylip", "phylip",
alphabet=IUPAC.ambiguous_dna)
else:
return SeqIO.convert(file, "fasta",
base + ".phylip", "phylip",
alphabet=Alphabet.generic_protein)
def ml(self, data_type, input_file):
self.converter(input_file, data_type)
base = os.path.splitext(input_file)[0]
data_file = base + '.phylip'
if data_type == 'DNA':
phyml_cline = PhymlCommandline(input=data_file)
else:
phyml_cline = PhymlCommandline(input=data_file, datatype='aa')
stdout, stderr = phyml_cline()
os.rename(base + '.phylip_phyml_tree', base + '_ml.nw') # tree file is generated
stat_file = base + '.phylip_phyml_stats'
os.remove(data_file)
os.remove(stat_file)
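# Usage sketch (not part of the original file): ml() expects a FASTA alignment, converts it to
# PHYLIP, runs PhyML and leaves a '<base>_ml.nw' Newick tree next to the input. The file name
# below is a placeholder assumption; PhyML must be installed and on the PATH for this to run.
if __name__ == '__main__':
    constructor = MlTreeConstructor()
    constructor.ml('DNA', 'example_alignment.fasta')  # hypothetical input alignment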
| UTF-8 | Python | false | false | 1,798 | py | 24 | ml.py | 23 | 0.577308 | 0.57564 | 0 | 51 | 34.235294 | 96 |
poyuH/DiseaseRiskCalculator | 17,746,804,869,700 | 3f70c867fb0a8193e82bf8077c0919112d7aa28b | 9d9b1e86717cdf816360308dfff31a29c8e252d2 | /user/globalvalues.py | ad5c74abdade751c876b6e0cb70a3c3195482eb5 | [] | no_license | https://github.com/poyuH/DiseaseRiskCalculator | 2d9c7dd5adab05544e5da94d6dd942a86964aca4 | 6caf7b50debb7fc6a97715fb7e17bdbd29a49bee | refs/heads/master | "2020-06-03T14:03:02.901280" | "2019-09-11T02:47:47" | "2019-09-11T02:47:47" | 191,596,497 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
def get_birthyear():
cur_year = datetime.date.today().year
return [(year, year) for year in range(cur_year-100, cur_year + 1)]
def get_gender():
GENDER = (
( 0,'female' ),
( 1,'male' )
)
return GENDER
def get_race():
RACE = (
( 0, 'Other' ),
( 1, 'African American' )
)
return RACE
| UTF-8 | Python | false | false | 368 | py | 17 | globalvalues.py | 10 | 0.519022 | 0.497283 | 0 | 22 | 15.727273 | 71 |
WSJI0/BOJ | 68,719,514,737 | 700eb53603dd7bc82c78451548ecf250214e4a36 | 6e601105760f09d3c9f5306e18e4cf085f0bb4a2 | /10000-99999/10990.py | 62e442db10353f81e5828159e5f37c70b7d88e07 | [] | no_license | https://github.com/WSJI0/BOJ | 6412f69fddd46c4bcc96377e2b6e013f3bb1b524 | 160d8c13f72d7da835d938686f433e7b245be682 | refs/heads/master | "2023-07-06T15:35:50.815021" | "2023-07-04T01:39:48" | "2023-07-04T01:39:48" | 199,650,520 | 2 | 0 | null | false | "2020-04-20T09:03:03" | "2019-07-30T12:48:37" | "2020-04-19T11:34:50" | "2020-04-20T09:03:03" | 261 | 1 | 0 | 0 | Python | false | false | '''
Problem 10990
Printing stars - 15
'''
n=int(input())
print(" "*(n-1)+"*")
if n>1:
for i in range(2,n+1):
print(" "*(n-i)+"*"+" "*((i-1)*2-1)+"*") | UTF-8 | Python | false | false | 155 | py | 739 | 10990.py | 736 | 0.380952 | 0.285714 | 0 | 12 | 11.333333 | 48 |
Gr3yG00se64/overwatch | 5,420,248,760,218 | 682efabebab7374e91d53d7a58b7c8d12311a35f | 839cd5616d63825192dffb3730dacb67619bda2f | /modules/maintenance/db_handler.py | a9fee5494a01f2ffb7d6522b31dab2bc848df453 | [] | no_license | https://github.com/Gr3yG00se64/overwatch | 1e0777005a28cc3e8e3b32d130935d6ee4163110 | 090f3d8aafe758ae29b4c5add346e1bc51cc923a | refs/heads/master | "2020-09-12T09:16:55.854617" | "2020-04-20T06:01:35" | "2020-04-20T06:01:35" | 222,378,902 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Local Dependencies
import config
#Package Dependencies
from pymongo import MongoClient
def remove_alerts(alerts):
#Establish DB Connection
connection = MongoClient(config.mongoURI)
#Set Up DB for Alerts
alertDB = connection["alerts"]
alertCollection = alertDB["alerts"]
if alerts:
for alert in alerts:
alertCollection.remove({'_id': alert.get('_id')})
def retrieve_alerts():
alerts = []
# Establish DB Connection
connection = MongoClient(config.mongoURI)
# Retrieve names of all databases
dbNames = connection.list_database_names()
if 'alerts' in dbNames:
# Set Up DB for Alerts
alertDB = connection["alerts"]
alertCollection = alertDB["alerts"]
#Retrieve all alerts
db_alerts = alertCollection.find()
#Generate list of alerts
for alert in db_alerts:
alerts.append(alert)
return alerts
# Returns a list of dictionaries that contain registered device information
def retrieve_regDevices():
regDevices = []
#Establish DB Connection
connection = MongoClient(config.mongoURI)
# Retrieve names of all databases
dbNames = connection.list_database_names()
if 'netmap' in dbNames:
#Set up DB for NetMap Devices
netmapDB = connection["netmap"]
netmapCollection = netmapDB["netmaps"]
#Retrieve all registered devices
devices = netmapCollection.find()
#Generate list of registered devices
for device in devices:
regDevices.append(device)
    # Export the list of dictionaries
return regDevices
| UTF-8 | Python | false | false | 1,639 | py | 23 | db_handler.py | 20 | 0.663819 | 0.663819 | 0 | 68 | 23.088235 | 74 |
jesdin/OST | 10,316,511,487,727 | d61ee8beaf8be02ac6f48e51a503c17d831ee458 | cefab47e1b4d134b02a7e28b8a28e2d289534044 | /Exp 4/exp4a.py | 83afd9b41b70e4dd8a31d650706ed11b4b688c9d | [] | no_license | https://github.com/jesdin/OST | d251de9a471b0891410a2d504b2dce94a9e1168b | 702cf008ec0d7b6cf5ee9852d7f0a66acecd8e14 | refs/heads/master | "2020-04-26T09:25:22.813993" | "2019-03-26T16:53:49" | "2019-03-26T16:53:49" | 173,454,435 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import copy
str1 = input("String 1: ")
str2 = input("String 2: ")
set1 = set(list(str1))
set1.discard(' ')
set2 = set(list(str2))
set2.discard(' ')
print("Set1: {}".format(set1))
print("Set2: {}".format(set2))
print("Common letters: {}".format(set1 & set2))
print("letters in str1 not in str2: {}".format(set1 - set2))
print("set of letters in both strings: {}".format(set1 | set2))
print("letters in str1 and str2 but not common in both: {}".format(set1 ^ set2))
copyshallow = copy.copy(set1)
copyasn = set1
print("Set1: {}".format(set1))
print("SetCopyShallow: {}".format(copyshallow))
print("SetAssgn: {}".format(copyasn))
print("adding Python to set1")
set1.add("Python")
print("Set1: {}".format(set1))
print("SetCopyShallow: {}".format(copyshallow))
print("SetAssgn: {}".format(copyasn))
print("")
print("ID of Set1: {}".format(id(set1)))
print("ID of SetCopyShallow: {}".format(id(copyshallow)))
print("ID of SetAssgn: {}".format(id(copyasn)))
| UTF-8 | Python | false | false | 954 | py | 16 | exp4a.py | 16 | 0.672956 | 0.63522 | 0 | 30 | 30.766667 | 80 |
vecelo/Chuck2 | 19,636,590,478,457 | f9b09a29f6fd2d1fec297b8ef6b8b7056475e4c6 | 089e2bf84731446ce7d3b2b209fc58fc882d97b0 | /test1.py | b99faad13f236bec84e4a15749906c7c55e9149c | [] | no_license | https://github.com/vecelo/Chuck2 | d3c4163b30c181947a024c099d97a9d3514c81dc | 86265a727b4747f9607ee0e8496ee66704275b98 | refs/heads/master | "2021-01-10T02:10:49.057812" | "2016-01-10T07:32:11" | "2016-01-10T07:32:11" | 49,357,834 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from f1 import kiem
class tc1(unittest.TestCase):
def setUp(self):
pass
def test_f1(self):
self.assertEqual(kiem(['<key>Track ID</key>'],['<integer>369</integer>']), 369)
if __name__ == '__main__':
unittest.main() | UTF-8 | Python | false | false | 260 | py | 3 | test1.py | 2 | 0.603846 | 0.569231 | 0 | 12 | 20.75 | 87 |
iciclenine/jim | 8,203,387,580,140 | 632322b6fc8772ffa7cb44a34c15537919b1c0f7 | 3b3e636c711219b0173b4297f7ab672689463c0a | /old_jim/config.py | be2146a11c8acc1a962fd54e28b3eb609a94d2e9 | [] | no_license | https://github.com/iciclenine/jim | 3822f54770efcdd7c1f31a87024b6e5257ec7fc5 | 1e89b8ab94f4775366964cee43e919d1346e21e9 | refs/heads/master | "2016-05-23T02:36:49.776523" | "2013-12-13T23:05:27" | "2013-12-13T23:05:27" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/opt/local/bin/python
import getpass
import os
# from jim import JIM_DIR, JIM_DB, JIM_WORKING, JIM_CONFIG, COL_NUM
import re
import sys
import yaml
ARGV = sys.argv
NUM_ARGV = len(ARGV)
HOME = os.path.expanduser("~")
JIM_DIR = os.path.join(HOME, ".jim")
PROJECT_DB = os.path.join(JIM_DIR, "projects.db")
JIM_DB = os.path.join(JIM_DIR, "jim.db")
JIM_WORKING = JIM_DB + ".wk"
JIM_CONFIG = os.path.join(JIM_DIR, "config.yml")
COL_NUM = 11
DEFAULT_USER = getpass.getuser()
def createConfig():
f = open(JIM_CONFIG, 'w+')
file_text = '''default_project:
projects: {}
anchors: {}
Users:
josh:
Demerits: 0
Citations: 0
Violations: 0
Verbal Warnings: 0
Written Warnings: 0
Dessaggelations: 0
kyson:
Demerits: 0
Citations: 0
Violations: 0
Verbal Warnings: 0
Written Warnings: 0
Dessaggelations: 0
jon:
Demerits: 0
Citations: 0
Violations: 0
Verbal Warnings: 0
Written Warnings: 0
Dessaggelations: 0
Remote: adkinsjo@flop.engr.oregonstate.edu:~/jim/.jim'''
file_text = re.sub(r'DEFAULT_USER', DEFAULT_USER, file_text)
f.write(file_text)
f.close()
def readYML():
#jadkins - just assume that the file exists for now
f = open(JIM_CONFIG, 'r')
y = yaml.load(f)
f.close()
return y
def writeYML(y):
f = open(JIM_CONFIG, 'w')
yaml.dump(y, f)
f.close()
# this is super ghetto
def setRemote(hostname):
f = open(JIM_CONFIG, 'r')
y = yaml.load(f)
f.close()
y['Remote'] = hostname
f = open(JIM_CONFIG, 'w')
yaml.dump(y, f)
f.close()
initConfig()
def createUser(name):
pass
def deleteUser(name):
pass
def printUserInfo(users):
f = open(JIM_CONFIG, 'r')
y = yaml.load(f)
f.close()
# determine the longest user name
max_name = 0
for u in users:
if len(str(u)) > max_name: max_name = len(str(u)) + 1
top = [" "*max_name, "Demerits", "Violations", "Citations", "Verbal Warnings", "Written Warnings", "Dessaggelations"]
# set array of character values to center to
maxes = [0]*len(top)
for i in range(len(top)): maxes[i] = len(top[i])
maxes[0] = max_name
table = [[]]*len(users)
for i in range(len(users)):
row = [0]*len(top)
row[0] = users[i]
for w in range(1, len(top) - 1):
row[w] = y['Users'][users[i]][top[w]]
table[i] = row
s1 = ""
for t in top: s1 += t + "\t"
s2 = ""
for t in table:
for i in range(len(t)):
if i == 0: s2 += str(t[i]).ljust(maxes[i]) + "\t"
else: s2 += str(t[i]).center(maxes[i]) + "\t"
s2 += "\n"
print s1
print s2
def addWhack(user):
f = open(JIM_CONFIG, 'r')
y = yaml.load(f)
f.close()
cur = y['Users'][user]
cur['Demerits'] += 1
if cur['Demerits'] == 3:
cur['Demerits'] = 0
cur['Violations'] += 1
if cur['Violations'] == 5:
cur['Violations'] = 0
cur['Citations'] += 1
if cur['Citations'] == 4:
cur['Citations'] = 0
cur['Verbal Warnings'] += 1
if cur['Verbal Warnings'] == 2:
cur['Verbal Warnings'] = 0
cur['Written Warnings'] += 1
if cur['Written Warnings'] == 2:
cur['Written Warnings'] = 0
cur['Dessaggelations'] += 1
writeYML(y)
| UTF-8 | Python | false | false | 3,043 | py | 21 | config.py | 14 | 0.616826 | 0.599408 | 0 | 149 | 19.416107 | 118 |
gm332211/workspace_p3x | 14,697,378,121,403 | 7c5fd5b83bd69dfbe39ce25ed45fec05266c4ff3 | 88efda89a4aa923079e89ea53c4eaa13522d1bfe | /ๆจกๅ/็ฑป็ไฝฟ็จ/classๆจกๅ.py | ff94924873807942b7d111f6af87bbc3151912eb | [] | no_license | https://github.com/gm332211/workspace_p3x | fab5132107863ae26ed079774d2c80883e187266 | 33c106cd069962df8e2ab99b552bfd9288530f1a | refs/heads/master | "2021-04-12T12:05:16.716708" | "2019-01-29T09:02:28" | "2019-01-29T09:02:28" | 126,682,989 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Role():
    # constructor (the initializer __init__ is defined below)
    n=123 # class variable: can be accessed without creating an instance (benefit: shared attribute, saves memory)
    def __init__(self,name,role,have_gun,live_value=100,money=1000):# the class's initializer
        self.name=name # instance variable (scoped to the instance itself); a static attribute
        self.role=role
        self.have_gun=have_gun
        self.live_value=live_value
        self.money=money
    def buy_gun(self,gun): # method of the class (a dynamic attribute)
        print('%s buy %s'%(self.name,gun))
    def shoot(self):
        print('%s shoot'%(self.name))
    def got_shoot(self):
        print('%s got_shoot'%(self.name))
    def __del__(self):# destructor, called when the instance is destroyed
        pass
r1=Role('xiaoming','police','AK74') # instantiate the class
r1.buy_gun('AWM') # call a method of the class
r1.body_armor='body_armor'# add an instance variable
Role.test='test'# add a class variable
# Class variables (benefit: shared across instances, saves memory)
# Instance variables (static attributes)
# Class methods (dynamic attributes)
# Constructor (initializes the instance)
# Destructor (does cleanup work, e.g. closing database connections or temporarily opened files)
# Private attribute: self.__life_value=100 (only accessible from inside the class)
# Private method: def __got_shot() (likewise only accessible from inside the class)
# Encapsulation: hide internal private methods from the user (hide implementation details)
# ็ปงๆฟ ๅๅฐไปฃ็ ้็จ็(ๅฏไปฅๆฉๅฑๅทฒๅญๅจ็ไปฃ็ ๆจกๅ) ไปฃ็ ้็จ
# ๅคๆ (ไธ็งๆฅๅฃๅค็งๅฎ็ฐ) ๆฅๅฃ้็จ | UTF-8 | Python | false | false | 1,447 | py | 138 | classๆจกๅ.py | 82 | 0.622481 | 0.603393 | 0 | 32 | 28.5 | 76 |
PatriotJ/chenyun-leetcode | 5,317,169,547,097 | a09537ebfeeac5e68d7e4e446d98f0febb8ec32a | 638684378d281aa23d3262f0eb6ead5527fb544c | /python/496.py | fefca4a68a61217864be0461c7d28f2402234999 | [] | no_license | https://github.com/PatriotJ/chenyun-leetcode | 3bd52525f75b91786bfa11ead754e19d7765ef4f | e1b43d5e1819916cd48598a1a6a547f749ffff3c | refs/heads/master | "2020-05-30T18:59:23.818287" | "2019-07-11T23:20:37" | "2019-07-11T23:20:37" | 189,913,801 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def nextGreaterElement(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
ans = [-1] * len(nums1)
dic= {}
for i,num in enumerate(nums1):
dic[num] = i
stack = []
for num in nums2:
while stack and stack[-1] < num:
top = stack.pop()
if top in dic:
ans[dic[top]] = num
stack.append(num)
return ans
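# Quick check (not part of the original solution), using the example from LeetCode 496 as an
# assumed input: for nums1 = [4,1,2] and nums2 = [1,3,4,2] the expected output is [-1,3,-1].
if __name__ == '__main__':
    print(Solution().nextGreaterElement([4, 1, 2], [1, 3, 4, 2]))  # [-1, 3, -1]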
| UTF-8 | Python | false | false | 532 | py | 465 | 496.py | 464 | 0.43797 | 0.421053 | 0 | 19 | 27 | 47 |
wwwwodddd/Zukunft | 6,158,983,122,347 | 964304cf466cc6b4f753baa627baa50ed14bc7cf | 1c390cd4fd3605046914767485b49a929198b470 | /luogu/P4236.py | 7d56d25698bd9fab1b682e048c4d0adb529081b5 | [] | no_license | https://github.com/wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | "2023-01-24T06:14:35.691292" | "2023-01-21T15:42:32" | "2023-01-21T15:42:32" | 163,685,977 | 7 | 8 | null | null | null | null | null | null | null | null | null | null | null | null | null | t = int(input())
for tt in range(t):
a, n = map(int, input().split())
if a & 1:
print(['wzt', 'lsq'][n & 1], 'Win')
else:
n %= a + 1
print(['wzt', 'lsq'][n & 1 or n == a], 'Win')
| UTF-8 | Python | false | false | 188 | py | 6,144 | P4236.py | 6,060 | 0.462766 | 0.441489 | 0 | 8 | 22.5 | 47 |
Pramod-Shrinivas/Project-Euler | 4,621,384,823,644 | cbb8a058d531532baebb42b33024fd2146ea4e68 | 353508e9f9746be5783dc822b37776dcd13c15e6 | /Python/002.py | c81f2c04a01eae27feddef02e5d3ecdea985b3b8 | [] | no_license | https://github.com/Pramod-Shrinivas/Project-Euler | ee94e7448feeeff8cc03ae2534e85a88326768e6 | 86951a507f7c71788f1d0264108fa4c23bb53022 | refs/heads/master | "2016-09-06T19:30:53.637228" | "2015-01-28T10:23:51" | "2015-01-28T10:23:51" | 29,229,741 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
'''
Purpose: https://projecteuler.net/problem=2
@author:Pramod S
'''
def main():
f1,f2,sum=1,2,2
while(f2<=4000000):
f1,f2 = f2,f1+f2
if(f2%2==0):
sum+=f2
print("result {0}".format(sum))
if __name__ == "__main__":
main()
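# Alternative sketch (not in the original file): every third Fibonacci number is even, and the
# even terms satisfy E(n) = 4*E(n-1) + E(n-2), so the even-valued terms can be summed directly.
# This is only an illustration of the same result, not the author's approach.
def main_even_only(limit=4000000):
    a, b, total = 2, 8, 0           # first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    print("result {0}".format(total))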
| UTF-8 | Python | false | false | 304 | py | 18 | 002.py | 17 | 0.483553 | 0.404605 | 0 | 15 | 19.266667 | 49 |
Yu-Jhin-s-Python/006992 | 10,565,619,559,238 | 902a9dc4f4f710aac28d68c02a47d911728b1155 | d342494d8a41fe941f16d1a89144afa001f04e01 | /twoWindow.py | 81f406fec718328d5ebbb52d10ce77943f170d4f | [] | no_license | https://github.com/Yu-Jhin-s-Python/006992 | 8381ce8b756569ec94d4e69828aa07426ca41ec2 | 7493540e7ae7cd54e2fc1e6a6c8be3bdafd67e70 | refs/heads/master | "2022-04-08T09:40:24.148920" | "2019-09-30T06:07:44" | "2019-09-30T06:07:44" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tkinter
window1 = tkinter.Tk()
window1.geometry("100x100")
window1.configure(background="white")
window2 = tkinter.Tk()
window2.geometry("100x100")
window2.configure(background="black")
def changecolor():
print("button1")
def changecolor2():
print("button2")
btn = tkinter.Button(window1, text="Random color!", command=changecolor)
btn.pack()
btn2 = tkinter.Button(window2, text="Random color!", command=changecolor2)
btn2.pack()
window2.mainloop()
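# Note (not part of the original script): creating two tkinter.Tk() roots is a common pitfall --
# widgets and variables can end up bound to the wrong interpreter. A sketch of the usual
# alternative, assuming only one root window is really needed, is a Toplevel child window:
#
#   root = tkinter.Tk()
#   second = tkinter.Toplevel(root)   # extra window that shares root's mainloop
#   root.mainloop()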
| UTF-8 | Python | false | false | 468 | py | 34 | twoWindow.py | 33 | 0.74359 | 0.685897 | 0 | 22 | 20.272727 | 74 |
Tubbz-alt/psbeam | 12,257,836,667,298 | e3f01adc04a0dac281e4edbe0d0abf20df87a9dd | 28b94ec8485c1dcb387078c6a18b9d19af2c37b8 | /tests/utils.py | 46c31ec0db2a50d555662a6fc47616a707b1894a | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/Tubbz-alt/psbeam | 443648723e5a53700d0720ec9d6d52af6e34096e | 42524e0ba52e04bf37d86416bbd35360e5df5d2c | refs/heads/master | "2021-08-15T17:58:44.242303" | "2017-11-18T01:50:45" | "2017-11-18T01:50:45" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############
# Standard #
############
import logging
logger=logging.getLogger(__name__)
def collector(field, output):
"""
Reimplement bluesky.callbacks.collector to not raise exception when field
is missing. Instead, log a warning.
"""
def f(name, event):
try:
output.append(event['data'][field])
logger.debug("%s collector has collected, all output: %s",
field, output)
except KeyError:
logger.warning("did not find %s in event doc, skipping", field)
return f
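# Usage sketch (not from the original module): collector() is meant to be subscribed as a
# bluesky-style callback; each event that carries the requested field appends to the output list.
# The document and field names below are assumptions made only for illustration.
if __name__ == '__main__':
    centroids = []
    cb = collector('centroid_x', centroids)
    cb('event', {'data': {'centroid_x': 12.5}})   # collected
    cb('event', {'data': {}})                     # missing field -> warning, not an exception
    print(centroids)                              # [12.5]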
| UTF-8 | Python | false | false | 612 | py | 21 | utils.py | 16 | 0.571895 | 0.570261 | 0 | 23 | 25.608696 | 77 |
swuerth/StravaDevChallenge | 2,044,404,435,904 | 2bf0cf89242bca0d2721c26c5da81910016026bd | 44a2803051ca3627025dae5d10b095596ad58ad0 | /strava-club-highlights-gcloud/env/lib/python2.7/site-packages/units/abstract.py | c5dbd122698c120f4a4e0eedf974c3eb0d0387fc | [] | no_license | https://github.com/swuerth/StravaDevChallenge | f5e238a3b5a67ed1acca48e0bdfc315800d9b7d0 | 0fcdddeab99694c1bd5be983d2b34ea043f7c39f | refs/heads/master | "2020-04-06T07:03:14.729373" | "2016-09-07T21:01:38" | "2016-09-07T21:01:38" | 65,424,264 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | """An abstract base class to define the interface for all units."""
from units.compatibility import compatible
from units.exception import IncompatibleUnitsError
from units.quantity import Quantity
class AbstractUnit(object):
"""Parent class/interface for units."""
def __init__(self, is_si):
self._si = is_si
def __call__(self, quantity):
"""Overload the function call operator to convert units."""
if not hasattr(quantity, 'unit'):
return Quantity(quantity, self)
elif compatible(self, quantity.unit):
return Quantity(quantity.num *
quantity.unit.squeeze() /
self.squeeze(),
self)
else:
raise IncompatibleUnitsError()
def canonical(self):
"""Return an immutable, comparable derivative of this unit"""
raise NotImplementedError
def invert(self):
"""Return (this unit)^-1."""
raise NotImplementedError
def is_si(self):
"""Whether it makes sense to give this unit an SI prefix."""
return self._si
def squeeze(self):
"""Return this unit's implicit quantity multiplier."""
raise NotImplementedError
def str_includes_multiplier(self):
"""Whether the string name of the unit already encapsulates
the unit's multiplier."""
raise NotImplementedError
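
# Call semantics sketch (hypothetical concrete unit assumed): if `metre` were a
# concrete AbstractUnit, `metre(5)` would wrap the bare number as
# Quantity(5, metre), while `metre(q)` for a Quantity q rescales it by
# q.unit.squeeze() / metre.squeeze() when the units are compatible and raises
# IncompatibleUnitsError otherwise.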
| UTF-8 | Python | true | false | 1,512 | py | 78 | abstract.py | 54 | 0.580026 | 0.579365 | 0 | 44 | 32.977273 | 69 |
blackball/.emacs.d | 7,310,034,345,352 | 3ea194bdd43bbc7873d6bd5388788b9e9bb604a8 | b317ea149ba1954bc2b7a12e58fce69e72e814b0 | /.python-environments/default/lib/python3.6/sre_constants.py | 62e6eb3529094de6b4513626a2c490fb7b10bf27 | [] | no_license | https://github.com/blackball/.emacs.d | d71d863308c1bc03550996856f75cf1f0677dfa1 | b2a9988957bf0c30840afb9f39f1487b2d8e268c | refs/heads/master | "2023-06-05T20:06:29.440954" | "2021-06-17T07:19:11" | "2021-06-17T07:19:11" | 69,253,081 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/rvbust/Rvbust/Install/Miniconda3/lib/python3.6/sre_constants.py | UTF-8 | Python | false | false | 69 | py | 76 | sre_constants.py | 37 | 0.84058 | 0.797101 | 0 | 1 | 69 | 69 |
ly-c-christopher/EllieActiveToStaging | 3,427,383,908,883 | 297a6dde9244bb50d12b6ed278ba098c46557a0d | ecc7a22cb85cb935aaa13e48eadd4e2f1b6271b9 | /psql_db.py | ff343809881f89c59a6e7b182f346619401d533c | [] | no_license | https://github.com/ly-c-christopher/EllieActiveToStaging | 67c90643e93aea23d4fc053e42224854be1d54a6 | c38c50dae4a92f1b1b663651412f9cbb7a1603e1 | refs/heads/main | "2023-02-13T01:12:13.208471" | "2021-01-19T00:23:47" | "2021-01-19T00:23:47" | 330,789,911 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import psycopg2
from PsqlProduct import PsqlProduct  # assumed class location; the bare module object is not callable below
from config import config, TABLES, ELLIE_TESTING_TABLES
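# Assumption about the local config module: config('ellie_testing') is expected to
# return a dict of psycopg2 connection parameters (host, dbname, user, password, ...),
# and TABLES / ELLIE_TESTING_TABLES map logical names to the physical table names
# used by the TRUNCATE/INSERT statements below.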
def get_products():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config('ellie_testing')
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = cur.fetchone()
print(db_version)
cur.execute('select * from alternate_products where product_id = \'%s\'' % 1644903628851)
alt_prods = cur.fetchall()
for prod in alt_prods:
print('id: %s, product_title: %s, product_id: %s, variant_id: %s, sku: %s, product_collection: %s' %
(prod[0], prod[1], prod[2], prod[3], prod[4], prod[5]))
cur.execute('select * from products;')
database_products = []
all_products = cur.fetchall()
for prod in all_products:
new_product = PsqlProduct(prod[0], prod[1], prod[2], prod[3], prod[4], prod[5], prod[6], prod[7], prod[8],
prod[9], prod[10], prod[11], prod[12], prod[13], prod[14], prod[15], prod[16],
prod[17], prod[18])
database_products.append(new_product)
for prod in database_products:
exists = False
            if prod.metafields_global_description_tag not in (None, ''):
                exists = True
            if prod.metafields_global_title_tag not in (None, ''):
                exists = True
if exists:
print(prod.to_string())
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def save_products(products_list):
conn = None
try:
params = config('ellie_testing')
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('TRUNCATE TABLE %s' % ELLIE_TESTING_TABLES['products'])
for product in products_list:
sql = 'INSERT INTO ' + TABLES['ellie_testing_products'] \
+ ' (body_html, shopify_id, handle, images, options, product_type, published_at, image, ' \
'published_scope, tags, template_suffix, title, metafields_global_title_tag, ' \
'metafields_global_description_tag, variants, vendor, created_at, updated_at) VALUES ' \
'(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
values = (product.body_html, product.shopify_id, product.handle, product.images, product.options,
product.product_type, product.published_at, product.image, product.published_scope, product.tags,
product.template_suffix, product.title, product.metafields_global_title_tag,
product.metafields_global_description_tag, product.variants, product.vendor, product.created_at,
product.updated_at)
cur.execute(sql, values)
conn.commit()
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def save_alternate_products(alternate_products_list):
conn = None
try:
params = config('ellie_testing')
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('TRUNCATE TABLE %s' % ELLIE_TESTING_TABLES['alt_prods'])
for alt_prod in alternate_products_list:
sql = 'INSERT INTO ' + ELLIE_TESTING_TABLES['alt_prods'] + ' (product_title, product_id, variant_id, ' \
'sku, product_collection) ' \
'VALUES (%s, %s, %s, %s, %s)'
values = (alt_prod.product_title, alt_prod.product_id, alt_prod.variant_id, alt_prod.sku,
alt_prod.product_collection)
cur.execute(sql, values)
conn.commit()
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def save_product_tags(product_tags_list):
conn = None
try:
params = config('ellie_testing')
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('TRUNCATE TABLE %s' % ELLIE_TESTING_TABLES['prod_tags'])
for prod_tag in product_tags_list:
sql = 'INSERT INTO ' + ELLIE_TESTING_TABLES['prod_tags'] + ' (product_id, tag, active_start, active_end,' \
'theme_id) VALUES (%s, %s, %s, %s, %s)'
values = (prod_tag.product_id, prod_tag.tag, prod_tag.active_start, prod_tag.active_end, prod_tag.theme_id)
cur.execute(sql, values)
conn.commit()
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.') | UTF-8 | Python | false | false | 6,163 | py | 20 | psql_db.py | 17 | 0.547785 | 0.538699 | 0 | 142 | 41.415493 | 119 |
fepeter/VASY | 18,116,172,089,289 | 5639a9f6ea8178a9afc081a5dc924c7619dd3eb5 | 69b84f3f90bea2335e96106311bda30314f3d550 | /Aufgabe 2/exit.py | 6d30c2e3cb8dc83fc7915918a00ed5d530d58310 | [] | no_license | https://github.com/fepeter/VASY | 2a5d21a8c20788ad9fb28fe624919ff191275b66 | c6344cba500b10ed01b994dd1fd47223880afa0e | refs/heads/master | "2021-09-04T21:39:45.463528" | "2018-01-22T12:22:22" | "2018-01-22T12:22:22" | 113,789,601 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import mraa, time
class EdisonCar():
def __init__(self):
self.pwm_ina = mraa.Pwm(0)
self.pwm_inb = mraa.Pwm(14)
self.pin_stby = mraa.Gpio(15) # 0 = motor off
self.pin_ina1 = mraa.Gpio(45) # 0/1 -> go forward/backward
self.pin_ina2 = mraa.Gpio(46)
self.pin_inb1 = mraa.Gpio(47) # 0/1 -> go left/right
self.pin_inb2 = mraa.Gpio(48)
self.period = 1
def enable_pins(self):
if self.pwm_ina is None:
raise RuntimeError('pwm_ina could not be initialized')
if self.pwm_ina.enable(True) is not mraa.SUCCESS:
raise RuntimeError('error while enabling pwm_ina')
if self.pwm_inb is None:
raise RuntimeError('pwm_inb could not be initialized')
if self.pwm_inb.enable(True) is not mraa.SUCCESS:
raise RuntimeError('error while enabling pwm_inb')
if self.pin_stby.dir(mraa.DIR_OUT) is not mraa.SUCCESS:
raise RuntimeError("Can't set digital pin stdby as output, exiting")
if self.pin_ina1.dir(mraa.DIR_OUT) is not mraa.SUCCESS:
raise RuntimeError("Can't set digital pin a1 as output, exiting")
if self.pin_ina2.dir(mraa.DIR_OUT) is not mraa.SUCCESS:
raise RuntimeError("Can't set digital pin a2 as output, exiting")
if self.pin_inb1.dir(mraa.DIR_OUT) is not mraa.SUCCESS:
raise RuntimeError("Can't set digital pin b1 as output, exiting")
if self.pin_inb2.dir(mraa.DIR_OUT) is not mraa.SUCCESS:
raise RuntimeError("Can't set digital pin b2 as output, exiting")
def enable_motors(self):
self.pin_stby.write(1)
def disable_motors(self):
self.pin_stby.write(0)
def drive(self, duty_cycle):
self.pwm_ina.write(duty_cycle)
def brake(self):
        print("braking!!!")
i = 0
while (i < 1000):
self.pin_ina1.write(1)
self.pin_ina2.write(0)
self.pwm_ina.period(self.period)
self.pwm_ina.write(1.0)
self.pin_ina1.write(0)
self.pin_ina2.write(1)
self.pwm_ina.period(self.period)
self.pwm_ina.write(1.0)
i += 1
self.pin_ina1.write(0)
self.pin_ina2.write(0)
self.pwm_ina.write(1.0)
def steer(self, dir):
'''
        :param dir: string "left" or "right"
:return:
'''
if (dir == "left"):
self.pin_inb1.write(1)
self.pin_inb2.write(0)
elif (dir == "right"):
self.pin_inb1.write(0)
self.pin_inb2.write(1)
def steeringAngle(self, duty_cycle):
self.pwm_inb.period(self.period)
self.pwm_inb.write(duty_cycle)
def setForward(self):
self.pin_ina1.write(1)
self.pin_ina2.write(0)
def setbBackward(self):
self.pin_ina1.write(0)
self.pin_ina2.write(1)
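# Illustrative call sequence (assumes the motor driver is wired as described in
# the pin comments above):
#   car = EdisonCar(); car.enable_pins(); car.enable_motors()
#   car.setForward(); car.drive(0.5)
#   car.steer("left"); car.steeringAngle(0.8)
#   car.brake(); car.disable_motors()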
def main():
granTurino = EdisonCar()
granTurino.enable_pins()
granTurino.disable_motors()
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 3,071 | py | 12 | exit.py | 12 | 0.57408 | 0.550309 | 0 | 102 | 29.107843 | 80 |
frstrtr/twisted-network-programming-essentials-examples | 18,459,769,439,702 | 37ead2ce6935a5a4ee01e02611f47aba89f36c36 | 370e82472caf4cbcc711a6877181e94275a38d5a | /Chapter4-Web-Servers/ex7-web60sec_dynamic_URL_dispatch.py | ced1194cc7327ee84552ed498201d46f53a26988 | [] | no_license | https://github.com/frstrtr/twisted-network-programming-essentials-examples | 74e45c065cfc45e58b48fe386871287626a3fa78 | 15ff534b7ca0a3ff27e49788d5338472c6b89463 | refs/heads/master | "2022-12-08T07:34:29.984408" | "2020-08-28T14:30:33" | "2020-08-28T14:30:33" | 289,447,143 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor, endpoints
from calendar import calendar
import datetime
class YearPage(Resource):
def __init__(self, year):
Resource.__init__(self)
self.year = year
def render_GET(self, request):
cal = calendar(self.year)
return (b"<!DOCTYPE html><html><head><meta charset='utf-8'>"
b"<title></title></head><body><pre>" + cal.encode('utf-8') + b"</pre>")
class Calendar(Resource):
def getChild(self, name, request):
if not name:
name = datetime.datetime.now().year # fix empty year (set it to current)
return YearPage(int(name))
root = Calendar()
factory = Site(root)
endpoint = endpoints.TCP4ServerEndpoint(reactor, 8880)
endpoint.listen(factory)
reactor.run()
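# With the server running, http://localhost:8880/2020 should render the 2020
# calendar, and http://localhost:8880/ falls back to the current year.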
| UTF-8 | Python | false | false | 859 | py | 27 | ex7-web60sec_dynamic_URL_dispatch.py | 26 | 0.66007 | 0.651921 | 0 | 31 | 26.709677 | 87 |
AndreaCensi/geometry | 14,104,672,607,897 | 863cad5d583c5c59cb9533b56eb7eac654ad0f4e | 8b5488c3d339c5e1bf2cc9679636b1f5968f43c1 | /src/geometry/__init__.py | 5130e7930b49f177c83ccd97201511e217652a2e | [] | no_license | https://github.com/AndreaCensi/geometry | 6538ade2364d38ca6a9d8d5bf36a2c5f973ba043 | d192cfefffad2e4b29f18973f63f5eabb2a57fd1 | refs/heads/master | "2023-07-08T13:52:36.970494" | "2020-07-06T09:54:21" | "2020-07-06T09:54:21" | 916,570 | 42 | 11 | null | false | "2020-06-04T14:43:30" | "2010-09-16T19:24:49" | "2020-03-18T04:16:51" | "2020-06-04T14:43:29" | 7,422 | 39 | 11 | 1 | Python | false | false | # coding=utf-8
__version__ = "1.5.8"
# If True, additional checks are done at runtime
development = False
# Does extra checks to make sure things are ok.
# These are now redundant, but it was useful while debugging.
# Reactivate if some strange bug is suspected.
GEOMETRY_DO_EXTRA_CHECKS = False
def create_logger():
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
return logger
logger = create_logger()
def in_circle():
import os
return "CIRCLE" in os.environ
def set_numpy_errors_to_raise():
import numpy as np
np.seterr(all="raise")
if in_circle():
logger.info("Activating extra checks.")
development = True
GEOMETRY_DO_EXTRA_CHECKS = True
set_numpy_errors_to_raise()
try:
from scipy.linalg import logm, expm, eigh
scipy_found = True
except ImportError:
msg = "Scipy not found -- needed for functions logm, expm, eigh. "
msg += "I will go on without it, but later an error will be thrown "
msg += "if those functions are used."
    logger.warning(msg)
def make_warning(s):
def f(*args, **kwargs):
raise Exception("Scipy not installed --- function %r not found." % s)
return f
logm = make_warning("logm")
expm = make_warning("expm")
eigh = make_warning("eigh")
scipy_found = False
development = False
from .types import *
from .basic_utils import *
from .constants import *
from .distances import *
from .formatting import *
from .manifolds import *
from .mds_algos import *
from .poses import *
from .poses_embedding import *
from .procrustes import *
from .rotations import *
from .rotations_embedding import *
from .spheres import *
from .spheres_embedding import *
| UTF-8 | Python | false | false | 1,771 | py | 132 | __init__.py | 71 | 0.677583 | 0.675325 | 0 | 79 | 21.417722 | 81 |
HPCC-Cloud-Computing/press | 18,528,488,922,801 | 7a86a13f8c247d213a5c3fb6c7df6b70b46a8320 | b1ff91db05435a2f6b912e0281cb2322e8189ea7 | /prediction/predict/feedforward/slidingwindow.py | 2997822080edad32eaa334ef5b1d88f9148306ab | [
"MIT"
] | permissive | https://github.com/HPCC-Cloud-Computing/press | 147c9c4b798ba68a3d17d5930f73791b1d61493e | 776f820116b853c9413c8771e8d0832238e88b1a | refs/heads/master | "2021-01-19T00:44:37.538452" | "2019-04-06T09:59:29" | "2019-04-06T09:59:29" | 87,205,983 | 2 | 0 | MIT | false | "2019-04-06T08:33:19" | "2017-04-04T15:47:17" | "2019-04-06T08:28:39" | "2019-04-06T08:33:18" | 11,211 | 2 | 0 | 0 | Jupyter Notebook | false | false | class SlidingWindow:
def __init__(self, data, size):
self.data = data
self.size = size
self.index = 0
def __iter__(self):
return self
def __len__(self):
return self.data.shape[0] - self.size + 1
def __next__(self):
if self.index < len(self.data) - self.size + 1:
self.index += 1
return self.data[(self.index - 1):(self.index - 1) + self.size]
else:
raise StopIteration
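
# Usage sketch (illustrative only): assumes numpy is available, since the class
# indexes .shape and slices the data like an ndarray.
if __name__ == '__main__':
    import numpy as np
    series = np.arange(6)
    for window in SlidingWindow(series, 3):
        print(window)   # [0 1 2], [1 2 3], [2 3 4], [3 4 5]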
| UTF-8 | Python | false | false | 498 | py | 106 | slidingwindow.py | 40 | 0.495984 | 0.481928 | 0 | 18 | 25.666667 | 75 |
Takahiro800/algorithm-and-data | 19,524,921,329,962 | 9ebc35226b79b47490eb4e3b6f5c0545cfdde0bd | 2a2f40504b7d2ddcd8872d385abd7e5b84b7ff66 | /ALDS/1_3_A.py | bbe278a18d3b58b27dd1185c049f061341c96ddd | [] | no_license | https://github.com/Takahiro800/algorithm-and-data | 0c170bde2885d77fa0e4edaabee39445577b4816 | 8e1b5a437bd36f5ce26ba3b3694e37a03e213050 | refs/heads/main | "2023-01-12T01:49:54.747493" | "2020-11-03T12:22:03" | "2020-11-03T12:22:03" | 307,830,225 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | S = list(input().split())
N = len(S)
def is_int(str):
try:
int(str)
return True
except ValueError:
return False
class Stack:
def __init__(self, max):
self.size = max
self.top = 0
        self.stack = [None] * (self.size + 1)  # slots 1..size are used, since top is incremented before writing
def is_empty(self):
return self.top == 0
def push(self, arg_object):
self.top += 1
self.stack[self.top] = arg_object
def pop(self):
self.top -= 1
return self.stack[self.top+1]
NUM = Stack(N)
for i in range(N):
ch = S[i]
if ch in ['+', '-', '*']:
num_2 = NUM.pop()
num_1 = NUM.pop()
if ch == '+':
num = num_1 + num_2
elif ch == '-':
num = num_1 - num_2
else:
num = num_1 * num_2
NUM.push(num)
else:
NUM.push(int(ch))
print("%d" %(NUM.pop()))
| UTF-8 | Python | false | false | 768 | py | 9 | 1_3_A.py | 8 | 0.519531 | 0.502604 | 0 | 45 | 16.066667 | 37 |
rinditriandi/document_management | 3,350,074,498,511 | 9a02b68f6baa73bff547e484f9ecba8fca8f5d56 | dea51891c173a777028c5ebaad2ebcf729cb95d1 | /document_management/apps/company_regulations/views.py | 5b60a26d07bbf20f208a3efb6fdf14b328459f48 | [] | no_license | https://github.com/rinditriandi/document_management | cd9f4819421a3b6c3951229338fc6c63535887b7 | 42ddfd3dc82284e6c1cb1425b238178c18b4c309 | refs/heads/master | "2022-12-07T07:55:40.672166" | "2020-02-26T15:09:07" | "2020-02-26T15:09:07" | 184,234,567 | 0 | 0 | null | true | "2019-07-18T00:36:40" | "2019-04-30T09:37:48" | "2019-05-04T14:38:17" | "2019-07-18T00:36:39" | 1 | 0 | 0 | 0 | null | false | false | from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.shortcuts import render, redirect, reverse, get_object_or_404
from document_management.apps.documents.models import Document, DocumentFile
from document_management.core.decorators import legal_required
from .forms import (CompanyRegulationForm, ChangeRecordStatusForm, UploadForm,
ChangeStatusForm, DeleteForm, RemoveForm)
@login_required
def index(request):
query = request.GET.get('query', '')
effective_date = request.GET.get('effective_date', '')
category = int(request.GET.get('category', 0))
documents = Document.objects.select_related('partner')\
.filter(group=settings.GROUP_COMPANY_REGULATION)
if effective_date:
effective_date = datetime.strptime(effective_date, '%Y-%m-%d').date()
documents = documents.filter(effective_date=effective_date)
effective_date = effective_date.strftime("%Y-%m-%d")
if query:
documents = documents.filter(Q(number__icontains=query) |
Q(subject__icontains=query))
if category > 0:
documents = documents.filter(category=category)
documents = documents.order_by('-id')
page = request.GET.get('page', 1)
paginator = Paginator(documents, 25)
try:
page = paginator.page(page)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
context = {
'title': 'Company Regulations',
'document': Document,
'page': page,
'total_data': paginator.count,
'total_pages': paginator.num_pages,
'query': query,
'category': category,
'effective_date': effective_date
}
return render(request, 'company_regulations/index.html', context)
@legal_required
def add(request):
form = CompanyRegulationForm(data=request.POST or None, user=request.user)
if form.is_valid():
document = form.save()
messages.success(request, f'{document.number} has been added')
return redirect('backoffice:company_regulations:index')
else:
if form.has_error('__all__'):
messages.error(request, form.non_field_errors()[0])
context = {
'title': 'Add Company Regulation',
'form': form
}
return render(request, 'company_regulations/add.html', context)
@legal_required
def edit(request, id):
document = get_object_or_404(
Document.objects.filter(is_active=True), id=id
)
if document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
initial = {
'number': document.number,
'subject': document.subject,
'category': document.category,
'description': document.description,
'effective_date': document.effective_date.strftime("%Y-%m-%d")
}
form = CompanyRegulationForm(data=request.POST or None, initial=initial,
user=request.user, is_update=True)
if form.is_valid():
form.save()
messages.success(request, f'{document.number} has been updated')
return redirect(reverse('backoffice:company_regulations:details', args=[document.id]))
context = {
'title': 'Edit Company Regulation',
'document': document,
'form': form
}
return render(request, 'company_regulations/edit.html', context)
@legal_required
def delete(request, id):
document = get_object_or_404(
Document.objects.filter(is_active=True), id=id
)
if document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
form = DeleteForm(data=request.POST or None, document=document, user=request.user)
if form.is_valid():
form.save()
messages.success(request, "Document # %s has been deleted" % document.number)
return redirect("backoffice:company_regulations:index")
context = {
'title': 'Delete Company Regulation',
'document': document,
'form': form
}
return render(request, 'company_regulations/delete.html', context)
@login_required
def details(request, id):
document = get_object_or_404(
Document.objects.filter(group=settings.GROUP_COMPANY_REGULATION), id=id
)
if request.user.get_role_id() == settings.ROLE_USER_ID and \
document.type == Document.TYPE.private:
messages.error(request, "You do not have an access, but you can request an access.")
return redirect(reverse("backoffice:permission_requests:requests", args=[document.id, document.group]))
if document.is_active:
document.record_status_class = "badge badge-success p-1 ml-1"
else:
document.record_status_class = "badge badge-danger p-1 ml-1"
context = {
'title': 'Details Company Regulation',
'document': document
}
return render(request, 'company_regulations/details.html', context)
@legal_required
def upload(request, id):
document = get_object_or_404(
Document.objects.filter(is_active=True), id=id
)
if document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
form = UploadForm(data=request.POST or None, files=request.FILES or None,
document=document, user=request.user)
if form.is_valid():
form.save()
messages.success(request, "Document # %s files has already uploaded" %
(document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
context = {
'title': 'Upload Company Regulation',
'document': document,
'form': form
}
return render(request, 'company_regulations/upload.html', context)
@legal_required
def change_status(request, id):
document = get_object_or_404(
Document.objects.filter(is_active=True), id=id
)
if document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
initial = {
'status': document.status
}
form = ChangeStatusForm(data=request.POST or None, initial=initial,
document=document, user=request.user)
if form.is_valid():
form.save()
messages.success(request, "Document # %s status has been changed into %s" %
(document.number, document.get_status_display().upper()))
return redirect(reverse("backoffice:company_regulations:details",
args=[document.id]))
context = {
'title': 'Change Status Company Regulation',
'form': form,
'document': document
}
return render(request, 'company_regulations/change_status.html', context)
@legal_required
def change_record_status(request, id):
document = get_object_or_404(Document, id=id)
if document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document.id]))
    form = ChangeRecordStatusForm(data=request.POST or None, document=document, user=request.user)
if form.is_valid():
document = form.save()
if document.is_active:
string_status = "activated"
else:
string_status = "deactivated"
messages.success(request, "Document # %s has been %s" % (document.number, string_status))
return redirect("backoffice:company_regulations:index")
return redirect("backoffice:company_regulations:index")
@login_required
def preview(request, id):
document_file = get_object_or_404(DocumentFile, id=id)
context = {
'title': 'Preview Contract',
'document_file': document_file
}
return render(request, 'company_regulations/preview.html', context)
@legal_required
def remove(request, id):
document_file = get_object_or_404(
DocumentFile.objects.select_related('document', 'document__partner').filter(is_active=True), id=id
)
if document_file.document.status == Document.STATUS.done:
messages.error(request, "Document # %s status has already done" % (document_file.document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document_file.document.id]))
form = RemoveForm(data=request.POST or None, document_file=document_file, user=request.user)
if form.is_valid():
form.remove()
messages.success(request, "Company Regulation File of # %s has been deleted" % str(document_file.document.number))
return redirect(reverse("backoffice:company_regulations:details", args=[document_file.document.id]))
else:
if form.has_error('__all__'):
messages.error(request, form.non_field_errors()[0])
context = {
'title': 'Remove File Company Regulations',
'document_file': document_file,
'form': form
}
return render(request, 'company_regulations/remove.html', context)
| UTF-8 | Python | false | false | 9,821 | py | 107 | views.py | 56 | 0.657061 | 0.65309 | 0 | 283 | 33.70318 | 122 |
radomirklacza/C-BAS | 4,930,622,470,324 | ec32150cdc5c4e692658ec8970e4c886c82f2f01 | 214a99e53a9e3a2fc9332dd3b7dcd4d571dbd66a | /src/vendor/geniv3rpc/ext/geni/am/gibaggregate/resources.py | f313a98a89c6dfb738afa5f0e7e45ea9591e5aaf | [
"BSD-3-Clause"
] | permissive | https://github.com/radomirklacza/C-BAS | 0f2e76a308ca8d45ae74411aaea40cbd55ccea4e | 5005cf43f57302dc0f58b9d1b9cf7e4e3ab70e32 | refs/heads/master | "2020-03-31T21:20:40.092740" | "2018-10-26T10:10:12" | "2018-10-26T10:10:12" | 152,576,649 | 0 | 0 | NOASSERTION | true | "2018-10-11T10:53:23" | "2018-10-11T10:53:23" | "2018-09-07T12:05:27" | "2018-09-07T12:05:26" | 12,715 | 0 | 0 | 0 | null | false | null | #----------------------------------------------------------------------
# Copyright (c) 2012 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import sys
import stat
import os.path
import uuid
import config
import graphUtils
from graphUtils import GraphNode
sliceURN = ""
sliceName = ""
class VMNode(GraphNode) :
""" This class holds information about a VM (compute node) requested
by an experimenter.
Some information held by this class comes from the request
Rspec. E.g. the experimenter specified name of the node. Other
information held by this class is provide by this script.
E.g. the OpenVZ container name that corresponds to this node.
"""
numVMs = 6 # max number of Virtual Machines available from this aggregate
def __init__(self, nodeNumber) :
self.containerName = 100 + nodeNumber # OpenVZ container name (e.g. 101)
self.controlNetAddr = '' # IP address of node on control network
self.nodeName = '' # Experimenter supplied name (client_id)
self.NICs = [] # List of NICs for this node
self.installList = [] # List of files to be installed in VM during setup
self.executeList = [] # List of commands to be executed on startup
self.componentID = '' # component ID for the resource
self.sliverURN = '' # sliver urn
def getNeighbors(self) :
return self.NICs
def getNodeName(self) :
return self.nodeName;
class NIC(GraphNode) :
""" This class holds information about a NIC. NICs are assoicated with
a compute node (VMNode class). The VMNode class keeps track of the
NICs associated with a given VM.
Some of the information held by this class comes from the request
Rspec. E.g. the interface name used by the experimenter.
Other information held by this class comes form this script. E.g.
the IP address associated with the NIC.
"""
def __init__(self) :
self.nicName = '' # Experimenter specified name for this NIC
self.deviceNumber = '' # Device num: 1 = eth1, 2 = eth2, 3 = eth3
self.macAddress = '' # MAC address for this NIC
self.ipAddress = '' # IP address associated with this NIC
self.myHost = None; # The host (VMNode) associated with this NIC
self.virtualEthName = '' # Name of corresponding VETH in the host OS
self.link = None # The link object associated with this NIC
self.componentID = '' # component ID for the resource
self.sliverURN = '' # sliver urn
def getNeighbors(self) :
return [self.link, self.myHost]
def getNodeName(self) :
return self.nicName;
class Link(GraphNode) :
""" This class holds information about a link in the experimenter
specified network topology. The NIC class keeps track of the link
to which it is connected.
Some of the information held by this class comes from the request
Rspec. E.g. the link name used by the experimenter. Other
information held by this class comes form this script. E.g. the
name of the host ethernet bridge associated with the NIC.
"""
def __init__(self) :
self.linkName = '' # Experimenter specified name for this link
self.subnetNumber = 0 # if subnetNumber is x, link is 10.0.x.0/24
self.bridgeID = '' # Name of the host ethernet bridge associated
# w/ the link (e.g. br3 for subnet 10.0.3.0/24)
self.endPoints = [] # NICs at the end points of this link
self.sliverURN = '' # sliver urn
def getNeighbors(self) :
return self.endPoints
def getNodeName(self) :
return self.linkName;
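
# Illustrative wiring (not executed): for a host "node0" with one interface on
# link "lan0", the rspec parser is expected to set
#   nic.myHost = node0; node0.NICs = [nic]
#   nic.link = lan0;    lan0.endPoints = [nic]
# so that getNeighbors() lets graphUtils walk host -> NIC -> link -> NIC -> gateway.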
class installItem :
"""
VMNode maintains a list of files to be installed when the VM
starts up. Items in this list belong this class.
"""
def __init__(self) :
self.sourceURL = '' # Location of file to be installed
self.destination = '' # Location in file system where file goes
self.fileType = '' # Type of file to be installed
class executeItem :
"""
VMNode maintains a list of commands to be executed when the VM
starts up. Items in this list belong this class.
"""
def __init__(self) :
self.command = '' # Command to be executed at VM startup
self.shell = 'sh' # Shell used to execute command
experimentHosts = {} # Map of container names (e.g. 101) to corresponding
# VMNode objects
experimentLinks = [] # List of links specified by the experimenter
experimentNICs = {} # Map of client supplied network interface names to
# corresponding NIC objects
def _annotateGraph() :
""" This function walks through the VMNode, NIC and LINK objects
created by parsing the request Rspec and fills in the missing
information (e.g. MAC and IP addresses for interfaces, bridge names
for links, etc).
"""
# Walk though all NICs and assign them MAC addresses
MACAddresses = [ # Table of MAC addresses for assignment to NICs
"00:0C:29:B4:DF:A7",
"00:0C:29:69:1D:AB",
"00:0C:29:C8:76:FD",
"00:0C:29:71:BA:ED",
"00:0C:29:B8:81:05",
"00:0C:29:9B:6E:5A",
"00:0C:29:87:F0:5E",
"00:0C:29:E8:77:47",
"00:0C:29:7D:99:5C",
"00:0C:29:3B:CF:F8",
"00:0C:29:3E:76:6B",
"00:0C:29:D5:B2:C3",
"00:0C:29:D8:CB:38",
"00:0C:29:D5:B2:13",
"00:0C:29:DA:3E:91",
"00:0C:29:15:97:46",
"00:0C:29:AF:FC:08",
"00:0C:29:05:DF:8C"
]
macAddressesIndex = 0;
nicNames = experimentNICs.keys()
for i in range(len(nicNames)) :
nicObject = experimentNICs[nicNames[i]]
nicObject.macAddress = MACAddresses[macAddressesIndex];
macAddressesIndex += 1
# For every host, give its NICs numbers: 1 (= eth1), 2 or 3
# Also give the NICs the names of the corresponding veth device in the
# host. OpenVZ convention: veth101.1 is virtual ethernet on host that
# corresponds to eth1 on container 101; veth103.2 is virtual ethernet
# on host that corresponds to eth2 on container 103.
hostNames = experimentHosts.keys()
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
interfaceCount = 1
for nicObject in hostObject.NICs :
nicObject.deviceNumber = interfaceCount
nicObject.virtualEthName = 'veth%d.%d' % \
(nicObject.myHost.containerName, interfaceCount)
interfaceCount += 1
# Give each link a subnet address and bridge name
networkNumber = 3; # Subnet number to assign to link. Starts with 3
# since subnet 1 is for control network and
# subnet 2 is used by VirtualBox
for i in range(len(experimentLinks)) :
linkObject = experimentLinks[i]
linkObject.subnetNumber = networkNumber
linkObject.bridgeID = 'br%d' % networkNumber
# Assign IP address to endpoints associated with the link
# IP address is of the form 10.0.networkNumber.VMNodeContainerName
# E.g. net num 3 attached to NIC on container 101 would be 10.0.3.101
for j in range(0, len(linkObject.endPoints)) :
nicObject = linkObject.endPoints[j]
nicObject.ipAddress = "10.0.%d.%d" % (networkNumber, \
nicObject.myHost.containerName)
networkNumber += 1
# Assign URNs to the VM resources
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
hostObject.componentID = ('urn:publicid:IDN+geni-in-a-box.net+node+pc%s'
% hostObject.containerName)
hostObject.sliverURN = ('urn:publicid:IDN+geni-in-a-box.net+sliver+%s'
% hostObject.containerName)
# Assign URNs to the NICs
for i in range(len(nicNames)) :
nicObject = experimentNICs[nicNames[i]]
nicObject.componentID = \
('urn:publicid:IDN+geni-in-a-box.net+interface+pc%s:eth%s' %
(nicObject.myHost.containerName, nicObject.deviceNumber))
nicObject.sliverURN = ('urn:publicid:IDN+geni-in-a-box.net+sliver+%s%s'
% (nicObject.myHost.containerName,
nicObject.deviceNumber))
# Assign URNs to the links
for i in range(len(experimentLinks)) :
linkObject = experimentLinks[i]
linkObject.sliverURN = \
'urn:publicid:IDN+geni-in-a-box.net+sliver+%s' % linkObject.bridgeID
def _generateBashScript(users) :
''' Generate the Bash script that is run to actually create and set up the
Virtual Machines and networks used in the experiment.
'''
# Create the file into which the script will be written
pathToFile = config.sliceSpecificScriptsDir + '/' + config.shellScriptFile
try:
scriptFile = open(pathToFile, 'w')
except IOError:
config.logger.error("Failed to open file that creates sliver: %s" %
pathToFile)
return None
# Make this file executable
os.chmod(pathToFile, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# Start creating the script
scriptFile.write('#!/bin/bash \n\n')
scriptFile.write('# This script is auto-generated by the aggregate\n')
scriptFile.write('# manager in response to a createSliver call \n\n')
scriptFile.write('## Function definitions\n')
scriptFile.write('pingNode () { # pings specified PC to check if it is alive \n')
scriptFile.write(' pingAttempts=0 \n')
scriptFile.write(' echo \"Pinging VM 10.0.1.$1...\" \n')
scriptFile.write(' ping -c2 10.0.1.$1 \n')
scriptFile.write(' while [ $? -ne 0 ] && [ $pingAttempts -le 50 ] \n')
scriptFile.write(' do \n')
scriptFile.write(' sleep 10 # sleep for 10 more seconds \n')
scriptFile.write(' let \"pingAttempts += 1\" \n')
scriptFile.write(' echo \"Pinging VM 10.0.1.$1...\" \n')
scriptFile.write(' ping -c2 10.0.1.$1 \n')
scriptFile.write(' done \n')
scriptFile.write(' if [ $pingAttempts -gt 20 ] \n')
scriptFile.write(' then \n')
scriptFile.write(' return 1 # failed to ping PC \n')
scriptFile.write(' else \n')
scriptFile.write(' return 0 # success \n')
scriptFile.write(' fi \n')
scriptFile.write('} \n')
scriptFile.write('\n## Delete any existing sliver. \n')
scriptFile.write('%s/%s %s %s\n' % (config.standardScriptsDir,
config.deleteSliver,
config.homeDirectory,
config.sliceSpecificScriptsDir))
hostNames = experimentHosts.keys()
# Set the sliver status for each host to "unknown"
scriptFile.write('\n# Setting sliver status of hosts to unknown \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
statusFileName = '%s/pc%s.status' % (config.sliceSpecificScriptsDir,
hostObject.containerName)
scriptFile.write('echo \"unknown\" > %s \n' % statusFileName)
# Create container templates
scriptFile.write('\n## Define containers for each of the hosts in the experiment.\n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
if config.distro == 'UBUNTU10-STD' :
scriptFile.write('vzctl create %s --ostemplate ubuntu-10.04-x86\n' % hostObject.containerName)
else :
scriptFile.write('vzctl create %s --ostemplate fedora-15-x86 --config basic\n' % hostObject.containerName)
scriptFile.write('\n## Set up host names and control network IP addresses for the containers. \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('vzctl set %s --hostname %s --save \n' %
(hostObject.containerName, hostObject.nodeName))
scriptFile.write('vzctl set %s --ipadd 10.0.1.%s --save\n' %
(hostObject.containerName, hostObject.containerName))
scriptFile.write('\n# Turn off firewall on host \n')
scriptFile.write('/etc/init.d/iptables stop \n')
scriptFile.write('\n## Set up interfaces on the hosts and connect them to the appropriate bridges \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('# Set up interfaces for host %s \n' %
hostObject.nodeName)
# for each NIC on host set up the interface
for j in range(0, len(hostObject.NICs)) :
nicObject = hostObject.NICs[j]
scriptFile.write('vzctl set %d --netif_add eth%d,%s,%s,FE:FF:FF:FF:FF:FF,%s --save \n' % (hostObject.containerName, nicObject.deviceNumber, nicObject.macAddress, nicObject.virtualEthName, nicObject.link.bridgeID))
scriptFile.write('\n')
scriptFile.write('\n## Start up the hosts (containers) \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('vzctl start %s \n' % hostObject.containerName)
scriptFile.write('\n## Configure bridges on host \n')
for i in range(len(experimentLinks)) :
linkObject = experimentLinks[i]
scriptFile.write('brctl addbr %s \n' % linkObject.bridgeID)
# Add the virtual eth devices corresponding to the end-points of the
# link to the bridge
for j in range(len(linkObject.endPoints)) :
scriptFile.write('brctl addif %s %s \n' % (linkObject.bridgeID, \
linkObject.endPoints[j].virtualEthName))
scriptFile.write('ifconfig %s 0 \n\n' % linkObject.bridgeID)
scriptFile.write('\n## Give the hosts 30 seconds to start up \n')
scriptFile.write('sleep 30 \n\n')
scriptFile.write('# Ping hosts to make sure they are up. Give them more time if necessary.\n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('pingNode %d \n' % hostObject.containerName)
scriptFile.write('if [ $? -ne 0 ] \n')
scriptFile.write('then \n')
scriptFile.write(' echo \"Container %d failed to start up.\" \n' % hostObject.containerName)
statusFileName = '%s/pc%s.status' % (config.sliceSpecificScriptsDir,
hostObject.containerName)
scriptFile.write(' echo \"failed\" > %s \n' % statusFileName)
scriptFile.write('else \n')
statusFileName = '%s/pc%s.status' % (config.sliceSpecificScriptsDir,
hostObject.containerName)
scriptFile.write(' echo \"configuring\" > %s \n' % statusFileName)
scriptFile.write('fi \n')
scriptFile.write('\n## Set up interfaces on each host (container) \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('# Set up interfaces on PC %s\n' % hostObject.nodeName)
# Set up ethernet devices on the container
for j in range(len(hostObject.NICs)) :
scriptFile.write('vzctl exec %d \"/sbin/ifconfig eth%d 0\" \n' % \
(hostObject.containerName, \
hostObject.NICs[j].deviceNumber))
scriptFile.write('vzctl exec %d \"/sbin/ip addr add %s dev eth%d\"\n' % \
(hostObject.containerName, \
hostObject.NICs[j].ipAddress, \
hostObject.NICs[j].deviceNumber))
scriptFile.write('vzctl exec %d \"echo 0 > /proc/sys/net/ipv4/conf/eth%d/rp_filter\" \n' \
% (hostObject.containerName, \
hostObject.NICs[j].deviceNumber))
scriptFile.write('vzctl exec %d \"/sbin/ifconfig eth%d up\" \n' % \
(hostObject.containerName, \
hostObject.NICs[j].deviceNumber))
scriptFile.write('\n')
# Now we are ready to set up the IP routing tables on each container
scriptFile.write('\n## Set up IP routing tables on each host \n');
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('# Set up IP routing table for %s\n' % \
hostObject.nodeName)
# Turn on IP forwarding so host (container) can forward IP packets
scriptFile.write('vzctl exec %d \"/sbin/sysctl -w net.ipv4.ip_forward=1\" \n' \
% hostObject.containerName)
## Now set up routing tables. Create routing table entries so we
# can get to any reachable link (subnet) from this host.
# We first set up routes to links that are directly connected to
# to this host. Then we set up routes to links that can be
# reached through a gateway.
# Make two lists: One of directly connected links and the other
# that are not directly connected
#
directlyConnectedLinks = []
notDirectlyConnectedLinks = []
for j in range(len(experimentLinks)) :
directlyConnected = False
linkObject = experimentLinks[j]
for k in range(len(linkObject.endPoints)) :
if linkObject.endPoints[k].myHost == hostObject :
# This host is directly connected to the link (subnet)
# because one of the endPoints (NICs) on this link
# belong to this host.
directlyConnected = True
if directlyConnected :
directlyConnectedLinks.append(linkObject)
else :
notDirectlyConnectedLinks.append(linkObject)
# Now set up routing table entries for directly connected links.
# For these links we simply route packets to the NIC on this
# host that is conneted to this link (subnet)
for j in range(len(directlyConnectedLinks)) :
linkObject = directlyConnectedLinks[j]
# Find the endpoint on this link that is connected to this host
for k in range(len(linkObject.endPoints)) :
if linkObject.endPoints[k].myHost == hostObject :
# Found the endPoint (NIC) connects this host to the link
endPointToLink = linkObject.endPoints[k]
scriptFile.write('vzctl exec %d \"/sbin/ip route add 10.0.%d.0/24 dev eth%d\" \n' \
% (hostObject.containerName, \
linkObject.subnetNumber, \
endPointToLink.deviceNumber))
break # Break out of the 'for k in...' loop
# Now set up routing table entries for links that are not directly
# connected. For these links find the shortes path to the link
# (subnet) and route packes in that direction (first host in that
# direction acts as a gateway)
for j in range(len(notDirectlyConnectedLinks)) :
linkObject = notDirectlyConnectedLinks[j]
# Find the shortest path from this host to this subnet (linkObject)
path = graphUtils.findShortestPath(hostObject, linkObject)
if path != None :
# We found a path from this host to the subnet (link)
# Path is a NIC -> Link -> NIC -> Host (gateway) -> ...
# We care about the NIC on the gateway i.e. the 3rd
# item on this path
scriptFile.write('vzctl exec %d \"/sbin/ip route add 10.0.%d.0/24 via %s\" \n' \
% (hostObject.containerName, \
linkObject.subnetNumber, \
path[3].ipAddress))
scriptFile.write('\n')
# Turn on forwarding and arp proxing on the virtual eth devices created
# in the host OS (container 0)
scriptFile.write('\n# Turn on forwarding and arp proxing on the virtual eth devices created on the host OS \n')
nicNames = experimentNICs.keys()
for i in range(len(nicNames)) :
nicObject = experimentNICs[nicNames[i]]
scriptFile.write('ifconfig %s 0 \n' % nicObject.virtualEthName)
scriptFile.write('echo 1 > /proc/sys/net/ipv4/conf/%s/forwarding \n' \
% nicObject.virtualEthName)
scriptFile.write('echo 1 > /proc/sys/net/ipv4/conf/%s/proxy_arp \n' \
% nicObject.virtualEthName)
scriptFile.write('\n')
# Set up DNS entries on the containers so they can reference one another
# by name and can also reference hosts on the external network by name
scriptFile.write('\n# Set up DNS on the hosts. Use Google DNS.\n')
scriptFile.write('PRIMARYDNS=\"nameserver 8.8.8.8\" \n')
scriptFile.write('SECONDARYDNS=\"nameserver 8.8.4.4\" \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
scriptFile.write('vzctl exec %s \"echo order host,bind >> /etc/host.conf\" \n' % hostObject.containerName)
scriptFile.write('vzctl exec %s \"echo $PRIMARYDNS >> /etc/resolv.conf\" \n' % hostObject.containerName)
scriptFile.write('vzctl exec %s \"echo $SECONDARYDNS >> /etc/resolv.conf\" \n' % hostObject.containerName)
scriptFile.write('\n')
# Add hostname and IP addresses to /etc/hosts. For each host we pick
# IP address to add to this file. We arbitrarily pick the IP address
# associated with the first eth device in the list of NICs associated
# with the host. Examples of how hosts can be addressed: client_id,
# pc101, client_id.sliceName.geni-in-a-box.net or
# pc101.geni-in-a-box.net.
scriptFile.write('# Add host names and IP addresses to /etc/hosts \n')
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
# In the /etc/hosts for this host add an entry for every host
for j in range(len(hostNames)) :
hostObject2 = experimentHosts[hostNames[j]]
if len(hostObject2.NICs) != 0 :
scriptFile.write('vzctl exec %s \"echo %s %s pc%s %s.%s.geni-in-a-box.net pc%s.geni-in-a-box.net >> /etc/hosts\" \n' \
% (hostObject.containerName,
hostObject2.NICs[0].ipAddress,
hostObject2.nodeName,
hostObject2.containerName,
hostObject2.nodeName,
sliceName,
hostObject2.containerName))
# /etc/hosts has an entry for this host that is automatically
# put in there by OpenVZ. The entry looks like:
# 10.0.1.101 hostName
# We don't want this entry because is uses the control network. To
# delete this entry we copy /etc/hosts to /tmp/etc.hosts, delete
# the offending line, and write to /etc/hosts. The offending
# line will always start with 10.0.1. (control network)
scriptFile.write('# Deleting entry for %s that was inserted by OpenVz\n' % hostObject.nodeName)
scriptFile.write('vzctl exec %s \"cp /etc/hosts /tmp/etc.hosts\"\n'
% hostObject.containerName)
scriptFile.write('vzctl exec %s \"cat /tmp/etc.hosts | sed \'/^10.0.1./d\' > /etc/hosts\" \n' % hostObject.containerName)
scriptFile.write('vzctl exec %s \"rm /tmp/etc.hosts\" \n' %
hostObject.containerName)
scriptFile.write('\n')
# Download and install experimenter specified files into the VMs
# Go through each host and find out what needs to be installed
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
installList = hostObject.installList
if len(installList) != 0 :
scriptFile.write('# Install experimenter specified software on host %s \n' % hostObject.nodeName)
for item in installList :
# Download the file from the specified URL
scriptFile.write('# Download file %s\n' % item.sourceURL)
scriptFile.write('wget -P /tmp %s \n' % item.sourceURL)
scriptFile.write('if [ $? -eq 0 ] \n')
scriptFile.write('then \n')
scriptFile.write(' # Download successful \n')
downloadedFile = '/tmp/%s' % os.path.basename(item.sourceURL)
# Now generate commands to move the file to its proper location.
# If the file is of type .tgz or .tar.gz, we untar it to
# the location specified by the experimenter.
# If the file is of type .gz, we copy the file to the
# location specified by the experimenter and gunzip it there
# If the file is of some other type, we simply copy it to the
# location specified by the experimenter
# First figure out file type
if item.fileType == "" :
# File type not specified; guess based on file extension
if item.sourceURL.endswith("tgz") or \
item.sourceURL.endswith("tar.gz") :
# This is a tarball
item.fileType = "tar.gz"
elif item.sourceURL.endswith("gz") :
# This is a gzip compressed file
item.fileType = "gz"
else :
# Unknown or unsupported file type. We simply copy
# such a file to its install location
item.fileType = "unknown"
# Now make sure destination path does not end with a / (unless it
# is the directory /)
dest = item.destination
if dest.endswith("/") and len(dest) > 1 :
dest = dest[:-1]
# The downloaded file is to be installed in the file system of
# the appropriate container. For e.g. file system for
# container 101 is at /vz/root/101/...
dest = ("/vz/root/%s" % hostObject.containerName) + dest
# Create destination directory (and any necessary parent/ancestor
# directories in path) if it does not exist
if not os.path.isdir(dest) :
scriptFile.write(' mkdir -p %s \n' % dest)
if item.fileType == 'tar.gz':
# File to be installed is of type tar.gz: Uncompress and
# untar to destination
scriptFile.write(' tar -C %s -zxvf %s \n' %
(dest, downloadedFile))
elif item.fileType == 'gz' :
# File to be installed is of type gz: Copy to destination
# and then gunzip in place
scriptFile.write(' cp %s %s \n' % (downloadedFile, dest))
# Get the name of the zipped file
zipFile = dest + '/' + os.path.basename(downloadedFile)
scriptFile.write(' gunzip %s \n' % zipFile)
else :
# Some other file type. Simply copy file to destination
scriptFile.write(' cp %s %s \n' % (downloadedFile, dest))
# Make file accessible to experimenter
scriptFile.write(' chmod -R 777 %s \n' % dest)
# Delete the downloaded file
scriptFile.write(' rm %s \n' % downloadedFile)
scriptFile.write('fi \n')
# Now handle scripts to be executed on host (container) startup
execList = hostObject.executeList
if len(execList) != 0 :
scriptFile.write('\n# Set up experimenter specified startup scripts on host %s \n' % hostObject.nodeName)
for item in execList :
                if item.shell in ('sh', 'bash') :
pathToScript = '/vz/root/%s/%s' % (hostObject.containerName,
item.command)
scriptFile.write('vzctl runscript %s %s \n' % \
(hostObject.containerName, pathToScript))
else :
# Not a script type we recognize. Log error
config.logger.error("Execute script %s is of unsuported type" \
% item.command)
scriptFile.write('\n')
# set up an account for root
scriptFile.write('# Create root account in container %i \n' %
hostObject.containerName)
scriptFile.write('vzctl set %i --userpasswd root:%s \n' %
(hostObject.containerName, config.rootPwd))
# set up the user accounts and ssh public keys
for user in users :
userName = "" # the current user the public key is installed for
publicKeys = [] # the public keys for the current user, these are not files
# go through every user and get the user's name and ssh public key
for key in user.keys() :
# found a user, there should only be one of these per key in 'user'
if key == "urn" :
userName = user[key]
userName = userName.split("+")[-1]
# found a new public key list, store all the public keys
elif key == "keys" :
for value in user[key] :
publicKeys.append(value)
# only install the user account if there is a user to install
if userName != "":
scriptFile.write("# Create user %s for container %i and install public keys\n" % (userName, hostObject.containerName))
scriptFile.write("echo \"Creating user %s for container %s and installing public keys...\"\n" % (userName, hostObject.nodeName))
scriptFile.write("vzctl set %i --userpasswd %s:%s \n" %
(hostObject.containerName,
userName, config.rootPwd))
# install all of the public keys for this user
for publicKey in publicKeys :
scriptFile.write("mkdir -p /vz/root/%i/home/%s/.ssh\n" % (hostObject.containerName, userName))
scriptFile.write("chmod 755 /vz/root/%i/home/%s/.ssh\n" % (hostObject.containerName, userName))
scriptFile.write("touch /vz/root/%i/home/%s/.ssh/authorized_keys\n" % (hostObject.containerName, userName))
scriptFile.write("chmod 744 /vz/root/%i/home/%s/.ssh/authorized_keys\n" % (hostObject.containerName, userName))
scriptFile.write("echo \"%s\">>/vz/root/%i/home/%s/.ssh/authorized_keys\n" % (publicKey[:-1], hostObject.containerName, userName))
# add this user to group wheel or root depending on OS
groupName = ""
if config.distro == 'UBUNTU10-STD' :
groupName = "root"
else :
groupName = "wheel"
scriptFile.write('vzctl exec %s \"usermod -a -G %s %s" \n' %
(hostObject.containerName, groupName, userName))
scriptFile.write('\n')
scriptFile.write('\n')
scriptFile.close()
def specialFiles() :
# Re-open the file containing the bash script in append mode
pathToFile = config.sliceSpecificScriptsDir + '/' + config.shellScriptFile
try:
scriptFile = open(pathToFile, 'a')
except IOError:
config.logger.error("Failed to re-open file that creates sliver: %s" %
pathToFile)
return None
scriptFile.write('\n# Set up special files that contain slice info. \n')
hostNames = experimentHosts.keys()
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
# Put the slice manifest in the VMs
# Figure out name of destination directory for manifest. Create that
# directory (and any necessary parent/ancestor directories in path)
# if it does not exist
scriptFile.write('# Put slice manifest in /proj/<siteName>/exp/<sliceName>/tbdata/geni_manifest \n')
dest = '/vz/root/%s/proj/geni-in-a-box.net/exp/%s/tbdata/geni_manifest' % (hostObject.containerName, sliceName)
scriptFile.write('mkdir -p %s \n' % dest)
# Copy the manifest to this directory
src = config.sliceSpecificScriptsDir + '/' + config.manifestFile
scriptFile.write('cp %s %s \n' % (src, dest))
# Put slice information in /var/emulab/boot/nickname
# This file has the fully qualified name of the host in the form
# <experimenterSpecifiedHostName>.<sliceName>.geni-in-a-box.net
scriptFile.write('# Create nickname file \n')
dest = '/vz/root/%s/var/emulab/boot' % \
hostObject.containerName
scriptFile.write('mkdir -p %s \n' % dest)
fileContents = '%s.%s.geni-in-a-box.net' % (hostObject.nodeName,
sliceName)
scriptFile.write('echo \"%s\" > %s/nickname \n' % (fileContents, dest))
# Set status of the node to ready
scriptFile.write('# Set node status to ready \n')
statusFileName = '%s/pc%s.status' % (config.sliceSpecificScriptsDir,
hostObject.containerName)
scriptFile.write('echo \"ready\" > %s \n' % statusFileName)
scriptFile.write('\n')
scriptFile.close()
def freeResources() :
"""
Free up resources.
"""
experimentHosts.clear()
del experimentLinks[:]
experimentNICs.clear()
def getResourceStatus() :
"""
Return a list with the status of all VM resources. Each item in the
list is a dictionary with resource URN, resource status and error code.
This is what the list looks like:
[ { geni_urn: <resource URN>
geni_status: <status: configuring, ready, failed or unknown>
geni_error: <error code> }
{ geni_urn: <resource URN>
geni_status: <status: configuring, ready, failed or unknown>
geni_error: <error code> }
]
"""
resStatus = list()
hostNames = experimentHosts.keys()
for i in range(len(hostNames)) :
hostObject = experimentHosts[hostNames[i]]
resStatusFile = '%s/pc%s.status' % (config.sliceSpecificScriptsDir,
hostObject.containerName)
try :
f = open(resStatusFile, 'r')
resStatus.append(dict(geni_urn = hostObject.sliverURN,
geni_status = f.readline().strip('\n'),
geni_error = ''))
f.close()
except :
resStatus.append(dict(geni_urn = hostObject.sliverURN,
geni_status = "unknown",
geni_error = ''))
return resStatus
def provisionSliver(users) :
"""
Provision the sliver. First fill in missing information in the
VMNode, NIC and Link objects created when parsing the request rspec.
Then generate the bash script that, when run, will create and configure
the OpenVZ containers.
"""
# Fill in missing information in VMNode, NIC and Link objects
_annotateGraph()
# Generate the bash script
_generateBashScript(users)
| UTF-8 | Python | true | false | 37,606 | py | 92 | resources.py | 67 | 0.581822 | 0.57177 | 0 | 787 | 46.777637 | 225 |
cctbx/cctbx_project | 2,370,821,973,906 | 74c2012b1e535ca89460a022218d3f4785d5af90 | 5b6ba0f288b1e2ac236af846a9bf546a63228476 | /xfel/ui/db/job.py | dcc374f8f4944c687ba68b7d0291ad879cd7eec1 | [
"BSD-3-Clause-LBNL"
] | permissive | https://github.com/cctbx/cctbx_project | 5b547b416cadbdf95cca21dace9f54272a08d98a | 7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa | refs/heads/master | "2023-08-17T17:44:05.077010" | "2023-08-16T22:40:22" | "2023-08-16T22:40:22" | 39,508,026 | 206 | 131 | NOASSERTION | false | "2023-09-14T17:12:55" | "2015-07-22T13:36:27" | "2023-08-22T19:40:38" | "2023-09-14T17:12:55" | 122,392 | 190 | 113 | 119 | Python | false | false | from __future__ import absolute_import, division, print_function
from xfel.ui import settings_dir
from xfel.ui.db import db_proxy, get_run_path, write_xtc_locator, get_image_mode
import os, shutil, copy
known_job_statuses = ["DONE", "ERR", "PEND", "RUN", "SUSP", "PSUSP", "SSUSP", "UNKWN", "EXIT", "DONE", "ZOMBI", "DELETED", "SUBMIT_FAIL", "SUBMITTED", "HOLD", "TIMEOUT"]
finished_job_statuses = ["DONE", "EXIT", "DELETED", "UNKWN", "ERR", "SUBMIT_FAIL", "TIMEOUT"]
class JobFactory(object):
@staticmethod
def from_job(job, task_type = None):
if job.task_id is None:
return IndexingJob(job.app, job.id, **job._db_dict)
if task_type is None:
task_type = job.app.get_task(job.task_id).type
if task_type == "indexing":
return IndexingJob(job.app, job.id, **job._db_dict)
if task_type == "ensemble_refinement":
return EnsembleRefinementJob(job.app, job.id, **job._db_dict)
if task_type == "scaling":
return ScalingJob(job.app, job.id, **job._db_dict)
if task_type == "merging":
return MergingJob(job.app, job.id, **job._db_dict)
if task_type == "phenix":
return PhenixJob(job.app, job.id, **job._db_dict)
@staticmethod
def from_args(app, job_id = None, **kwargs):
if 'task_type' in kwargs:
task_type = kwargs.pop('task_type')
else:
task_type = None
return JobFactory.from_job(Job(app, job_id, **kwargs), task_type=task_type)
class Job(db_proxy):
def __init__(self, app, job_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_job" % app.params.experiment_tag, id = job_id, **kwargs)
self.job_id = self.id
self._run = None
self._rungroup = None
self._trial = None
self._task = None
self._dataset = None
self._dataset_version = None
def __getattr__(self, name):
# Called only if the property cannot be found
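    # Related database objects (run, rungroup, trial, ...) are fetched lazily on first access and cached in the corresponding _-prefixed attribute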
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
_name = "_" + name
name_id = name + "_id"
if getattr(self, _name) is None:
if name == "dataset_version":
if self.dataset_id is not None:
self._dataset_version = self.app.get_job_dataset_version(self.id)
elif getattr(self, name_id) is not None:
          setattr(self, _name, getattr(self.app, "get_" + name)(**{name_id: getattr(self, name_id)}))
return getattr(self, _name)
elif name == "scope":
return task_scope[task_types.index(self.type)]
else:
return super(Job, self).__getattr__(name)
def __setattr__(self, name, value):
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
setattr(self, "_"+name, value)
else:
super(Job, self).__setattr__(name, value)
def get_log_path(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
return os.path.join(run_path, "stdout", "log.out")
def submit(self, previous_job = None):
raise NotImplementedError("Override me!")
def delete(self, output_only=False):
raise NotImplementedError("Override me!")
def get_output_files(self):
    # Return the output folder and the experiment and reflection table suffixes
raise NotImplementedError("Override me!")
def remove_from_db(self):
assert self.status == "DELETED"
print("Removing job %d from the db"%self.id, end=' ')
tag = self.app.params.experiment_tag
query = """DELETE job FROM `%s_job` job
WHERE job.id = %d""" % (
tag, self.id)
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
def get_identifier_string(self):
if self.app.params.facility.name == 'lcls':
s = "%s_%s_r%04d_t%03d_rg%03d"% \
(self.app.params.facility.lcls.experiment, self.app.params.experiment_tag, int(self.run.run), self.trial.trial, self.rungroup.id)
else:
s = "%s_%s_t%03d_rg%03d"% \
(self.app.params.experiment_tag, self.run.run, self.trial.trial, self.rungroup.id)
if self.task is not None:
s += "_task%03d"%self.task.id
return s
class AveragingJob(Job):
def get_identifier_string(self):
# Override this function because rungroups are not used for averaging
if self.app.params.facility.name == 'lcls':
s = "%s_%s_r%04d"% \
(self.app.params.facility.lcls.experiment, self.app.params.experiment_tag, int(self.run.run))
else:
s = "%s_%s"% \
(self.app.params.experiment_tag, self.run.run)
return s
def submit(self, previous_job=None):
from xfel.command_line.cxi_mpi_submit import Script as submit_script
params = copy.deepcopy(self.app.params)
params.dispatcher = 'dxtbx.image_average'
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
# Make an argument list that can be submitted to cxi_mpi_submit.
# dxtbx.image_average does not use phil files.
extra_args = "-a <output_dir>/avg.cbf -m <output_dir>/max.cbf -s <output_dir>/std.cbf"
if self.skip_images > 0:
extra_args += f' --skip-images={self.skip_images}'
if self.num_images > 0:
extra_args += f' --num-images={self.num_images}'
self.args = [
f'input.run_num = {self.run.run}',
'input.dispatcher = dxtbx.image_average',
'output.output_dir = {0}'.format(os.path.join(params.output_folder, 'averages')),
'output.split_logs = False',
'output.add_output_dir_option = False',
f'mp.extra_args = {extra_args}',
f'mp.method = {params.mp.method}',
]
if params.mp.method != 'local' or (params.mp.method == 'local' and params.facility.name == 'lcls'):
mp_args = [
f'mp.use_mpi = {params.mp.use_mpi}',
f'mp.mpi_command = {params.mp.mpi_command}',
f'mp.mpi_option = "--mpi=True"',
f'mp.nnodes = {params.mp.nnodes}',
f'mp.nproc = {params.mp.nproc}',
f'mp.nproc_per_node = {params.mp.nproc_per_node}',
f'mp.queue = {params.mp.queue}',
f'mp.env_script = {params.mp.env_script[0]}',
f'mp.wall_time = {params.mp.wall_time}',
f'mp.htcondor.executable_path = {params.mp.htcondor.executable_path}',
]
for arg in mp_args:
self.args.append(arg)
if params.mp.shifter.shifter_image is not None:
shifter_args = [
f'mp.shifter.nersc_shifter_image = {params.mp.shifter.shifter_image}',
f'mp.shifter.sbatch_script_template = {params.mp.shifter.sbatch_script_template}',
f'mp.shifter.srun_script_template = {params.mp.shifter.srun_script_template}',
f'mp.shifter.nersc_partition = {params.mp.shifter.partition}',
f'mp.shifter.nersc_jobname = {params.mp.shifter.jobname}',
f'mp.shifter.nersc_project = {params.mp.shifter.project}',
f'mp.shifter.nersc_constraint = {params.mp.shifter.constraint}',
f'mp.shifter.nersc_reservation = {params.mp.shifter.reservation}',
f'mp.shifter.staging = {params.mp.shifter.staging}',
]
for arg in shifter_args:
self.args.append(arg)
if params.facility.name == 'lcls':
locator_path = os.path.join(configs_dir, identifier_string + ".loc")
self.args.append(f'input.locator = {locator_path}')
write_xtc_locator(locator_path, params, self.run, self.rungroup)
result = submit_script().run(self.args)
return result
class IndexingJob(Job):
def get_output_files(self):
run_path = str(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run))
return os.path.join(run_path, 'out'), '_integrated.expt', '_integrated.refl', None, None
def submit(self, previous_job = None):
import libtbx.load_env
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
dispatcher = self.app.params.dispatcher
phil_str = self.trial.target_phil_str
if phil_str is None: phil_str = ""
if self.rungroup.extra_phil_str is not None:
phil_str += "\n" + self.rungroup.extra_phil_str
from xfel.ui import load_phil_scope_from_dispatcher
if dispatcher == "cxi.xtc_process":
image_format = 'pickle'
else:
orig_phil_scope = load_phil_scope_from_dispatcher(dispatcher)
if os.path.isfile(dispatcher):
dispatcher = 'libtbx.python ' + dispatcher
from iotbx.phil import parse
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
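        # Two-theta limits were requested: fold a radial_average scope (using the untrusted pixel mask) into the processing phil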
override_str = """
radial_average {
enable = True
show_plots = False
verbose = False
output_bins = False
mask = %s
}
"""%(self.rungroup.untrusted_pixel_mask_path)
phil_scope = orig_phil_scope.fetch(parse(override_str))
else:
phil_scope = orig_phil_scope
trial_params = phil_scope.fetch(parse(phil_str)).extract()
image_format = self.rungroup.format
mode = get_image_mode(self.rungroup)
if hasattr(trial_params, 'format'):
trial_params.format.file_format = image_format
trial_params.format.cbf.mode = mode
if hasattr(trial_params.indexing.stills, 'known_orientations') and \
len(trial_params.indexing.stills.known_orientations) == 1:
try:
ko_trial, ko_rungroup = trial_params.indexing.stills.known_orientations[0].split('_')
ko_trial = self.app.get_trial(trial_number=int(ko_trial))
ko_rungroup = self.app.get_rungroup(int(ko_rungroup.lstrip('rg')))
except (IndexError, ValueError):
pass
else:
ko_run_path = get_run_path(self.app.params.output_folder, ko_trial, ko_rungroup, self.run)
ko_wildcard = trial_params.output.refined_experiments_filename.replace('%s', '*')
trial_params.indexing.stills.known_orientations[0] = os.path.join(ko_run_path, 'out', ko_wildcard)
if self.rungroup.calib_dir is not None or self.rungroup.config_str is not None or dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
config_path = os.path.join(configs_dir, identifier_string + ".cfg")
else:
config_path = None
if hasattr(trial_params.dispatch, 'process_percent'):
trial_params.dispatch.process_percent = self.trial.process_percent
    # Dictionary for formatting the submit phil and, if used, the labelit cfg file
d = dict(
# Generally for the LABELIT backend or image pickles
address = self.rungroup.detector_address,
default_calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0"),
dark_avg_path = self.rungroup.dark_avg_path,
dark_stddev_path = self.rungroup.dark_stddev_path,
untrusted_pixel_mask_path = self.rungroup.untrusted_pixel_mask_path,
detz_parameter = self.rungroup.detz_parameter,
gain_map_path = self.rungroup.gain_map_path,
gain_mask_level = self.rungroup.gain_mask_level,
beamx = self.rungroup.beamx,
beamy = self.rungroup.beamy,
energy = self.rungroup.energy,
binning = self.rungroup.binning,
two_theta_low = self.rungroup.two_theta_low,
two_theta_high = self.rungroup.two_theta_high,
# Generally for job submission
dry_run = self.app.params.dry_run,
dispatcher = dispatcher,
cfg = config_path,
experiment = self.app.params.facility.lcls.experiment, # LCLS specific parameter
run_num = self.run.run,
output_dir = self.app.params.output_folder,
use_ffb = self.app.params.facility.lcls.use_ffb, # LCLS specific parameter
# Generally for both
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
experiment_tag = self.app.params.experiment_tag,
calib_dir = self.rungroup.calib_dir,
nproc = self.app.params.mp.nproc,
nnodes = self.app.params.mp.nnodes_index or self.app.params.mp.nnodes,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if self.app.params.mp.env_script is not None and len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
phenix_script = self.app.params.mp.phenix_script[0] if self.app.params.mp.phenix_script is not None and len(self.app.params.mp.phenix_script) > 0 and len(self.app.params.mp.phenix_script[0]) > 0 else None,
method = self.app.params.mp.method,
wall_time = self.app.params.mp.wall_time,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
nersc_shifter_image = self.app.params.mp.shifter.shifter_image,
sbatch_script_template = self.app.params.mp.shifter.sbatch_script_template,
srun_script_template = self.app.params.mp.shifter.srun_script_template,
nersc_partition = self.app.params.mp.shifter.partition,
nersc_jobname = self.app.params.mp.shifter.jobname,
nersc_project = self.app.params.mp.shifter.project,
nersc_constraint = self.app.params.mp.shifter.constraint,
nersc_reservation = self.app.params.mp.shifter.reservation,
nersc_staging = self.app.params.mp.shifter.staging,
target = target_phil_path,
host = self.app.params.db.host,
dbname = self.app.params.db.name,
user = self.app.params.db.user,
port = self.app.params.db.port,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls'),
mpi_command = self.app.params.mp.mpi_command,
extra_options = "\n".join(["extra_options = %s"%opt for opt in self.app.params.mp.extra_options]),
)
if self.app.params.mp.method == 'sge':
d['use_mpi'] = False
if self.app.params.db.password is not None and len(self.app.params.db.password) == 0:
d['password'] = None
else:
d['password'] = self.app.params.db.password
phil = open(target_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
phil.write(phil_str)
else:
extra_scope = None
if hasattr(trial_params, 'format'):
if image_format == "cbf":
trial_params.input.address = self.rungroup.detector_address
trial_params.format.cbf.detz_offset = self.rungroup.detz_parameter
trial_params.format.cbf.override_energy = self.rungroup.energy
trial_params.format.cbf.invalid_pixel_mask = self.rungroup.untrusted_pixel_mask_path
if mode == 'cspad':
trial_params.format.cbf.cspad.gain_mask_value = self.rungroup.gain_mask_level
elif mode == 'rayonix':
trial_params.format.cbf.rayonix.bin_size = self.rungroup.binning
trial_params.format.cbf.rayonix.override_beam_x = self.rungroup.beamx
trial_params.format.cbf.rayonix.override_beam_y = self.rungroup.beamy
if trial_params.input.known_orientations_folder is not None:
trial_params.input.known_orientations_folder = trial_params.input.known_orientations_folder.format(run=self.run.run)
else:
if trial_params.spotfinder.lookup.mask is None:
trial_params.spotfinder.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if trial_params.integration.lookup.mask is None:
trial_params.integration.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if self.app.params.facility.name == 'lcls':
locator_path = os.path.join(configs_dir, identifier_string + ".loc")
write_xtc_locator(locator_path, self.app.params, self.run, self.rungroup)
if mode == 'rayonix':
from xfel.cxi.cspad_ana import rayonix_tbx
pixel_size = rayonix_tbx.get_rayonix_pixel_size(self.rungroup.binning)
extra_scope = parse("geometry { detector { panel { origin = (%f, %f, %f) } } }"%(-self.rungroup.beamx * pixel_size,
self.rungroup.beamy * pixel_size,
-self.rungroup.detz_parameter))
d['locator'] = locator_path
else:
d['locator'] = None
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
try:
trial_params.radial_average.two_theta_low = self.rungroup.two_theta_low
trial_params.radial_average.two_theta_high = self.rungroup.two_theta_high
except AttributeError:
pass # not all dispatchers support radial averaging
working_phil = phil_scope.format(python_object=trial_params)
if extra_scope:
working_phil = working_phil.fetch(extra_scope)
diff_phil = orig_phil_scope.fetch_diff(source=working_phil)
phil.write(diff_phil.as_str())
phil.close()
if config_path is not None:
if dispatcher != 'cxi.xtc_process':
        d['untrusted_pixel_mask_path'] = None # Don't pass a pixel mask to mod_image_dict;
        # it will be used directly during dials processing instead
config_str = "[psana]\n"
if self.rungroup.calib_dir is not None:
config_str += "calib-dir=%s\n"%self.rungroup.calib_dir
modules = []
if self.rungroup.config_str is not None:
for line in self.rungroup.config_str.split("\n"):
if line.startswith('['):
modules.append(line.lstrip('[').rstrip(']'))
if dispatcher == 'cxi.xtc_process':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_hitfind:index','my_ana_pkg.mod_dump:index'])
elif image_format == 'pickle':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_image_dict'])
if self.app.params.facility.lcls.dump_shots:
modules.insert(0, 'my_ana_pkg.mod_dump:shot')
if len(modules) > 0:
config_str += "modules = %s\n"%(" ".join(modules))
if self.rungroup.config_str is not None:
config_str += self.rungroup.config_str + "\n"
if dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
d['address'] = d['address'].replace('.','-').replace(':','|') # old style address
if dispatcher == 'cxi.xtc_process':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "index_all.cfg"))
elif image_format == 'pickle':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "image_dict.cfg"))
for line in template.readlines():
config_str += line.format(**d)
template.close()
d['address'] = self.rungroup.detector_address
cfg = open(config_path, 'w')
cfg.write(config_str)
cfg.close()
if dispatcher != 'cxi.xtc_process':
d['untrusted_pixel_mask_path'] = self.rungroup.untrusted_pixel_mask_path
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
if dispatcher in ['cxi.xtc_process', 'cctbx.xfel.xtc_process']:
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
test_root = os.path.join(submit_root, "submit_" + dispatcher + ".phil")
if os.path.exists(test_root):
template = open(test_root)
else:
if hasattr(trial_params, 'format'):
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
template = open(os.path.join(submit_root, "submit_xfel_process.phil"))
phil = open(submit_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
d['target'] = None # any target phil will be in mod_hitfind
for line in template.readlines():
phil.write(line.format(**d))
d['target'] = target_phil_path
template.close()
phil.close()
from xfel.command_line.cxi_mpi_submit import Script as submit_script
args = [submit_phil_path]
if self.app.params.facility.name not in ['lcls']:
args.append(self.run.path)
return submit_script().run(args)
def delete(self, output_only=False):
if self.status not in finished_job_statuses:
print("Job is not finished (status = %s)"%self.status)
return
if self.status == "DELETED":
return
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
# Have to be careful to delete from the tables in the right order
tag = self.app.params.experiment_tag
def delete_and_commit(query):
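      # Helper: run the DELETE query, commit it, and report how many rows were removed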
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
print("Deleting cell_bin entries", end=' ')
query = """DELETE cell_bin FROM `%s_cell_bin` cell_bin
JOIN `%s_crystal` crystal ON crystal.id = cell_bin.crystal_id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
ids = {}
for item in "crystal", "beam", "detector":
print("Listing %s ids"%item, end=' ')
query = """SELECT %s.id FROM `%s_%s` %s
JOIN `%s_experiment` expr ON expr.%s_id = %s.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
item, tag, item, item, tag, item, item, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
ids[item] = ",".join(item_ids)
if len(self.trial.isoforms) == 0:
print("Listing bin entries", end=' ')
query = """SELECT bin.id FROM `%s_bin` bin
JOIN `%s_cell` cell ON bin.cell_id = cell.id
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id is NULL""" % (
tag, tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
bin_ids = ",".join(item_ids)
print("Listing cell entries", end=' ')
query = """SELECT cell.id FROM `%s_cell` cell
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id IS NULL""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
cell_ids = ",".join(item_ids)
print("Deleting experiment entries", end=' ')
query = """DELETE expr FROM `%s_experiment` expr
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
for item in "crystal", "beam", "detector":
if len(ids[item]) > 0:
print("Deleting %s entries"%item, end=' ')
query = """DELETE %s FROM `%s_%s` %s
WHERE %s.id IN (%s)""" % (
item, tag, item, item, item, ids[item])
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(bin_ids) > 0:
print("Deleting bin entries", end=' ')
query = """DELETE bin FROM `%s_bin` bin
WHERE bin.id IN (%s)""" % (
tag, bin_ids)
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(cell_ids) > 0:
print("Deleting cell entries", end=' ')
query = """DELETE cell FROM `%s_cell` cell
WHERE cell.id IN (%s)""" % (
tag, cell_ids)
delete_and_commit(query)
print("Listing imageset entries", end=' ')
query = """SELECT imgset.id FROM `%s_imageset` imgset
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
imageset_ids = ",".join(item_ids)
print("Deleting imageset_event entries", end=' ')
query = """DELETE is_e FROM `%s_imageset_event` is_e
JOIN `%s_event` evt ON evt.id = is_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
if len(imageset_ids) > 0:
print("Deleting imageset entries", end=' ')
query = """DELETE imgset FROM `%s_imageset` imgset
WHERE imgset.id IN (%s)""" % (
tag, imageset_ids)
delete_and_commit(query)
print("Deleting event entries", end=' ')
query = """DELETE evt FROM `%s_event` evt
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
self.status = "DELETED"
class EnsembleRefinementJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'combine_experiments_t%03d'%self.trial.trial, 'intermediates', "*reintegrated*"), '.expt', '.refl', None, None
def get_log_path(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'combine_experiments_t%03d'%self.trial.trial, 'intermediates',
"combine_t%03d_rg%03d_chunk000.out"%(self.trial.trial, self.rungroup.id)) # XXX there can be multiple chunks or multiple clusters
def submit(self, previous_job = None):
from xfel.command_line.striping import Script
from xfel.command_line.cxi_mpi_submit import get_submission_id
from libtbx import easy_run
configs_dir = os.path.join(settings_dir, "cfgs")
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
if self.task.parameters:
f.write(self.task.parameters)
path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
os.mkdir(path)
arguments = """
mp.queue={}
mp.nnodes={}
mp.nproc_per_node={}
mp.method={}
{}
{}
mp.wall_time={}
mp.use_mpi=False
mp.mpi_command={}
{}
mp.shifter.submit_command={}
mp.shifter.shifter_image={}
mp.shifter.sbatch_script_template={}
mp.shifter.srun_script_template={}
mp.shifter.partition={}
mp.shifter.jobname={}
mp.shifter.project={}
mp.shifter.reservation={}
mp.shifter.constraint={}
mp.shifter.staging={}
striping.results_dir={}
striping.trial={}
striping.rungroup={}
striping.run={}
{}
striping.chunk_size=256
striping.stripe=False
striping.dry_run=True
striping.output_folder={}
reintegration.integration.lookup.mask={}
mp.local.include_mp_in_command=False
""".format(self.app.params.mp.queue if len(self.app.params.mp.queue) > 0 else None,
1,#self.app.params.mp.nproc,
self.app.params.mp.nproc_per_node,
self.app.params.mp.method,
'\n'.join(['mp.env_script={}'.format(p) for p in self.app.params.mp.env_script if p]),
'\n'.join(['mp.phenix_script={}'.format(p) for p in self.app.params.mp.phenix_script if p]),
self.app.params.mp.wall_time,
self.app.params.mp.mpi_command,
"\n".join(["extra_options={}".format(opt) for opt in self.app.params.mp.extra_options]),
self.app.params.mp.shifter.submit_command,
self.app.params.mp.shifter.shifter_image,
self.app.params.mp.shifter.sbatch_script_template,
self.app.params.mp.shifter.srun_script_template,
self.app.params.mp.shifter.partition,
self.app.params.mp.shifter.jobname,
self.app.params.mp.shifter.project,
self.app.params.mp.shifter.reservation,
self.app.params.mp.shifter.constraint,
self.app.params.mp.shifter.staging,
self.app.params.output_folder,
self.trial.trial,
self.rungroup.id,
self.run.run,
target_phil_path,
path,
self.rungroup.untrusted_pixel_mask_path,
).split()
try:
commands = Script(arguments).run()
except Exception as e:
if 'no DIALS integration results found' in str(e):
print("No DIALS integration results found")
self.status = "EXIT"
return
else: raise
submission_ids = []
if self.app.params.mp.method == 'local':
self.status = "RUNNING"
for command in commands:
try:
result = easy_run.fully_buffered(command=command)
result.raise_if_errors()
except Exception as e:
if not "Warning: job being submitted without an AFS token." in str(e):
raise e
submission_ids.append(get_submission_id(result, self.app.params.mp.method))
if self.app.params.mp.method == 'local':
self.status = "DONE"
else:
return ",".join(submission_ids)
class ScalingJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'out'), ".expt", ".refl", None, None
def write_submit_phil(self, submit_phil_path, target_phil_path):
import libtbx.load_env
from xfel.ui.db.task import task_types, task_dispatchers
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
d = dict(
dry_run = self.app.params.dry_run,
dispatcher = task_dispatchers[task_types.index(self.task.type)],
run_num = self.run.run,
output_dir = self.app.params.output_folder,
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
task = self.task.id,
nproc = self.app.params.mp.nproc,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
phenix_script = self.app.params.mp.phenix_script[0] if len(self.app.params.mp.phenix_script) > 0 and len(self.app.params.mp.phenix_script[0]) > 0 else None,
method = self.app.params.mp.method,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
nersc_shifter_image = self.app.params.mp.shifter.shifter_image,
sbatch_script_template = self.app.params.mp.shifter.sbatch_script_template,
srun_script_template = self.app.params.mp.shifter.srun_script_template,
nersc_partition = self.app.params.mp.shifter.partition,
nersc_jobname = self.app.params.mp.shifter.jobname,
nersc_project = self.app.params.mp.shifter.project,
nersc_constraint = self.app.params.mp.shifter.constraint,
nersc_reservation = self.app.params.mp.shifter.reservation,
nersc_staging = self.app.params.mp.shifter.staging,
target = target_phil_path,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls'),
mpi_command = self.app.params.mp.mpi_command,
nnodes = self.app.params.mp.nnodes_scale or self.app.params.mp.nnodes,
wall_time = self.app.params.mp.wall_time,
extra_options = "\n".join(["extra_options = %s"%opt for opt in self.app.params.mp.extra_options]),
)
with open(submit_phil_path, "w") as phil:
for line in open(os.path.join(submit_root, "submit_xfel_merge.phil")).readlines():
phil.write(line.format(**d))
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import Script as submit_script
output_path = os.path.join(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task), 'out')
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
input_folder, expt_suffix, refl_suffix, _, _ = previous_job.get_output_files()
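    # Chain this task to the previous job: its output experiments/reflections become this task's input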
with open(target_phil_path, 'w') as f:
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_%d\n"%(self.task.type, self.task.id))
f.write(self.task.parameters)
self.write_submit_phil(submit_phil_path, target_phil_path)
args = [submit_phil_path]
return submit_script().run(args)
class MergingJob(Job):
def get_global_path(self):
if self.dataset_version is None:
return None
return self.dataset_version.output_path()
def get_log_path(self):
return self.get_global_path()
def get_identifier_string(self):
return "%s_%s%03d_v%03d"%(self.dataset.name, self.task.type, self.task.id, self.dataset_version.version)
def delete(self, output_only=False):
job_folder = self.get_global_path()
if job_folder and os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
path = self.get_global_path()
return path, None, None, "%s_v%03d_all.mtz"%(self.dataset.name, self.dataset_version.version), None
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import do_submit
output_path = self.get_global_path()
if not os.path.exists(output_path):
os.makedirs(output_path)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(output_path, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
expt_suffix = refl_suffix = None
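      # All jobs contributing to this dataset version must share the same experiment/reflection suffixes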
for job in self.dataset_version.jobs:
input_folder, _expt_suffix, _refl_suffix, _, _ = job.get_output_files()
if expt_suffix is None: expt_suffix = _expt_suffix
else: assert expt_suffix == _expt_suffix
if refl_suffix is None: refl_suffix = _refl_suffix
else: assert refl_suffix == _refl_suffix
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_v%03d\n"%(self.dataset.name, self.dataset_version.version))
f.write(self.task.parameters)
command = "cctbx.xfel.merge %s"%target_phil_path
submit_path = os.path.join(output_path, identifier_string + "_submit.sh")
params = self.app.params.mp
if params.nnodes_merge:
params = copy.deepcopy(params)
params.nnodes = params.nnodes_merge
return do_submit(command, submit_path, output_path, params, log_name="log.out", err_name="err.out", job_name=identifier_string)
class PhenixJob(Job):
def get_global_path(self):
return os.path.join(self.dataset_version.output_path(), self.get_identifier_string())
def get_log_path(self):
return self.get_global_path()
def get_identifier_string(self):
return "%s_%s%03d_v%03d"%(self.dataset.name, self.task.type, self.task.id, self.dataset_version.version)
def delete(self, output_only=False):
job_folder = self.get_global_path()
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
path = self.get_global_path()
return path, None, None, ".mtz", ".pdb"
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import do_submit
output_path = self.get_global_path()
if not os.path.exists(output_path):
os.makedirs(output_path)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(output_path, identifier_string + "_params.phil")
input_folder, _, _, input_mtz, _ = previous_job.get_output_files()
command = self.task.parameters.split('\n')[0]
phil_params = '\n'.join(self.task.parameters.split('\n')[1:])
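    # The first line of the task parameters is the phenix command; the remaining lines are phil parameters whose <PREVIOUS_TASK_*> and <DATASET_*> placeholders are filled in below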
phil_params = phil_params.replace('<PREVIOUS_TASK_MTZ>', os.path.join(input_folder, input_mtz))
phil_params = phil_params.replace('<PREVIOUS_TASK_FOLDER>', input_folder)
phil_params = phil_params.replace('<DATASET_NAME>', self.dataset.name)
phil_params = phil_params.replace('<DATASET_VERSION>', str(self.dataset_version.version))
with open(target_phil_path, 'w') as f:
f.write(phil_params)
command = "%s %s"%(command, target_phil_path)
submit_path = os.path.join(output_path, identifier_string + "_submit.sh")
params = copy.deepcopy(self.app.params.mp)
if params.nnodes_merge:
params.nnodes = params.nnodes_merge
params.use_mpi = False
params.shifter.staging = None
if 'upload' in command:
params.nnodes = 1
params.nproc_per_node = 1
#params.queue = 'shared'
else:
params.env_script = params.phenix_script
if params.method == 'shifter' and 'upload' not in command:
import libtbx.load_env
params.shifter.sbatch_script_template = os.path.join( \
libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "phenix_sbatch.sh")
params.shifter.srun_script_template = os.path.join( \
libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "phenix_srun.sh")
return do_submit(command, submit_path, output_path, params, log_name="log.out", err_name="err.out", job_name=identifier_string)
# Support classes and functions for job submission
class _job(object):
"""Used to represent a job that may not have been submitted into the cluster or database yet"""
def __init__(self, trial, rungroup, run, task=None, dataset=None):
self.trial = trial
self.rungroup = rungroup
self.run = run
self.task = task
self.dataset = dataset
def __str__(self):
s = "Job: Trial %d, rg %d, run %s"%(self.trial.trial, self.rungroup.id, self.run.run)
if self.task:
s += ", task %d %s"%(self.task.id, self.task.type)
if self.dataset:
s += ", dataset %d %s"%(self.dataset.id, self.dataset.name)
return s
@staticmethod
def job_hash(job):
ret = []
check = ['trial', 'rungroup', 'run', 'task', 'dataset']
for subitem_name in check:
subitem = getattr(job, subitem_name)
if subitem is None:
ret.append(None)
else:
ret.append(subitem.id)
return tuple(ret)
  def __eq__(self, other):
    return _job.job_hash(self) == _job.job_hash(other)
def submit_all_jobs(app):
submitted_jobs = {_job.job_hash(j):j for j in app.get_all_jobs()}
if app.params.mp.method == 'local': # only run one job at a time
for job in submitted_jobs.values():
if job.status in ['RUN', 'UNKWN', 'SUBMITTED']: return
runs = app.get_all_runs()
trials = app.get_all_trials(only_active = True)
needed_jobs = []
for trial in trials:
for rungroup in trial.rungroups:
assert rungroup.active
for run in rungroup.runs:
needed_jobs.append(_job(trial, rungroup, run))
for job in needed_jobs:
if _job.job_hash(job) in submitted_jobs:
continue
print("Submitting job: trial %d, rungroup %d, run %s"%(job.trial.trial, job.rungroup.id, job.run.run))
j = JobFactory.from_args(app,
trial_id = job.trial.id,
rungroup_id = job.rungroup.id,
run_id = job.run.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
if app.params.mp.method == 'local': # only run one job at a time
return
datasets = app.get_all_datasets()
for dataset_idx, dataset in enumerate(datasets):
if not dataset.active: continue
# one of the tasks will have a trial, otherwise we don't know where to save the data
trial = None
tasks = dataset.tasks
for task in tasks:
if task.trial is not None:
if trial is None:
trial = task.trial
else:
assert trial.id == task.trial.id, "Found multiple trials, don't know where to save the results"
assert trial, "No trial found in task list, don't know where to save the results"
trial_tags_ids = [t.id for t in trial.tags]
dataset_tags = [t for t in dataset.tags if t.id in trial_tags_ids]
if not dataset_tags or len(dataset_tags) < len(dataset.tags): continue
runs_rungroups = []
for rungroup in trial.rungroups:
runs_rungroups.extend([(run, rungroup) for run in app.get_rungroup_runs_by_tags(rungroup, dataset_tags, dataset.tag_operator)])
# Datasets always start with indexing
global_tasks = {}
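    # Finished per-run jobs are collected here, keyed by (dataset index, task index), to feed the global tasks later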
for run, rungroup in runs_rungroups:
submit_next_task = False
last_task_status = ""
previous_job = None
for task_idx, task in enumerate(tasks):
if task.scope == 'global':
if previous_job.status in ["DONE", "EXIT"]:
key = (dataset_idx, task_idx)
if key not in global_tasks:
global_tasks[key] = []
global_tasks[key].append(previous_job)
continue
if task.type == 'indexing':
job = _job(trial, rungroup, run)
else:
job = _job(trial, rungroup, run, task)
try:
submitted_job = submitted_jobs[_job.job_hash(job)]
except KeyError:
if not submit_next_task:
print("Warning, expected to find submitted %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
break
else:
if not task_idx+1 < len(tasks): break # no more tasks to do after this one
next_task = tasks[task_idx+1]
if submitted_job.status not in finished_job_statuses or submitted_job.status == "UNKWN":
print ("Task %s waiting on job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
if submitted_job.status not in ["DONE", "EXIT"]:
print ("Task %s cannot start due to unexpected status for job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
submit_next_task = True
previous_job = submitted_job
continue
print("Submitting %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
j = JobFactory.from_args(app,
trial_id = trial.id,
rungroup_id = rungroup.id,
run_id = run.id,
task_id = task.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run; j.task = job.task
try:
j.submission_id = j.submit(previous_job)
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
previous_job = j
if app.params.mp.method == 'local': # only run one job at a time
return
break # job submitted so don't look for more in this run for this dataset
versions = dataset.versions
for task_idx, task in enumerate(tasks):
if task.scope == 'local':
# only global tasks follow global tasks
if task_idx: assert tasks[task_idx-1].scope != 'global'
continue
assert task.scope == 'global' # only two task scopes
assert task_idx # first task cannot be global
prev_task = tasks[task_idx-1]
if prev_task.scope == 'global':
# Submit a job for this task for any versions where it has not been
prev_j = _job(None, None, None, prev_task, dataset)
test_j = _job(None, None, None, task, dataset)
for version in versions:
prev_job = this_job = None
for j in version.jobs:
if prev_j == j:
prev_job = j
continue
elif test_j == j:
this_job = j
continue
if prev_job and this_job: break
if not this_job and prev_job and prev_job.status == 'DONE':
j = JobFactory.from_args(app,
task_id = task.id,
dataset_id = dataset.id,
status = "SUBMITTED")
j.task = task; j.dataset = dataset; j.dataset_version = version
try:
j.submission_id = j.submit(prev_job)
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
version.add_job(j)
if app.params.mp.method == 'local': # only run one job at a time
return
key = dataset_idx, task_idx
if key not in global_tasks: continue # no jobs ready yet
latest_version = dataset.latest_version
next_version = None
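      # A new dataset version is created only if there are finished local jobs not yet part of the latest version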
if latest_version is None:
next_version = 0
else:
latest_version_local_jobs = [j.id for j in latest_version.jobs if j.task.scope == 'local']
new_jobs = [j for j in global_tasks[key] if j.id not in latest_version_local_jobs]
if new_jobs:
next_version = latest_version.version + 1
if next_version is not None:
latest_version = app.create_dataset_version(dataset_id = dataset.id, version=next_version)
for job in global_tasks[key]:
latest_version.add_job(job)
j = JobFactory.from_args(app,
task_id = task.id,
dataset_id = dataset.id,
status = "SUBMITTED")
j.task = task; j.dataset = dataset; j.dataset_version = latest_version
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
latest_version.add_job(j)
if app.params.mp.method == 'local': # only run one job at a time
return
| UTF-8 | Python | false | false | 51,293 | py | 1,928 | job.py | 1,786 | 0.605638 | 0.603786 | 0 | 1,150 | 43.602609 | 223 |
Orderlee/tjoeun | 9,431,748,222,007 | 62e5d91cf73e7aaab011f05f6e2c404cdc7d923d | 904f80808061d498e9c8aeaccee5de172b5e97e6 | /myweb/address/views.py | 0881662edd5d2d2403eb0db68547b9880a8d79e3 | [] | no_license | https://github.com/Orderlee/tjoeun | ad48691f439f47b5518f374ca3089c9d5f2e845e | e494b39ee4574076b52ab1cc2b93443ee0572bae | refs/heads/master | "2022-12-23T07:35:07.481383" | "2020-09-27T14:20:30" | "2020-09-27T14:20:30" | 298,336,476 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render, redirect
#from django.shortcuts import redirect
# import the Address class
from address.models import Address
from django.views.decorators.csrf import csrf_exempt
def home(request):
    # fetch every record in the address table, ordered by name ascending (use '-name' for descending)
    items = Address.objects.order_by('name')
    # store the number of records in the address table
    address_count = Address.objects.all().count()
    # render list.html (the data is passed along with it)
return render(request, 'address/list.html',
{'items':items,'address_count':address_count})
# http://localhost/address/write
def write(request):
    # go to the address/templates/address/write.html page
return render(request, 'address/write.html')
# csrf_exempt: skip CSRF (cross-site request forgery) verification for this view
@csrf_exempt
# request : holds the values entered by the user
def insert(request):
    # store the values passed via POST in an Address object
addr = Address(name=request.POST['name'],
tel=request.POST['tel'],
email=request.POST['email'],
address=request.POST['address'])
    # the record is inserted
addr.save()
    # redirect to http://localhost/address
return redirect('/address')
# http://localhost/address/detail : detail view page
def detail(request):
    # value passed in the query string (GET method)
id=request.GET['idx']
    # look up the record in the address table by its idx value
addr=Address.objects.get(idx=id)
    # handed off to detail.html for rendering
return render(request, 'address/detail.html',
{'addr':addr})
# csrf_exempt: skip CSRF verification for this view
@csrf_exempt
def update(request):
    id=request.POST['idx'] # record number passed via a hidden field
    # build the record with the edited values
addr = Address(idx=id, name=request.POST['name'],
tel=request.POST['tel'], email=request.POST['email'],
address=request.POST['address'])
    # the record is updated
addr.save()
    # go to http://localhost/address (the list is refreshed)
return redirect("/address")
# csrf_exempt: skip CSRF verification for this view
@csrf_exempt
def delete(request):
    # id of the address-book entry to delete
id=request.POST['idx']
    # the record is deleted
Address.objects.get(idx=id).delete()
    # back to the list at http://localhost/address
return redirect("/address")
| UTF-8 | Python | false | false | 2,632 | py | 536 | views.py | 233 | 0.628289 | 0.628289 | 0 | 65 | 30.738462 | 72 |
lance-lh/Data-Structures-and-Algorithms | 6,262,062,328,155 | bbd00433c3ddf2bfb6f8101448ff5a3d1c539be0 | e4a2bbcae9a8ef78780b8bc2504434edb2990260 | /DSA/hashtable/findAnagrams.py | 41e7e30bf2f3d3b58cf55eb8dd31fb27cae9679a | [
"MIT"
] | permissive | https://github.com/lance-lh/Data-Structures-and-Algorithms | e21b4c36df9887ad2d638a00b65e1b7f0d9ee534 | c432654edaeb752536e826e88bcce3ed2ab000fb | refs/heads/master | "2020-04-28T03:10:20.303427" | "2019-09-13T14:16:16" | "2019-09-13T14:16:16" | 174,926,695 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def findAnagrams(self, s, p):
'''
:param s: str
:param p: str
:return: List[int]
'''
from collections import Counter
ls, lp = len(s), len(p)
cp = Counter(p)
cs = Counter()
res = []
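        # slide a window of length len(p) over s, maintaining a running character count of the window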
for i in range(ls):
cs[s[i]] += 1
if i >= lp:
                # once the window is longer than p,
                # decrement the count of the character sliding out on the left
                cs[s[i - lp]] -= 1
                # and drop the key entirely when its count reaches zero
if cs[s[i - lp]] == 0:
del cs[s[i - lp]]
if cs == cp:
res.append(i - lp + 1)
return res
# test
s = "cbaebabacd"
p = "abc"
print(Solution().findAnagrams(s,p)) | UTF-8 | Python | false | false | 763 | py | 255 | findAnagrams.py | 128 | 0.414155 | 0.407602 | 0 | 29 | 25.344828 | 55 |
anrav/bcg_bigdata_case_study | 17,274,358,503,995 | e50affd41203f7be4715a08b6275c4d63909d95d | ab17680fa7bc0ddb9d1d3bf839016e33d3a663b0 | /src/analytics/analysis_top_speeding_vehicles.py | bc60cc5e979e7f4f71a55740c1a5efedfba6ec92 | [] | no_license | https://github.com/anrav/bcg_bigdata_case_study | ced275acde9d9f7826d50533d3dddba158256c9b | f555a97af6f415bc64eff7a5a68010e0e24601f2 | refs/heads/main | "2023-05-31T10:55:35.591002" | "2021-06-28T03:32:10" | "2021-06-28T03:32:10" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyspark.sql.functions import col
from utils import Utils, schemas
class TopSpeedingVehicles:
"""
Analysis 8: Determine the Top 5 Vehicle Makes where drivers are charged with speeding related offences,
has licensed Drivers, uses top 10 used vehicle colours and has car licensed with the Top 25 states with highest
number of offences (to be deduced from the data)
"""
def __process(self, session, files):
"""
        Process the top 5 vehicle makes where speeding-related offences involve licensed drivers
:param session: SparkSession : `~pyspark.sql.SparkSession`
:param files: Yaml config['files']
:return: Returns a : Dataframe `~spark.sql.SparkSession`
Sample output:
|VEH_MAKE_ID|
+-----------+
| FORD|
| HONDA|
| SUBARU|
| HYUNDAI|
------------
"""
# Input files path
source_path = files['inputpath']
charges_use_csv_path = source_path + "/" + files["charges"]
person_use_csv_path = source_path + "/" + files["person"]
units_use_csv_path = source_path + "/" + files["units"]
# Reading from the CSV files
charges_df = Utils.load_csv(session=session, path=charges_use_csv_path, header=True,
schema=schemas.charges_schema)
person_df = Utils.load_csv(session=session, path=person_use_csv_path, header=True,
schema=schemas.primary_person_schema)
units_df = Utils.load_csv(session=session, path=units_use_csv_path, header=True,
schema=schemas.units_schema)
# Joining on charges data with person data on crash_id
join_condition = charges_df.CRASH_ID == person_df.CRASH_ID
join_type = "inner"
# charges with speed related offences with driver licences
speeding_with_licences = charges_df.join(person_df, join_condition, join_type)\
.where((col("CHARGE").like("%SPEED%")) &
(col("DRVR_LIC_TYPE_ID") == "DRIVER LICENSE")) \
.select(charges_df.CRASH_ID,
charges_df.CHARGE,
charges_df.UNIT_NBR,
person_df.DRVR_LIC_TYPE_ID,
person_df.DRVR_LIC_STATE_ID,
person_df.DRVR_LIC_CLS_ID
)
# Top states with highest number of offences
top_states_offences = person_df\
.groupBy("DRVR_LIC_STATE_ID")\
.count()\
.orderBy(col("count").desc()).take(25)
# Top used vehicles colours with licenced
top_licensed_vehicle_colors = units_df.\
join(person_df, units_df.CRASH_ID == person_df.CRASH_ID, "inner") \
.where((col("DRVR_LIC_TYPE_ID") == "DRIVER LICENSE")
& (col("DRVR_LIC_CLS_ID").like("CLASS C%"))) \
.groupBy("VEH_COLOR_ID")\
.count()\
.orderBy(col("count").desc()).take(10)
top_colors = [i["VEH_COLOR_ID"] for i in top_licensed_vehicle_colors]
top_states = [i['DRVR_LIC_STATE_ID'] for i in top_states_offences]
top_vehicles_made = speeding_with_licences\
.join(units_df, speeding_with_licences.CRASH_ID == units_df.CRASH_ID, "inner") \
.where((col("VEH_COLOR_ID").isin(top_colors))
& (col("DRVR_LIC_STATE_ID").isin(top_states))) \
.select("VEH_MAKE_ID")
return top_vehicles_made
@staticmethod
def execute(session, files):
"""
        Invokes the process method to get the analysis report
:param session: SparkSession : `~pyspark.sql.SparkSession`
:param files: Yaml config['files']
:return: Dataset `~pyspark.sql.dataframe` -> Top Vehicles colors
"""
return TopSpeedingVehicles.__process(TopSpeedingVehicles, session, files)
| UTF-8 | Python | false | false | 3,947 | py | 18 | analysis_top_speeding_vehicles.py | 14 | 0.570053 | 0.567266 | 0 | 97 | 39.690722 | 115 |
tkterje/algdat | 4,569,845,238,565 | 11c4b23b8613fdb4df5540a83bb434a5a31bb0ff | 80c677059b9c98776bcc38dce0d6805b5917882b | /HashTable.py | 748e7c3389fd546293e1b3b35e2e248574b6e395 | [] | no_license | https://github.com/tkterje/algdat | 4c0fa3866e7a2eb9b4889a8bbbb274c4e03ae20d | 91a3ba9a127e1e2d3200aeb6cba8259beae7d986 | refs/heads/master | "2022-12-12T21:22:20.525990" | "2020-09-04T18:36:54" | "2020-09-04T18:36:54" | 292,921,516 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #implement a hashtable with chaining in python
#initialize Node
class ListNode:
def __init__(self,key, value):
self.pair = (key,value)
self.next = None
class HashTable:
def __init__(self):
        self.m = 100  # number of buckets in the table
self.table = [None] * self.m
def is_empty(self, hashvalue):
return self.table[hashvalue] == None
def hash_function(self, key):
hashvalue = key % len(self.table)
return hashvalue
def insert(self, key, value):
new = ListNode(key,value)
hashvalue = self.hash_function(key)
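        # chaining: on a collision, prepend the new node to the bucket's linked list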
if self.is_empty(hashvalue):
self.table[hashvalue] = new
else:
new.next = self.table[hashvalue]
self.table[hashvalue] = new
#def print_table(self):
#driver code
if __name__=='__main__':
hashtable = HashTable()
hashtable.insert(5,"Terje")
hashtable.insert(5,"Finn")
print(hashtable.table)
| UTF-8 | Python | false | false | 964 | py | 7 | HashTable.py | 7 | 0.590249 | 0.585062 | 0 | 44 | 20.909091 | 46 |
BriandaBri/Clases | 16,913,581,240,360 | 98ec69dc1808b170b8b30ebf747f9988c898eee6 | 83d3b03428e0e3f27883063e37d673e9ee08f206 | /bloque1/Ej2.py | ed4f431ea2c7496df6a2deaae0a91b17c3c62afd | [] | no_license | https://github.com/BriandaBri/Clases | 4dcd66767928148ef1943c06dce0ae03d0b18853 | dfd0e122a9fae9430130a82e5ecf25bf8b503cff | refs/heads/master | "2021-01-02T09:10:25.504446" | "2017-08-07T21:25:45" | "2017-08-07T21:25:45" | 99,152,197 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def mayor_edad(n):
if n >= 18:
return True
else:
return False
n = 15
print mayor_edad(n) | UTF-8 | Python | false | false | 128 | py | 22 | Ej2.py | 17 | 0.476563 | 0.445313 | 0 | 9 | 12.222222 | 21 |
digipodium/string-and-string-functions-singhaditya12 | 7,825,430,464,177 | e6939d8bff4ef42823ec9c66535a87b6110778f5 | c566451989436330a75537f9cf61698e0481640c | /lw.py | 5203a31cfd1322c822442b4743bfc812d6e56a16 | [] | no_license | https://github.com/digipodium/string-and-string-functions-singhaditya12 | 14d13013671b8f59686043bf8b09f1f91e170a52 | 8c5ce032a5cd4758e1162201d405c6fb3e221cdb | refs/heads/main | "2023-08-28T10:33:03.691620" | "2021-10-12T14:53:42" | "2021-10-12T14:53:42" | 416,361,875 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | str="How Is It Going?"
print(str.lower()) | UTF-8 | Python | false | false | 42 | py | 11 | lw.py | 11 | 0.666667 | 0.666667 | 0 | 2 | 20 | 22 |
soumik12345/Adventures-with-GANS | 19,481,971,656,088 | da80016fb7e3cd922caab6ef02a4ef6d5ff8f7e6 | 2fd4c351c259ddf69176347769291ebf9ba434b4 | /CycleGAN/Discriminator.py | 1f9119cb8d04128390f1d0233b4ef204894ed77f | [] | no_license | https://github.com/soumik12345/Adventures-with-GANS | 970c3aa08f6c5d72555c209cc7824d49da0d309f | 438ea0dfd1a0795dbedad8063a8b68d2346f7ceb | refs/heads/master | "2023-04-02T20:04:03.506016" | "2022-09-24T09:52:26" | "2022-09-24T09:52:26" | 197,522,773 | 18 | 3 | null | false | "2023-03-24T23:40:15" | "2019-07-18T06:09:54" | "2023-02-21T15:54:26" | "2023-03-24T23:40:11" | 60,937 | 14 | 2 | 5 | Jupyter Notebook | false | false | from tensorflow.keras.layers import (
Conv2D,
BatchNormalization,
Input,
LeakyReLU,
ZeroPadding2D
)
from tensorflow.keras.models import Model
class Discriminator:
def __init__(self, n_filters, max_layers, image_size, image_channels):
self.image_size = image_size
self.image_channels = image_channels
self.n_filters = n_filters
self.max_layers = max_layers
self.build_model()
def build_model(self):
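        # PatchGAN-style discriminator: a stack of stride-2 conv blocks ending in a 1-channel sigmoid patch map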
input_placeholder = Input(
shape = (
self.image_size,
self.image_size,
self.image_channels
),
name = 'Discriminator_Input_Layer'
)
x = Conv2D(
self.n_filters,
kernel_size = 4,
strides = 2,
padding = 'same',
name = 'First_Layer'
)(input_placeholder)
x = LeakyReLU(alpha = 0.2)(x)
for layer in range(1, self.max_layers):
output_features = self.n_filters * min(2 ** layer, 8)
x = Conv2D(
output_features,
kernel_size = 4,
strides = 2,
padding = 'same',
use_bias = False,
name = 'Branch_{0}'.format(layer)
)(x)
x = BatchNormalization()(x, training = 1)
x = LeakyReLU(alpha = 0.2)(x)
output_features = self.n_filters * min(2 ** layer, 8)
x = ZeroPadding2D(1)(x)
x = Conv2D(
output_features,
kernel_size = 4,
use_bias = False,
name = 'Branch_last'
)(x)
x = BatchNormalization()(x, training = 1)
x = LeakyReLU(alpha = 0.2)(x)
x = ZeroPadding2D(1)(x)
output = Conv2D(
1, kernel_size = 4,
activation = 'sigmoid',
name = 'Discriminator_Output_Layer'
)(x)
self.discrimator = Model([input_placeholder], output)
        print(output.shape)
def display(self):
self.discrimator.summary()
def save(self, model_path):
self.discrimator.save(model_path) | UTF-8 | Python | false | false | 2,198 | py | 20 | Discriminator.py | 8 | 0.49636 | 0.482257 | 0 | 80 | 26.4875 | 74 |