Dataset columns (value ranges as reported by the viewer):
  repo             string   length 3–91
  file             string   length 16–152
  code             string   length 0–3.77M
  file_length      int64    0–3.77M
  avg_line_length  float64  0–16k
  max_line_length  int64    0–273k
  extension_type   string   1 class
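The schema above is easiest to work with programmatically. A minimal sketch, assuming the rows have been exported to a Parquet file (the filename code_dump.parquet is hypothetical):

import pandas as pd

# Load the dump; the columns match the schema listed above.
df = pd.read_parquet("code_dump.parquet")
print(df.dtypes)

# Example: keep only Python files whose longest line is under 100 characters.
py_small = df[(df["extension_type"] == "py") & (df["max_line_length"] < 100)]
print(len(py_small), "rows")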
LeetCode-Python
LeetCode-Python-master/0240.搜索二维矩阵II/0240-搜索二维矩阵II.py
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix or not matrix[0]:
            return False
        m, n = len(matrix), len(matrix[0])
        for i in range(m):
            for j in range(n):
                if matrix[i][j] == target:
                    return True
        return False
447
27
43
py
LeetCode-Python
LeetCode-Python-master/1812.判断国际象棋棋盘中一个格子的颜色/1812-判断国际象棋棋盘中一个格子的颜色.py
class Solution:
    def squareIsWhite(self, coordinates: str) -> bool:
        row, col = int(coordinates[1]), ord(coordinates[0]) - ord("a")
        # 0 for black, 1 for white
        if col % 2 == 0:
            color = 0
        else:
            color = 1
        if row % 2 == 0:
            color = 1 - color
        return color == 1
354
26.307692
70
py
LeetCode-Python
LeetCode-Python-master/1010.总持续时间可被60整除的歌曲/1010-总持续时间可被60整除的歌曲.py
class Solution(object):
    def numPairsDivisibleBy60(self, time):
        """
        :type time: List[int]
        :rtype: int
        """
        record = [0 for _ in range(0, 60)]
        for index, item in enumerate(time):
            record[item % 60] += 1
        res = 0
        for i in range(0, 60):
            if i in [0, 30] and record[i] > 1:
                # N songs in this bucket pair among themselves:
                # N * (N - 1) ordered pairs, i.e. C(N, 2) = N * (N - 1) / 2
                # unordered; the final // 2 compensates for the double count.
                res += record[i] * (record[i] - 1)
                # Clear the bucket so it is not counted again below.
                record[i] = 0
            elif i:
                res += record[60 - i] * record[i]
        return res // 2
602
30.736842
84
py
LeetCode-Python
LeetCode-Python-master/1302.层数最深叶子节点的和/1302-层数最深叶子节点的和.py
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:
        queue = [root]
        while queue:
            level_sum = 0
            next_queue = []
            for node in queue:
                if node.left:
                    next_queue.append(node.left)
                if node.right:
                    next_queue.append(node.right)
                level_sum += node.val
            queue = next_queue[:]
        return level_sum
671
31
64
py
LeetCode-Python
LeetCode-Python-master/1156.单字符重复子串的最大长度/1156-单字符重复子串的最大长度.py
import collections


class Solution(object):
    def maxRepOpt1(self, text):
        """
        :type text: str
        :rtype: str
        """
        if len(text) == text.count(text[0]):
            return len(text)
        record = collections.Counter(text)
        start, end = 0, 1
        cur, nxt, idx_nxt = text[0], None, 0
        res = 1
        while end < len(text):
            if text[end] != cur:
                if nxt is None:
                    # Found the first character that differs from the current run.
                    nxt = text[end]
                    idx_nxt = end
                else:
                    # A second distinct character appeared; close the window,
                    # which holds a substring with at most one odd character.
                    l = end - 1 - start + 1
                    if l <= record[text[start]]:
                        # A spare copy of the dominant character can be swapped in.
                        res = max(res, l)
                    else:
                        # No spare copy: one position must stay different.
                        res = max(res, l - 1)
                    cur = nxt
                    nxt = None
                    start, end = idx_nxt, idx_nxt
                    idx_nxt = 0
            if end == len(text) - 1:
                # Reached the end of the string; close the last window.
                l = end - start + 1
                if l <= record[text[start]]:
                    res = max(res, l)
                else:
                    res = max(res, l - 1)
            end += 1
        return res
1,341
31.731707
57
py
LeetCode-Python
LeetCode-Python-master/1156.单字符重复子串的最大长度/1156-单字符重复子串的最大长度 2.py
import collections


class Solution(object):
    def maxRepOpt1(self, text):
        """
        :type text: str
        :rtype: str
        """
        if len(text) == text.count(text[0]):
            return len(text)
        record = collections.Counter(text)
        start, end = 0, 1
        cur, nxt, idx_nxt = text[0], None, 0
        res = 1
        while end < len(text):
            if text[end] != cur:
                if nxt is None:
                    # Found the first character that differs from the current run.
                    nxt = text[end]
                    idx_nxt = end
                else:
                    # A second distinct character appeared; close the window,
                    # which holds a substring with at most one odd character.
                    l = end - 1 - start + 1
                    if l <= record[text[start]]:
                        # A spare copy of the dominant character can be swapped in.
                        res = max(res, l)
                    else:
                        # No spare copy: one position must stay different.
                        res = max(res, l - 1)
                    cur = nxt
                    nxt = None
                    start, end = idx_nxt, idx_nxt
                    idx_nxt = 0
            if end == len(text) - 1:
                # Reached the end of the string; close the last window.
                l = end - start + 1
                if l <= record[text[start]]:
                    res = max(res, l)
                else:
                    res = max(res, l - 1)
            end += 1
        return res
1,341
31.731707
57
py
LeetCode-Python
LeetCode-Python-master/0985.查询后的偶数和/0985-查询后的偶数和.py
class Solution(object):
    def sumEvenAfterQueries(self, A, queries):
        """
        :type A: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]
        """
        sumOfEven = sum(num for num in A if not num % 2)
        result = []
        for item in queries:
            value = item[0]
            index = item[1]
            newvalue = A[index] + value
            if not A[index] % 2 and not newvalue % 2:  # even and even
                sumOfEven += value
            elif not A[index] % 2 and newvalue % 2:  # even and odd
                sumOfEven -= A[index]
            elif A[index] % 2 and not newvalue % 2:  # odd and even
                sumOfEven += newvalue
            result.append(sumOfEven)
            A[index] = newvalue
        return result
875
31.444444
69
py
LeetCode-Python
LeetCode-Python-master/1700.无法吃午餐的学生数量/1700-无法吃午餐的学生数量.py
class Solution:
    def countStudents(self, students: List[int], sandwiches: List[int]) -> int:
        from collections import deque
        students = deque(students)
        while sandwiches:
            cur, l = 0, len(students)
            while cur < l:
                student = students.popleft()
                if student == sandwiches[0]:
                    break
                students.append(student)
                cur += 1
            if cur == l:
                break
            sandwiches = sandwiches[1:]
        return len(students)
557
33.875
79
py
LeetCode-Python
LeetCode-Python-master/0470.用Rand7()实现Rand10()/0470-用Rand7()实现Rand10().py
# The rand7() API is already defined for you.
# def rand7():
# @return a random integer in the range 1 to 7

class Solution(object):
    def rand10(self):
        """
        :rtype: int
        """
        while True:
            tmp = (rand7() - 1) * 7 + rand7() - 1
            if tmp < 40:
                return tmp % 10 + 1
332
22.785714
46
py
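The solution above relies on rejection sampling: (rand7() - 1) * 7 + rand7() - 1 is uniform over 0..48, and keeping only values below 40 leaves exactly four full copies of 0..9. A quick empirical sanity check; the rand7 stub below is a stand-in for LeetCode's predefined API:

import random
from collections import Counter

def rand7():
    # Stand-in for LeetCode's predefined rand7() API.
    return random.randint(1, 7)

def rand10():
    while True:
        tmp = (rand7() - 1) * 7 + rand7() - 1  # uniform over 0..48
        if tmp < 40:  # accept only 0..39, i.e. four full copies of 0..9
            return tmp % 10 + 1

counts = Counter(rand10() for _ in range(100000))
print(sorted(counts.items()))  # each of 1..10 should appear roughly 10000 times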
LeetCode-Python
LeetCode-Python-master/0107.二叉树的层序遍历II/0107-二叉树的层序遍历II.py
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def levelOrderBottom(self, root: Optional[TreeNode]) -> List[List[int]]:
        queue = [root]
        res = []
        while queue:
            next_queue = []
            cur_level = []
            for node in queue:
                if node:
                    next_queue.extend([node.left, node.right])
                    cur_level.append(node.val)
            if cur_level:
                res.append(cur_level)
            queue = next_queue
        return res[::-1]
688
28.956522
76
py
LeetCode-Python
LeetCode-Python-master/0969.煎饼排序/0969-煎饼排序.py
class Solution(object):
    def pancakeSort(self, A):
        """
        :type A: List[int]
        :rtype: List[int]
        """
        count = 0
        l = len(A)
        num = 0
        res = []
        while num < l - 1:
            maxe = max(A)
            index = A.index(maxe)
            if index != l - num - 1:
                res.append(index + 1)
                res.append(l - num)
                # Flip the prefix ending at the max element, then flip the
                # whole remaining array so the max lands at the end.
                B = A[:index + 1]
                B = B[::-1]
                B += A[index + 1:]
                A = B[::-1][:-1]
            else:
                # Max element already in place; just shrink the array.
                A = A[:-1]
            num += 1
        return res
837
25.1875
50
py
LeetCode-Python
LeetCode-Python-master/0452.用最少数量的箭引爆气球/0452-用最少数量的箭引爆气球.py
class Solution(object):
    def findMinArrowShots(self, points):
        """
        :type points: List[List[int]]
        :rtype: int
        """
        if not points or not points[0]:
            return 0
        points = sorted(points, key=lambda x: x[1])
        res = 1
        pre_end = points[0][1]
        for i in range(1, len(points)):
            if points[i][0] > pre_end:
                res += 1
                pre_end = points[i][1]
        return res
473
23.947368
53
py
LeetCode-Python
LeetCode-Python-master/1247.交换字符使得字符串相同/1247-交换字符使得字符串相同.py
class Solution(object):
    def minimumSwap(self, s1, s2):
        """
        :type s1: str
        :type s2: str
        :rtype: int
        """
        s = s1 + s2
        x = s.count("x")
        if len(s1) != len(s2) or x % 2 == 1 or (len(s) - x) % 2 == 1:
            return -1
        pair1 = 0
        pair2 = 0
        for i in range(len(s1)):
            if s1[i] == "y" and s2[i] == "x":
                pair1 += 1
            elif s1[i] == "x" and s2[i] == "y":
                pair2 += 1
        return pair1 // 2 + pair2 // 2 + pair1 % 2 + pair2 % 2
569
26.142857
69
py
LeetCode-Python
LeetCode-Python-master/1037.有效的回旋镖/1037-有效的回旋镖.py
class Solution(object):
    def isBoomerang(self, points):
        """
        :type points: List[List[int]]
        :rtype: bool
        """
        # The points form a boomerang iff they are not collinear,
        # i.e. the doubled signed area of the triangle is nonzero.
        x1, x2, x3 = points[0][0], points[1][0], points[2][0]
        y1, y2, y3 = points[0][1], points[1][1], points[2][1]
        return (x1 * y2 - x2 * y1) + (x2 * y3 - x3 * y2) + (x3 * y1 - x1 * y3) != 0
366
25.214286
61
py
LeetCode-Python
LeetCode-Python-master/0748.最短完整词/0748-最短完整词.py
import collections


class Solution(object):
    def shortestCompletingWord(self, licensePlate, words):
        """
        :type licensePlate: str
        :type words: List[str]
        :rtype: str
        """
        charlist = []
        for i, char in enumerate(licensePlate):
            if char.isalpha():
                charlist.append(char.lower())
        charlistrecord = collections.Counter(charlist)
        res = "aaaaaaaaaaaaaaaa"  # sentinel longer than any candidate word
        for word in words:
            cnt = 0
            wordrecord = collections.Counter(word)
            for char in charlist:
                if wordrecord.get(char, 0) >= charlistrecord[char]:
                    cnt += 1
            if cnt == len(charlist):
                if len(word) < len(res):
                    res = word
        return res
905
29.2
67
py
LeetCode-Python
LeetCode-Python-master/2446.判断两个事件是否存在冲突/2446-判断两个事件是否存在冲突.py
class Solution:
    def haveConflict(self, event1: List[str], event2: List[str]) -> bool:
        return not (self.time1EarlierThanTime2(event1[1], event2[0]) or
                    self.time1EarlierThanTime2(event2[1], event1[0]))

    def time1EarlierThanTime2(self, time1, time2):
        h1, h2 = int(time1[:2]), int(time2[:2])
        m1, m2 = int(time1[3:]), int(time2[3:])
        return h1 < h2 or (h1 == h2 and m1 < m2)
409
50.25
121
py
LeetCode-Python
LeetCode-Python-master/6424.半有序排列/6424-半有序排列.py
class Solution:
    def semiOrderedPermutation(self, nums: List[int]) -> int:
        index_1 = nums.index(1)
        index_n = nums.index(len(nums))
        if index_1 < index_n:
            return index_1 + (len(nums) - 1 - index_n)
        else:
            return index_1 + (len(nums) - 1 - index_n - 1)
307
37.5
61
py
LeetCode-Python
LeetCode-Python-master/0243.最短单词距离/0243-最短单词距离.py
class Solution(object):
    def shortestDistance(self, words, word1, word2):
        """
        :type words: List[str]
        :type word1: str
        :type word2: str
        :rtype: int
        """
        res = len(words) - 1
        pos1, pos2 = -1, -1
        for i, word in enumerate(words):
            if word == word1:
                pos1 = i
            elif word == word2:
                pos2 = i
            if pos1 != -1 and pos2 != -1:
                res = min(res, abs(pos1 - pos2))
        return res
521
28
52
py
LeetCode-Python
LeetCode-Python-master/0011.盛最多水的容器/0011-盛最多水的容器.py
class Solution(object):
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        left, right = 0, len(height) - 1
        res = 0
        while left < right:
            res = max(res, (right - left) * min(height[left], height[right]))
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return res
463
28
77
py
LeetCode-Python
LeetCode-Python-master/1018.可被5整除的二进制前缀/1018-可被5整除的二进制前缀.py
class Solution(object):
    def prefixesDivBy5(self, A):
        """
        :type A: List[int]
        :rtype: List[bool]
        """
        n = A[0]
        res = list()
        for i in range(0, len(A) - 1):
            if n % 5 == 0:
                res.append(True)
            else:
                res.append(False)
            n *= 2
            n += A[i + 1]
        if n % 5 == 0:
            res.append(True)
        else:
            res.append(False)
        return res
497
21.636364
38
py
LeetCode-Python
LeetCode-Python-master/面试题 02.07.链表相交/面试题 02.07-链表相交.py
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        pa, pb = headA, headB
        if not headA or not headB:
            return None
        la, lb = 0, 0
        while pa:
            la += 1
            pa = pa.next
        while pb:
            lb += 1
            pb = pb.next
        if la < lb:
            headA, headB = headB, headA
            la, lb = lb, la
        diff = la - lb
        pa = headA
        while diff:
            pa = pa.next
            diff -= 1
        pb = headB
        while pa and pb and pa != pb:
            pa = pa.next
            pb = pb.next
        return pa if pa == pb else None
818
21.75
80
py
LeetCode-Python
LeetCode-Python-master/0296.最佳的碰头地点/0296-最佳的碰头地点.py
class Solution(object):
    def minTotalDistance(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        if not grid or not grid[0]:
            return -1
        m, n = len(grid), len(grid[0])
        row, col = [], []
        for i in range(m):
            for j in range(n):
                if grid[i][j]:
                    row.append(i)
                    col.append(j)
        meet_point = [self.findMedian(row), self.findMedian(col)]
        res = 0
        for i in range(m):
            for j in range(n):
                if grid[i][j]:
                    res += abs(i - meet_point[0]) + abs(j - meet_point[1])
        return res

    def findMedian(self, nums):
        nums.sort()
        return nums[len(nums) // 2]
811
26.066667
74
py
LeetCode-Python
LeetCode-Python-master/0189.旋转数组/0189-旋转数组.py
class Solution(object):
    def rotate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        # Move the last element to the front, k times in total.
        for i in range(k):
            tmp = nums.pop()
            nums.insert(0, tmp)
346
25.692308
74
py
LeetCode-Python
LeetCode-Python-master/2330.有效的回文IV/2330-有效的回文IV.py
class Solution:
    def makePalindrome(self, s: str) -> bool:
        left, right = 0, len(s) - 1
        step = 0
        while left < right:
            if s[left] != s[right]:
                step += 1
            left += 1
            right -= 1
        return step <= 2
276
22.083333
45
py
LeetCode-Python
LeetCode-Python-master/0148.排序链表/0148-排序链表.py
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution(object):
    def sortList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Merge sort:
        # 1. split the list into two halves,
        # 2. sort each half recursively,
        # 3. merge the two sorted halves into one list.
        if not head or not head.next:
            return head
        dummy = ListNode(-1)
        dummy.next = head
        pre, slow, fast = head, head, head
        while fast and fast.next:
            pre = slow
            slow = slow.next
            fast = fast.next.next
        first = head
        second = pre.next
        pre.next = None
        sortedfirst = self.sortList(first)
        sortedsecond = self.sortList(second)
        return self.merge(sortedfirst, sortedsecond)

    def merge(self, l1, l2):
        if not l1:
            return l2
        if not l2:
            return l1
        if l1.val <= l2.val:
            tmp = ListNode(l1.val)
            tmp.next = self.merge(l1.next, l2)
        else:
            tmp = ListNode(l2.val)
            tmp.next = self.merge(l1, l2.next)
        return tmp
1,257
23.666667
52
py
LeetCode-Python
LeetCode-Python-master/1016.子串能表示从1到N数字的二进制串/1016-子串能表示从1到N数字的二进制串.py
class Solution(object):
    def queryString(self, S, N):
        """
        :type S: str
        :type N: int
        :rtype: bool
        """
        for i in range(1, N + 1):
            if str(bin(i)[2:]) not in S:
                return False
        return True
312
23.076923
40
py
LeetCode-Python
LeetCode-Python-master/1016.子串能表示从1到N数字的二进制串/1016-子串能表示从1到N数字的二进制串 2.py
class Solution(object):
    def queryString(self, S, N):
        """
        :type S: str
        :type N: int
        :rtype: bool
        """
        for i in range(1, N + 1):
            if str(bin(i)[2:]) not in S:
                return False
        return True
312
23.076923
40
py
LeetCode-Python
LeetCode-Python-master/0365.水壶问题/0365-水壶问题.py
class Solution(object):
    def canMeasureWater(self, x, y, z):
        """
        :type x: int
        :type y: int
        :type z: int
        :rtype: bool
        """
        if not z:
            return True
        if not x:
            return y == z
        if not y:
            return x == z
        if x + y < z:
            return False

        def gcd(a, b):
            while a % b:
                a, b = b, a % b
            return b

        # By Bezout's identity, z is measurable iff it is a multiple of gcd(x, y).
        return not z % gcd(x, y)
482
22
39
py
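The gcd test in the solution above is Bezout's identity in action: the amounts reachable by pouring are exactly the multiples of gcd(x, y) that fit in the two jugs. A small standalone check (the function name can_measure is ours, not LeetCode's):

from math import gcd

def can_measure(x, y, z):
    # Mirrors the solution above: z must fit in the jugs and divide by gcd(x, y).
    if z == 0:
        return True
    if x + y < z:
        return False
    return z % gcd(x, y) == 0

print(can_measure(3, 5, 4))  # True: the classic two-jug puzzle
print(can_measure(2, 6, 5))  # False: gcd(2, 6) = 2 does not divide 5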
LeetCode-Python
LeetCode-Python-master/0633.平方数之和/0633-平方数之和.py
class Solution(object):
    def judgeSquareSum(self, c):
        """
        :type c: int
        :rtype: bool
        """
        for i in range(int(c ** 0.5) + 1):
            t = c - i ** 2
            s = int(t ** 0.5)
            if t == s ** 2:
                return True
        return False if c else True
315
25.333333
42
py
LeetCode-Python
LeetCode-Python-master/1162.地图分析/1162-地图分析.py
class Solution(object):
    def maxDistance(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        from collections import deque
        m, n = len(grid), len(grid[0])
        land = []
        for i in range(m):
            for j in range(n):
                if grid[i][j] == 1:
                    land.append((i, j))
        if not land or len(land) == m * n:
            return -1
        res = 0
        dx = [1, -1, 0, 0]
        dy = [0, 0, 1, -1]
        queue = deque(land)
        visited = set(land)
        while queue:
            for _ in range(len(queue)):
                x0, y0 = queue.popleft()
                for k in range(4):
                    x = x0 + dx[k]
                    y = y0 + dy[k]
                    if 0 <= x < m and 0 <= y < n and grid[x][y] == 0 and (x, y) not in visited:
                        queue.append((x, y))
                        visited.add((x, y))
            res += 1
        return res - 1
1,016
25.763158
95
py
LeetCode-Python
LeetCode-Python-master/0134.加油站/0134-加油站.py
class Solution(object):
    def canCompleteCircuit(self, gas, cost):
        """
        :type gas: List[int]
        :type cost: List[int]
        :rtype: int
        """
        idx = 0
        for i in range(len(gas)):
            if i < idx:
                continue
            j = i
            left_gas = gas[i]
            while left_gas > 0:
                if left_gas < cost[j]:
                    # Not enough gas to reach the next station from j.
                    idx = max(idx, j)
                    break
                left_gas -= cost[j]
                if (j + 1) % len(gas) == i:
                    return i
                j = (j + 1) % len(gas)
                left_gas += gas[j]
        return -1
732
26.148148
44
py
LeetCode-Python
LeetCode-Python-master/1353.最多可以参加的会议数目/1353-最多可以参加的会议数目.py
class Solution(object):
    def maxEvents(self, events):
        """
        :type events: List[List[int]]
        :rtype: int
        """
        from heapq import heappush, heappop
        if not events:
            return 0
        # Sort, then reverse so upcoming events can be popped from the end in O(1).
        events = sorted(events, key=lambda x: (x[0], x[1]))[::-1]
        queue = []
        res = 0
        for day in range(1, 10 ** 5 + 1):
            # Discard every event whose end day is already in the past.
            while queue and queue[0] < day:
                heappop(queue)
            # Push the end day of every event that has started by today.
            while events and events[-1][0] <= day:
                last = events.pop()
                heappush(queue, last[1])
            if queue:
                # Some event is attendable today: attend the one ending soonest.
                heappop(queue)
                res += 1
            if not queue and not events:
                # All events have been processed.
                break
        return res
948
27.757576
66
py
LeetCode-Python
LeetCode-Python-master/1559.二维网格图中探测环/1559-二维网格图中探测环.py
class Solution(object):
    def containsCycle(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: bool
        """
        dx = [1, -1, 0, 0]
        dy = [0, 0, 1, -1]
        ufs = UnionFindSet(grid)
        m, n = len(grid), len(grid[0])
        for i in range(m):
            for j in range(n):
                for next_i, next_j in [(i - 1, j), (i, j - 1)]:
                    if 0 <= next_i < m and 0 <= next_j < n and grid[next_i][next_j] == grid[i][j]:
                        if ufs.find(i * n + j) == ufs.find(next_i * n + next_j):
                            return True
                        else:
                            ufs.union(i * n + j, next_i * n + next_j)
        return False


class UnionFindSet(object):
    def __init__(self, grid):
        m, n = len(grid), len(grid[0])
        self.roots = [-1 for i in range(m * n)]
        self.rank = [0 for i in range(m * n)]
        self.count = 0
        for i in range(m):
            for j in range(n):
                self.roots[i * n + j] = i * n + j
                self.count += 1

    def find(self, member):
        tmp = []
        while member != self.roots[member]:
            tmp.append(member)
            member = self.roots[member]
        for root in tmp:
            self.roots[root] = member
        return member

    def union(self, p, q):
        parentP = self.find(p)
        parentQ = self.find(q)
        if parentP != parentQ:
            if self.rank[parentP] > self.rank[parentQ]:
                self.roots[parentQ] = parentP
            elif self.rank[parentP] < self.rank[parentQ]:
                self.roots[parentP] = parentQ
            else:
                self.roots[parentQ] = parentP
                self.rank[parentP] -= 1
            self.count -= 1
1,843
33.148148
98
py
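The UnionFindSet class above (find with path compression, rank-based union) is self-contained and can be exercised directly; a minimal sketch on a hypothetical 2x2 grid:

# Exercise the UnionFindSet from the solution above.
grid = [["a", "a"], ["a", "a"]]
ufs = UnionFindSet(grid)

# Union three of the four cells (flattened indices: i * n + j).
ufs.union(0, 1)
ufs.union(1, 3)

print(ufs.find(0) == ufs.find(3))  # True: cells 0 and 3 are connected
print(ufs.find(0) == ufs.find(2))  # False: cell 2 is still its own component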
LeetCode-Python
LeetCode-Python-master/0098.验证二叉搜索树/0098-验证二叉搜索树.py
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        def inorder(node):
            if not node:
                return []
            return inorder(node.left) + [node.val] + inorder(node.right)

        l = inorder(root)
        return l == sorted(l) and len(l) == len(set(l))
542
26.15
72
py
LeetCode-Python
LeetCode-Python-master/1863.找出所有子集的异或总和再求和/1863-找出所有子集的异或总和再求和.py
from functools import reduce


class Solution:
    def subsetXORSum(self, nums: List[int]) -> int:
        subsets = [[]]
        res = 0
        for num in nums:
            new_subsets = []
            for subset in subsets:
                new_subset = subset + [num]
                res += reduce(lambda x, y: x ^ y, new_subset)
                new_subsets.append(new_subset)
            subsets += new_subsets
        return res
400
32.416667
59
py
LeetCode-Python
LeetCode-Python-master/5433.n的第k个因子/5433-n的第k个因子.py
class Solution(object):
    def kthFactor(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: int
        """
        l = []
        for i in range(1, int(n ** 0.5) + 1):
            if n % i == 0:
                l.append(i)
        total = len(l) * 2 if l[-1] ** 2 != n else len(l) * 2 - 1
        if total < k:
            return -1
        if k - 1 < len(l):
            return l[k - 1]
        else:
            idx = (len(l) - 1) * 2 - k
            return n // l[idx] if total % 2 else n // l[idx + 2]
557
25.571429
65
py
LeetCode-Python
LeetCode-Python-master/1052.爱生气的书店老板/1052-爱生气的书店老板.py
class Solution(object):
    def maxSatisfied(self, customers, grumpy, X):
        """
        :type customers: List[int]
        :type grumpy: List[int]
        :type X: int
        :rtype: int
        """
        # record[i]: prefix sum of customers satisfied up to i while the owner is calm.
        record = [0 for _ in range(len(grumpy))]
        for i in range(len(grumpy)):
            if grumpy[i] == 0:
                record[i] += record[i - 1] + customers[i]
            else:
                record[i] += record[i - 1]
        tmp = record[-1]  # customers already satisfied without the technique
        # prefix[i]: prefix sum of all customers up to i.
        prefix = [0 for _ in range(len(grumpy))]
        prefix[0] = customers[0]
        for i in range(1, len(grumpy)):
            prefix[i] += prefix[i - 1] + customers[i]
        lo, hi = 0, X - 1
        newcus = 0
        while hi < len(grumpy):
            if lo == 0:
                presum = prefix[hi]  # all customers inside the window
                angsum = record[hi]  # customers in the window satisfied anyway
            else:
                presum = prefix[hi] - prefix[lo - 1]
                angsum = record[hi] - record[lo - 1]
            earn = presum - angsum  # extra customers won by using the technique here
            newcus = max(earn, newcus)
            hi += 1
            lo += 1
        return tmp + newcus
1,316
28.266667
57
py
LeetCode-Python
LeetCode-Python-master/1255.得分最高的单词集合/1255-得分最高的单词集合.py
class Solution(object):
    def maxScoreWords(self, words, letters, score):
        """
        :type words: List[str]
        :type letters: List[str]
        :type score: List[int]
        :rtype: int
        """
        from collections import defaultdict, Counter
        dic = dict()
        letter_dic = defaultdict(int)
        for i, val in enumerate(score):
            # dic: key is the letter, value is that letter's score.
            dic[chr(ord("a") + i)] = val
        # letter_dic: key is the letter, value is how many copies remain.
        letter_dic = Counter(letters)
        s = set(letters)
        v_words = []
        for word in words:
            # Drop words that use a letter absent from letters altogether.
            flag = 0
            for char in word:
                if char not in s:
                    flag = 1
            if flag:
                continue
            v_words.append(word)
        self.res = 0

        def helper(word, letter_dic):
            # Return True if word can be formed from the letters left in letter_dic.
            dicc = Counter(word)
            for key in dicc:
                if dicc[key] > letter_dic[key]:
                    return False
            return True

        def dfs(start, tmp):
            self.res = max(self.res, tmp)
            if start >= len(v_words):
                return
            for i in range(start, len(v_words)):  # scan from start to avoid duplicates
                if helper(v_words[i], letter_dic):  # the current word can be formed
                    for char in v_words[i]:  # consume the letters
                        letter_dic[char] -= 1
                    dfs(i + 1, tmp + sum([dic[char] for char in v_words[i]]))
                    for char in v_words[i]:  # backtrack: restore the counts
                        letter_dic[char] += 1

        dfs(0, 0)
        return self.res
1,690
32.82
83
py
LeetCode-Python
LeetCode-Python-master/2418.按身高排序/2418-按身高排序.py
class Solution:
    def sortPeople(self, names: List[str], heights: List[int]) -> List[str]:
        combine = [(name, heights[index]) for index, name in enumerate(names)]
        return [pair[0] for pair in sorted(combine, key=lambda x: -x[1])]
257
42
78
py
LeetCode-Python
LeetCode-Python-master/0415.字符串相加/0415-字符串相加.py
class Solution(object):
    def addStrings(self, s1, s2):
        """
        :type num1: str
        :type num2: str
        :rtype: str
        """
        l1, l2 = len(s1), len(s2)
        if l1 < l2:
            s1, s2 = s2, s1
            l1, l2 = l2, l1
        s1 = [int(x) for x in s1]
        s2 = [int(x) for x in s2]
        s1, s2 = s1[::-1], s2[::-1]
        for i, digit in enumerate(s2):
            s1[i] += s2[i]
        s1 = self.CarrySolver(s1)
        s1 = s1[::-1]
        return "".join(str(x) for x in s1)

    def CarrySolver(self, nums):
        # Resolve carries so each position holds a single digit,
        # e.g. [15, 27, 12] -> [5, 8, 4, 1] (least-significant digit first).
        i = 0
        while i < len(nums):
            if nums[i] >= 10:
                carrier = nums[i] // 10
                if i == len(nums) - 1:
                    nums.append(carrier)
                else:
                    nums[i + 1] += carrier
                nums[i] %= 10
            i += 1
        return nums
983
26.333333
42
py
LeetCode-Python
LeetCode-Python-master/1230.抛掷硬币/1230-抛掷硬币.py
class Solution(object):
    def probabilityOfHeads(self, prob, target):
        """
        :type prob: List[float]
        :type target: int
        :rtype: float
        """
        dp = [0 for _ in range(len(prob) + 1)]
        dp[1] = prob[0]
        dp[0] = 1 - prob[0]
        for i in range(1, len(prob)):
            new_dp = [0 for _ in range(len(prob) + 1)]
            for j in range(target + 1):
                new_dp[j] = dp[j] * (1 - prob[i]) + dp[j - 1] * prob[i]
            dp = new_dp[:]
        return dp[target]

        # 2-D version of the same DP:
        # dp[i][j] is the probability that the first i coins show exactly j heads.
        # dp = [[0 for _ in range(len(prob) + 1)] for _ in range(len(prob))]
        # dp[0][1] = prob[0]
        # dp[0][0] = 1 - prob[0]
        # for i, p in enumerate(prob):
        #     for j in range(target + 1):
        #         if i > 0:
        #             dp[i][j] += dp[i - 1][j] * (1 - p)
        #             dp[i][j] += dp[i - 1][j - 1] * (p)
        # return dp[-1][target]
963
34.703704
76
py
LeetCode-Python
LeetCode-Python-master/2089.找出数组排序后的目标下标/2089-找出数组排序后的目标下标.py
class Solution:
    def targetIndices(self, nums: List[int], target: int) -> List[int]:
        nums.sort()
        res = []
        for i, num in enumerate(nums):
            if num == target:
                res.append(i)
        return res
242
29.375
71
py
LeetCode-Python
LeetCode-Python-master/0752.打开转盘锁/0752-打开转盘锁.py
from collections import deque


class Solution(object):
    def openLock(self, deadends, target):
        """
        :type deadends: List[str]
        :type target: str
        :rtype: int
        """
        deadends = set(deadends)
        if "0000" in deadends:  # the start itself is a deadend: no move is possible
            return -1
        queue = deque()
        queue.append(["0000", 0])
        cnt = 0
        while queue:
            node, cnt = queue.popleft()  # cnt is the number of turns taken so far
            if node == target:  # reached the target combination
                return cnt
            for i in range(4):
                for j in [1, -1]:
                    next_node = node[:i] + str((int(node[i]) + j) % 10) + node[i + 1:]
                    if next_node not in deadends:  # neither a deadend nor visited
                        deadends.add(next_node)  # mark visited to avoid revisiting
                        queue.append([next_node, cnt + 1])
        return -1
854
27.5
87
py
LeetCode-Python
LeetCode-Python-master/面试题64.求1+2+…+n/面试题64-求1+2+…+n.py
class Solution(object):
    def sumNums(self, n):
        """
        :type n: int
        :rtype: int
        """
        return sum(range(1, n + 1))
150
20.571429
35
py
LeetCode-Python
LeetCode-Python-master/2708.一个小组的最大实力值/2708-一个小组的最大实力值.py
from functools import reduce


class Solution:
    def maxStrength(self, nums: List[int]) -> int:
        if all([num == 0 for num in nums]):
            return 0
        if len(nums) == 1:
            return nums[0]
        pos = [num for num in nums if num > 0]
        neg = [num for num in nums if num < 0]
        res = 0
        neg_length = len(neg)
        if neg_length >= 2:
            if neg_length % 2 == 0:
                res = reduce((lambda x, y: x * y), neg)
            else:
                neg.sort()
                res = reduce((lambda x, y: x * y), neg[:-1])
        if pos:
            res = reduce((lambda x, y: x * y), pos) * max(res, 1)
        return res
655
28.818182
65
py
LeetCode-Python
LeetCode-Python-master/0207.课程表/0207-课程表.py
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        # 1. Find all nodes/courses with indegree 0 and let them enter a queue.
        from collections import defaultdict, deque
        indegree = defaultdict(int)
        children = defaultdict(set)
        all_courses = set()
        for cur, pre in prerequisites:
            indegree[cur] += 1
            children[pre].add(cur)
            all_courses.add(cur)
            all_courses.add(pre)
        queue = deque([])
        for course in all_courses:
            if indegree[course] == 0:
                queue.append(course)
        # 2. BFS: let a course with indegree 0 leave the queue, and let its
        #    children whose indegree drops to 0 enter the queue.
        studied_course = 0
        while queue:
            cur = queue.popleft()
            studied_course += 1
            for child in children[cur]:
                indegree[child] -= 1
                if indegree[child] == 0:
                    queue.append(child)
        return studied_course == len(all_courses)
1,066
35.793103
111
py
LeetCode-Python
LeetCode-Python-master/0090.子集II/0090-子集II.py
class Solution(object):
    def subsetsWithDup(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        res = [[]]
        for num in nums:
            tmp = res[:]
            for item in res:
                newitem = sorted(item + [num])
                if newitem not in tmp:
                    tmp.append(newitem)
            res = tmp[:]
        return res
426
25.6875
46
py
LeetCode-Python
LeetCode-Python-master/1267.统计参与通信的服务器/1267-统计参与通信的服务器.py
class Solution(object):
    def countServers(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        m, n = len(grid), len(grid[0])
        res = 0
        for i in range(m):
            for j in range(n):
                if grid[i][j] == 1:
                    # horizontal
                    for col in range(n):
                        if col != j:
                            if grid[i][col] in [1, 2]:
                                grid[i][j] = 2
                                grid[i][col] = 2
                    # vertical
                    for row in range(m):
                        if row != i:
                            if grid[row][j] in [1, 2]:
                                grid[i][j] = 2
                                grid[row][j] = 2
                if grid[i][j] == 2:
                    res += 1
        return res
943
31.551724
54
py
LeetCode-Python
LeetCode-Python-master/面试题35.复杂链表的复制/面试题35-复杂链表的复制.py
""" # Definition for a Node. class Node: def __init__(self, x, next=None, random=None): self.val = int(x) self.next = next self.random = random """ class Solution(object): def copyRandomList(self, head): """ :type head: Node :rtype: Node """ mapping = {} # key is the old node, val is the new node p = head while p: mapping[p] = Node(p.val) p = p.next p = head while p: if p.next: mapping[p].next = mapping[p.next] if p.random: mapping[p].random = mapping[p.random] p = p.next return mapping[head] if head else head
725
23.2
63
py
LeetCode-Python
LeetCode-Python-master/1544.整理字符串/1544-整理字符串.py
class Solution(object):
    def makeGood(self, s):
        """
        :type s: str
        :rtype: str
        """
        stack = []
        for ch in s:
            if not stack or abs(ord(ch) - ord(stack[-1])) != 32:
                stack.append(ch)
            else:
                stack.pop()
        return "".join(stack)
330
22.642857
64
py
LeetCode-Python
LeetCode-Python-master/1392.最长快乐前缀/1392-最长快乐前缀.py
class Solution(object):
    def longestPrefix(self, s):
        """
        :type s: str
        :rtype: str
        """
        base = 131
        mod = 10 ** 9 + 7
        res = ""
        prefix, suffix = 0, 0
        multiple = 1
        for i in range(len(s) - 1):
            prefix = (prefix * base + ord(s[i])) % mod
            suffix = (ord(s[-(i + 1)]) * multiple + suffix) % mod
            if prefix == suffix:
                res = s[:i + 1]
            multiple = multiple * base % mod
        return res
535
25.8
66
py
LeetCode-Python
LeetCode-Python-master/0931.下降路径最小和/0931-下降路径最小和.py
class Solution(object):
    def minFallingPathSum(self, A):
        """
        :type A: List[List[int]]
        :rtype: int
        """
        m = len(A)
        n = m
        if A == [[]]:
            return 0
        dp = [[0 for _ in range(n)] for t in range(m)]
        for i in range(n):
            dp[0][i] = A[0][i]
        for i in range(1, m):
            for j in range(n):
                if not j:  # the first column
                    dp[i][j] = min(dp[i - 1][j], dp[i - 1][j + 1]) + A[i][j]
                elif j == n - 1:  # the last column
                    dp[i][j] = min(dp[i - 1][j], dp[i - 1][j - 1]) + A[i][j]
                else:
                    dp[i][j] = min(dp[i - 1][j], dp[i - 1][j - 1], dp[i - 1][j + 1]) + A[i][j]
        return min(dp[-1])
1,058
32.09375
94
py
LeetCode-Python
LeetCode-Python-master/0780.到达终点/0780-到达终点.py
class Solution(object):
    def reachingPoints(self, sx, sy, tx, ty):
        """
        :type sx: int
        :type sy: int
        :type tx: int
        :type ty: int
        :rtype: bool
        """
        if tx < sx or ty < sy:
            return False
        if tx == sx and (ty - sy) % sx == 0:
            return True
        if ty == sy and (tx - sx) % sy == 0:
            return True
        return self.reachingPoints(sx, sy, tx % ty, ty % tx)
457
27.625
60
py
LeetCode-Python
LeetCode-Python-master/0060.第k个排列/0060-第k个排列.py
class Solution(object):
    def getPermutation(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        fac = [1]
        for i in range(2, n + 1):
            fac.append(fac[-1] * i)
        digits = [i for i in range(1, n + 1)]
        self.res = ""

        def dfs(left_digit, tmp, kk):
            if left_digit == 0:
                self.res = tmp[:]
                return
            for digit in digits:
                kk -= fac[left_digit - 2]
                if kk <= 0:
                    kk += fac[left_digit - 2]
                    fac.pop()
                    digits.remove(digit)
                    dfs(left_digit - 1, tmp + str(digit), kk)
                    break

        dfs(n, "", k)
        return self.res
798
27.535714
61
py
LeetCode-Python
LeetCode-Python-master/0139.单词拆分/0139-单词拆分.py
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        wordDict = set(wordDict)
        record = [0]
        for i in range(len(s) + 1):
            for j in record:
                if s[j:i] in wordDict:
                    record.append(i)
                    break
        return record[-1] == len(s)
398
29.692308
61
py
LeetCode-Python
LeetCode-Python-master/0845.数组中的最长山脉/0845-数组中的最长山脉.py
class Solution(object):
    def longestMountain(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        l, r = [0 for _ in A], [0 for _ in A]
        for i in range(1, len(A)):
            if A[i] > A[i - 1]:
                l[i] = l[i - 1] + 1
        for i in range(len(A) - 2, -1, -1):
            if A[i] > A[i + 1]:
                r[i] = r[i + 1] + 1
        res = 0
        for i in range(len(A)):
            if l[i] and r[i] and l[i] + r[i] > 1:
                res = max(l[i] + r[i] + 1, res)
        return res
657
26.416667
49
py
LeetCode-Python
LeetCode-Python-master/0941.有效的山脉数组/0941-有效的山脉数组.py
class Solution(object):
    def validMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: bool
        """
        if len(A) < 3:
            return False
        max_A_pos = A.index(max(A))
        if max_A_pos in [0, len(A) - 1]:
            return False
        first, last = A[:max_A_pos + 1], A[max_A_pos:]
        if len(first) != len(set(first)) or len(last) != len(set(last)):
            return False
        return first == sorted(first) and last == sorted(last)[::-1]
515
27.666667
72
py
LeetCode-Python
LeetCode-Python-master/0912.排序数组/0912-排序数组.py
class Solution(object):
    def sortArray(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        return sorted(nums)
171
20.5
30
py
LeetCode-Python
LeetCode-Python-master/0912.排序数组/0912-排序数组 2.py
class Solution(object):
    def sortArray(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        return sorted(nums)
171
20.5
30
py
LeetCode-Python
LeetCode-Python-master/0212.单词搜索II/0212-单词搜索II.py
class Trie(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = {}

    def insert(self, word):
        """
        Inserts a word into the trie.
        :type word: str
        :rtype: None
        """
        node = self.root
        for char in word:
            node = node.setdefault(char, {})
        node["end"] = True

    def search(self, word):
        """
        Returns if the word is in the trie.
        :type word: str
        :rtype: bool
        """
        node = self.root
        for char in word:
            if char not in node:
                return False
            node = node[char]
        return "end" in node

    def startsWith(self, prefix):
        """
        Returns if there is any word in the trie that starts with the given prefix.
        :type prefix: str
        :rtype: bool
        """
        node = self.root
        for char in prefix:
            if char not in node:
                return False
            node = node[char]
        return True


class Solution(object):
    def findWords(self, board, words):
        """
        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        if not board or not board[0]:
            return []
        m, n = len(board), len(board[0])
        dx = [1, -1, 0, 0]
        dy = [0, 0, 1, -1]
        tree = Trie()
        for word in words:
            tree.insert(word)
        words = set(words)
        res = set()

        def dfs(x0, y0, node, tmpword):
            visited.add((x0, y0))
            for k in range(4):
                x = x0 + dx[k]
                y = y0 + dy[k]
                if 0 <= x < m and 0 <= y < n and board[x][y] in node and (x, y) not in visited:
                    visited.add((x, y))
                    dfs(x, y, node[board[x][y]], tmpword + board[x][y])
                    visited.remove((x, y))
            if tmpword in words:
                res.add(tmpword)

        for i in range(m):
            for j in range(n):
                if board[i][j] in tree.root:
                    visited = {(i, j)}  # was set((i, j)), which builds {i, j} by mistake
                    dfs(i, j, tree.root[board[i][j]], board[i][j])
        return list(res)
2,358
27.083333
95
py
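The dict-based Trie above can also be used on its own; a minimal sketch (the word list is arbitrary):

# Exercise the Trie from the solution above.
tree = Trie()
for w in ["oath", "pea", "eat", "rain"]:
    tree.insert(w)

print(tree.search("eat"))     # True
print(tree.search("ea"))      # False: "ea" is only a prefix
print(tree.startsWith("ea"))  # True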
LeetCode-Python
LeetCode-Python-master/1121.将数组分成几个递增序列/1121-将数组分成几个递增序列.py
class Solution(object):
    def canDivideIntoSubsequences(self, nums, K):
        """
        :type nums: List[int]
        :type K: int
        :rtype: bool
        """
        from collections import Counter
        return max(Counter(nums).values()) * K <= len(nums)
269
29
59
py
LeetCode-Python
LeetCode-Python-master/1087.字母切换/1087-字母切换.py
class Solution(object):
    def permute(self, S):
        """
        :type S: str
        :rtype: List[str]
        """
        def generate(s, tmp):
            if not s:
                res.append(tmp[:])
                return
            if s[0].isalpha():
                generate(s[1:], tmp + s[0])
            else:
                for i in range(len(s)):
                    if s[i] == "}":
                        substring = s[1:i]
                        break
                chars = substring.split(",")
                chars.sort()
                for char in chars:
                    generate(s[i + 1:], tmp + char)

        res = []
        generate(S, "")
        return res
719
26.692308
51
py
meta_sequential_prediction
meta_sequential_prediction-main/training_loops.py
import math
import torch
from torch import nn
import pytorch_pfn_extras as ppe
from utils.clr import simclr
from utils.misc import freq_to_wave
from tqdm import tqdm


def loop_seqmodel(manager, model, optimizer, train_loader, config, device):
    while not manager.stop_trigger:
        for images in train_loader:
            with manager.run_iteration():
                reconst = True if manager.iteration < config['training_loop']['args']['reconst_iter'] else False
                if manager.iteration >= config['training_loop']['args']['lr_decay_iter']:
                    optimizer.param_groups[0]['lr'] = config['lr'] / 3.
                else:
                    optimizer.param_groups[0]['lr'] = config['lr']
                model.train()
                images = torch.stack(images).transpose(1, 0).to(device)
                loss, (loss_bd, loss_orth, _) = model.loss(
                    images, T_cond=config['T_cond'],
                    return_reg_loss=True, reconst=reconst)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                ppe.reporting.report({
                    'train/loss': loss.item(),
                    'train/loss_bd': loss_bd.item(),
                    'train/loss_orth': loss_orth.item(),
                })
            if manager.stop_trigger:
                break


def loop_simclr(manager, model, optimizer, train_loader, config, device):
    while not manager.stop_trigger:
        for images in train_loader:
            with manager.run_iteration():
                if manager.iteration >= config['training_loop']['args']['lr_decay_iter']:
                    optimizer.param_groups[0]['lr'] = config['lr'] / 3.
                else:
                    optimizer.param_groups[0]['lr'] = config['lr']
                model.train()
                images = torch.stack(images, dim=1).to(device)  # n t c h w
                zs = model(images)
                zs = [zs[:, i] for i in range(zs.shape[1])]
                loss = simclr(
                    zs,
                    loss_type=config['training_loop']['args']['loss_type'],
                    temperature=config['training_loop']['args']['temp']
                )
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                ppe.reporting.report({
                    'train/loss': loss.item(),
                })
            if manager.stop_trigger:
                break
2,461
40.728814
132
py
meta_sequential_prediction
meta_sequential_prediction-main/run.py
import os
import argparse
import yaml
import copy
import functools
import random

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training import extensions

from utils import yaml_utils as yu


def train(config):
    torch.cuda.empty_cache()
    torch.manual_seed(config['seed'])
    random.seed(config['seed'])
    np.random.seed(config['seed'])
    if torch.cuda.is_available():
        device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
    else:
        device = torch.device('cpu')
        gpu_index = -1

    # Dataset
    data = yu.load_component(config['train_data'])
    train_loader = DataLoader(
        data, batch_size=config['batchsize'], shuffle=True,
        num_workers=config['num_workers'])

    # Def. of model and optimizer
    model = yu.load_component(config['model'])
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), config['lr'])

    manager = ppe.training.ExtensionsManager(
        model, optimizer, None,
        iters_per_epoch=len(train_loader),
        out_dir=config['log_dir'],
        stop_trigger=(config['max_iteration'], 'iteration')
    )
    manager.extend(
        extensions.PrintReport(
            ['epoch', 'iteration', 'train/loss', 'train/loss_bd',
             'train/loss_orth', 'loss_internal_0', 'loss_internal_T',
             'elapsed_time']),
        trigger=(config['report_freq'], 'iteration'))
    manager.extend(extensions.LogReport(
        trigger=(config['report_freq'], 'iteration')))
    manager.extend(
        extensions.snapshot(
            target=model, filename='snapshot_model_iter_{.iteration}'),
        trigger=(config['model_snapshot_freq'], 'iteration'))
    manager.extend(
        extensions.snapshot(
            target=manager, filename='snapshot_manager_iter_{.iteration}',
            n_retains=1),
        trigger=(config['manager_snapshot_freq'], 'iteration'))

    # Run training loop
    print("Start training...")
    yu.load_component_fxn(config['training_loop'])(
        manager, model, optimizer, train_loader, config, device)


if __name__ == '__main__':
    # Loading the configuration arguments from the specified config path
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', type=str)
    parser.add_argument('--config_path', type=str)
    parser.add_argument('-a', '--attrs', nargs='*', default=())
    parser.add_argument('-w', '--warning', action='store_true')
    args = parser.parse_args()

    with open(args.config_path, 'r') as f:
        config = yaml.safe_load(f)
    config['config_path'] = args.config_path
    config['log_dir'] = args.log_dir

    # Modify the yaml file using attrs
    for attr in args.attrs:
        module, new_value = attr.split('=')
        keys = module.split('.')
        target = functools.reduce(dict.__getitem__, keys[:-1], config)
        if keys[-1] in target.keys():
            target[keys[-1]] = yaml.safe_load(new_value)
        else:
            raise ValueError(
                'The following key is not defined in the config file: {}'.format(keys))

    for k, v in sorted(config.items()):
        print("\t{} {}".format(k, v))

    # Create the result directory and save the yaml
    if not os.path.exists(config['log_dir']):
        os.makedirs(config['log_dir'])
    _config = copy.deepcopy(config)
    configpath = os.path.join(config['log_dir'], "config.yml")
    open(configpath, 'w').write(
        yaml.dump(_config, default_flow_style=False)
    )

    # Training
    train(config)
3,593
31.972477
140
py
meta_sequential_prediction
meta_sequential_prediction-main/models/seqae.py
import numpy as np
import torch
import torch.nn as nn
from models import dynamics_models
import torch.nn.utils.parametrize as P
from models.dynamics_models import LinearTensorDynamicsLSTSQ, MultiLinearTensorDynamicsLSTSQ, HigherOrderLinearTensorDynamicsLSTSQ
from models.base_networks import ResNetEncoder, ResNetDecoder, Conv1d1x1Encoder
from einops import rearrange, repeat
from utils.clr import simclr


class SeqAELSTSQ(nn.Module):
    def __init__(self, dim_a, dim_m, alignment=False, ch_x=3, k=1.0,
                 kernel_size=3, change_of_basis=False, predictive=True,
                 bottom_width=4, n_blocks=3, *args, **kwargs):
        super().__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.predictive = predictive
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.dec = ResNetDecoder(
            ch_x, k=k, kernel_size=kernel_size, bottom_width=bottom_width, n_blocks=n_blocks)
        self.dynamics_model = LinearTensorDynamicsLSTSQ(alignment=alignment)
        if change_of_basis:
            self.change_of_basis = nn.Parameter(torch.empty(dim_a, dim_a))
            nn.init.eye_(self.change_of_basis)

    def _encode_base(self, xs, enc):
        shape = xs.shape
        x = torch.reshape(xs, (shape[0] * shape[1], *shape[2:]))
        H = enc(x)
        H = torch.reshape(H, (shape[0], shape[1], *H.shape[1:]))
        return H

    def encode(self, xs):
        H = self._encode_base(xs, self.enc)
        H = torch.reshape(
            H, (H.shape[0], H.shape[1], self.dim_m, self.dim_a))
        if hasattr(self, "change_of_basis"):
            H = H @ repeat(self.change_of_basis,
                           'a1 a2 -> n t a1 a2', n=H.shape[0], t=H.shape[1])
        return H

    def phi(self, xs):
        return self._encode_base(xs, self.enc.phi)

    def get_M(self, xs):
        dyn_fn = self.dynamics_fn(xs)
        return dyn_fn.M

    def decode(self, H):
        if hasattr(self, "change_of_basis"):
            H = H @ repeat(torch.linalg.inv(self.change_of_basis),
                           'a1 a2 -> n t a1 a2', n=H.shape[0], t=H.shape[1])
        n, t = H.shape[:2]
        if hasattr(self, "pidec"):
            H = rearrange(H, 'n t d_s d_a -> (n t) d_a d_s')
            H = self.pidec(H)
        else:
            H = rearrange(H, 'n t d_s d_a -> (n t) (d_s d_a)')
        x_next_preds = self.dec(H)
        x_next_preds = torch.reshape(
            x_next_preds, (n, t, *x_next_preds.shape[1:]))
        return x_next_preds

    def dynamics_fn(self, xs, return_loss=False, fix_indices=None):
        H = self.encode(xs)
        return self.dynamics_model(H, return_loss=return_loss, fix_indices=fix_indices)

    def loss(self, xs, return_reg_loss=True, T_cond=2, reconst=False):
        xs_cond = xs[:, :T_cond]
        xs_pred = self(xs_cond, return_reg_loss=return_reg_loss,
                       n_rolls=xs.shape[1] - T_cond,
                       predictive=self.predictive, reconst=reconst)
        if return_reg_loss:
            xs_pred, reg_losses = xs_pred
        if reconst:
            xs_target = xs
        else:
            xs_target = xs[:, T_cond:] if self.predictive else xs[:, 1:]
        loss = torch.mean(
            torch.sum((xs_target - xs_pred) ** 2, axis=[2, 3, 4]))
        return (loss, reg_losses) if return_reg_loss else loss

    def __call__(self, xs_cond, return_reg_loss=False, n_rolls=1,
                 fix_indices=None, predictive=True, reconst=False):
        # Encoded latent. Num_ts x len_ts x dim_m x dim_a
        H = self.encode(xs_cond)
        # == Estimate dynamics ==
        ret = self.dynamics_model(
            H, return_loss=return_reg_loss, fix_indices=fix_indices)
        if return_reg_loss:
            # fn is a map by M_star. Loss is the training external loss.
            fn, losses = ret
        else:
            fn = ret
        if predictive:
            H_last = H[:, -1:]
            H_preds = [H] if reconst else []
            array = np.arange(n_rolls)
        else:
            H_last = H[:, :1]
            H_preds = [H[:, :1]] if reconst else []
            array = np.arange(xs_cond.shape[1] + n_rolls - 1)
        for _ in array:
            H_last = fn(H_last)
            H_preds.append(H_last)
        H_preds = torch.cat(H_preds, axis=1)
        # Prediction in the observation space
        x_preds = self.decode(H_preds)
        if return_reg_loss:
            return x_preds, losses
        else:
            return x_preds

    def loss_equiv(self, xs, T_cond=2, reduce=False):
        bsize = len(xs)
        xs_cond = xs[:, :T_cond]
        xs_target = xs[:, T_cond:]
        H = self.encode(xs_cond[:, -1:])
        dyn_fn = self.dynamics_fn(xs_cond)
        H_last = H
        H_preds = []
        n_rolls = xs.shape[1] - T_cond
        for _ in np.arange(n_rolls):
            H_last = dyn_fn(H_last)
            H_preds.append(H_last)
        H_pred = torch.cat(H_preds, axis=1)
        # swapping M
        dyn_fn.M = dyn_fn.M[torch.arange(-1, bsize - 1)]
        H_last = H
        H_preds_perm = []
        for _ in np.arange(n_rolls):
            H_last = dyn_fn(H_last)
            H_preds_perm.append(H_last)
        H_pred_perm = torch.cat(H_preds_perm, axis=1)
        xs_pred = self.decode(H_pred)
        xs_pred_perm = self.decode(H_pred_perm)
        reduce_dim = (1, 2, 3, 4, 5) if reduce else (2, 3, 4)
        loss = torch.sum((xs_target - xs_pred) ** 2,
                         dim=reduce_dim).detach().cpu().numpy()
        loss_perm = torch.sum((xs_target - xs_pred_perm) ** 2,
                              dim=reduce_dim).detach().cpu().numpy()
        return loss, loss_perm


class SeqAEHOLSTSQ(SeqAELSTSQ):
    # Higher order version of SeqAELSTSQ
    def __init__(self, dim_a, dim_m, alignment=False, ch_x=3, k=1.0,
                 kernel_size=3, change_of_basis=False, predictive=True,
                 bottom_width=4, n_blocks=3, n_order=2, *args, **kwargs):
        super(SeqAELSTSQ, self).__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.predictive = predictive
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.dec = ResNetDecoder(
            ch_x, k=k, kernel_size=kernel_size, bottom_width=bottom_width, n_blocks=n_blocks)
        self.dynamics_model = HigherOrderLinearTensorDynamicsLSTSQ(
            alignment=alignment, n_order=n_order)
        if change_of_basis:
            self.change_of_basis = nn.Parameter(torch.empty(dim_a, dim_a))
            nn.init.eye_(self.change_of_basis)

    def loss(self, xs, return_reg_loss=True, T_cond=2, reconst=False):
        if reconst:
            raise NotImplementedError
        xs_cond = xs[:, :T_cond]
        xs_pred = self(xs_cond, predictive=self.predictive,
                       return_reg_loss=return_reg_loss,
                       n_rolls=xs.shape[1] - T_cond)
        if return_reg_loss:
            xs_pred, reg_losses = xs_pred
        xs_target = xs[:, T_cond:] if self.predictive else xs[:, 1:]
        loss = torch.mean(
            torch.sum((xs_target - xs_pred) ** 2, axis=[2, 3, 4]))
        return (loss, reg_losses) if return_reg_loss else loss

    def __call__(self, xs, n_rolls=1, fix_indices=None,
                 predictive=True, return_reg_loss=False):
        # Encoded latent. Num_ts x len_ts x dim_m x dim_a
        H = self.encode(xs)
        # == Estimate dynamics ==
        ret = self.dynamics_model(
            H, return_loss=return_reg_loss, fix_indices=fix_indices)
        if return_reg_loss:
            # fn is a map by M_star. Loss is the training external loss.
            fn, Ms, losses = ret
        else:
            fn, Ms = ret
        if predictive:
            Hs_last = [H[:, -1:]] + [M[:, -1:] for M in Ms]
            array = np.arange(n_rolls)
        else:
            Hs_last = [H[:, :1]] + [M[:, :1] for M in Ms]
            array = np.arange(xs.shape[1] + n_rolls - 1)
        # Create prediction for the unseen future
        H_preds = []
        for _ in array:
            Hs_last = fn(Hs_last)
            H_preds.append(Hs_last[0])
        H_preds = torch.cat(H_preds, axis=1)
        x_preds = self.decode(H_preds)
        if return_reg_loss:
            return x_preds, losses
        else:
            return x_preds

    def loss_equiv(self, xs, T_cond=5, reduce=False, return_generated_images=False):
        bsize = len(xs)
        xs_cond = xs[:, :T_cond]
        xs_target = xs[:, T_cond:]
        H = self.encode(xs_cond[:, -1:])
        dyn_fn, Ms = self.dynamics_fn(xs_cond)
        H_last = [H] + [M[:, -1:] for M in Ms]
        H_preds = []
        n_rolls = xs.shape[1] - T_cond
        for _ in np.arange(n_rolls):
            H_last = dyn_fn(H_last)
            H_preds.append(H_last[0])
        H_pred = torch.cat(H_preds, axis=1)
        # swapping M
        dyn_fn.M = dyn_fn.M[torch.arange(-1, bsize - 1)]
        Ms = [M[torch.arange(-1, bsize - 1)] for M in Ms]
        H_last = [H] + [M[:, -1:] for M in Ms]
        H_preds_perm = []
        for _ in np.arange(n_rolls):
            H_last = dyn_fn(H_last)
            H_preds_perm.append(H_last[0])
        H_pred_perm = torch.cat(H_preds_perm, axis=1)
        xs_pred = self.decode(H_pred)
        xs_pred_perm = self.decode(H_pred_perm)
        loss = torch.sum((xs_target - xs_pred) ** 2,
                         dim=(2, 3, 4)).detach().cpu().numpy()
        loss_perm = torch.sum((xs_target - xs_pred_perm) ** 2,
                              dim=(2, 3, 4)).detach().cpu().numpy()
        if reduce:
            loss = torch.mean(loss)
            loss_perm = torch.mean(loss_perm)
        if return_generated_images:
            return (loss, loss_perm), (xs_pred, xs_pred_perm)
        else:
            return loss, loss_perm


class SeqAEMultiLSTSQ(SeqAELSTSQ):
    def __init__(self, dim_a, dim_m, alignment=False, ch_x=3, k=1.0,
                 kernel_size=3, change_of_basis=False, predictive=True,
                 bottom_width=4, n_blocks=3, K=8, *args, **kwargs):
        super(SeqAELSTSQ, self).__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.predictive = predictive
        self.K = K
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.dec = ResNetDecoder(
            ch_x, k=k, kernel_size=kernel_size, bottom_width=bottom_width, n_blocks=n_blocks)
        self.dynamics_model = MultiLinearTensorDynamicsLSTSQ(
            dim_a, alignment=alignment, K=K)
        if change_of_basis:
            self.change_of_basis = nn.Parameter(torch.empty(dim_a, dim_a))
            nn.init.eye_(self.change_of_basis)

    def get_blocks_of_M(self, xs):
        M = self.get_M(xs)
        blocks = []
        for k in range(self.K):
            dim_block = self.dim_a // self.K
            blocks.append(M[:, k * dim_block:(k + 1) * dim_block]
                          [:, :, k * dim_block:(k + 1) * dim_block])
        blocks_of_M = torch.stack(blocks, 1)
        return blocks_of_M


class SeqAENeuralM(SeqAELSTSQ):
    def __init__(self, dim_a, dim_m, ch_x=3, k=1.0, alignment=False,
                 kernel_size=3, predictive=True, bottom_width=4,
                 n_blocks=3, *args, **kwargs):
        super(SeqAELSTSQ, self).__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.predictive = predictive
        self.alignment = alignment
        self.initial_scale_M = 0.01
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.M_net = ResNetEncoder(
            dim_a * dim_a, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.dec = ResNetDecoder(
            ch_x, k=k, kernel_size=kernel_size, n_blocks=n_blocks, bottom_width=bottom_width)

    def dynamics_fn(self, xs):
        M = self.get_M(xs)
        dyn_fn = dynamics_models.LinearTensorDynamicsLSTSQ.DynFn(M)
        return dyn_fn

    def get_M(self, xs):
        xs = rearrange(xs, 'n t c h w -> n (t c) h w')
        M = self.M_net(xs)
        M = rearrange(M, 'n (a_1 a_2) -> n a_1 a_2', a_1=self.dim_a)
        M = self.initial_scale_M * M
        return M

    def __call__(self, xs, n_rolls=1, return_reg_loss=False,
                 predictive=True, reconst=False):
        # == Estimate dynamics ==
        fn = self.dynamics_fn(xs)
        if reconst:
            H = self.encode(xs)
            if predictive:
                H_last = H[:, -1:]
            else:
                H_last = H[:, :1]
        else:
            H_last = self.encode(xs[:, -1:] if predictive else xs[:, :1])
        if predictive:
            H_preds = [H] if reconst else []
            array = np.arange(n_rolls)
        else:
            H_preds = [H[:, :1]] if reconst else []
            array = np.arange(xs.shape[1] + n_rolls - 1)
        # Create prediction for the unseen future
        for _ in array:
            H_last = fn(H_last)
            H_preds.append(H_last)
        H_preds = torch.cat(H_preds, axis=1)
        x_preds = self.decode(H_preds)
        if return_reg_loss:
            losses = (dynamics_models.loss_bd(fn.M, self.alignment),
                      dynamics_models.loss_orth(fn.M), 0)
            return x_preds, losses
        else:
            return x_preds


class SeqAENeuralMLatentPredict(SeqAENeuralM):
    def __init__(self, dim_a, dim_m, ch_x=3, k=1.0, alignment=False,
                 kernel_size=3, predictive=True, bottom_width=4, n_blocks=3,
                 loss_latent_coeff=0, loss_pred_coeff=1.0,
                 loss_reconst_coeff=0, normalize=True, *args, **kwargs):
        assert predictive
        super().__init__(
            dim_a=dim_a,
            dim_m=dim_m,
            ch_x=ch_x,
            k=k,
            alignment=alignment,
            kernel_size=kernel_size,
            predictive=predictive,
            bottom_width=bottom_width,
            n_blocks=n_blocks,
        )
        self.loss_reconst_coeff = loss_reconst_coeff
        self.loss_pred_coeff = loss_pred_coeff
        self.loss_latent_coeff = loss_latent_coeff
        self.normalize = normalize

    def normalize_isotypic_copy(self, H):
        isotype_norm = torch.sqrt(torch.sum(H ** 2, axis=2, keepdims=True))
        H = H / isotype_norm
        return H

    # Encoding function with isotypic column normalization
    def encode(self, xs):
        H = super().encode(xs)
        if self.normalize:
            H = self.normalize_isotypic_copy(H)
        return H

    def latent_error(self, H_preds, H_target):
        latent_e = torch.mean(torch.sum((H_preds - H_target) ** 2, axis=(2, 3)))
        return latent_e

    def obs_error(self, xs_1, xs_2):
        obs_e = torch.mean(torch.sum((xs_1 - xs_2) ** 2, axis=(2, 3, 4)))
        return obs_e

    def __call__(self, xs, n_rolls=1, T_cond=2,
                 return_losses=False, return_reg_losses=False):
        xs_cond, xs_target = xs[:, :T_cond], xs[:, T_cond:]
        fn = self.dynamics_fn(xs_cond)
        H_cond, H_target = self.encode(xs_cond), self.encode(xs_target)
        H_last = H_cond[:, -1:]
        H_preds = [H_cond]
        array = np.arange(n_rolls)
        for _ in array:
            H_last = fn(H_last)
            H_preds.append(H_last)
        H_preds = torch.cat(H_preds, axis=1)
        xs_preds = self.decode(H_preds)
        ret = [xs_preds]
        if return_losses:
            losses = {}
            losses['loss_reconst'] = self.obs_error(xs_preds[:, :T_cond], xs_cond) if self.loss_reconst_coeff > 0 else torch.tensor([0]).to(xs.device)
            losses['loss_pred'] = self.obs_error(xs_preds[:, T_cond:], xs_target) if self.loss_pred_coeff > 0 else torch.tensor([0]).to(xs.device)
            losses['loss_latent'] = self.latent_error(H_preds[:, T_cond:], H_target) if self.loss_latent_coeff > 0 else torch.tensor([0]).to(xs.device)
            ret += [losses]
        if return_reg_losses:
            ret += [(dynamics_models.loss_bd(fn.M, self.alignment),
                     dynamics_models.loss_orth(fn.M), 0)]
        return ret

    def loss(self, xs, return_reg_loss=True, T_cond=2, reconst=False):
        ret = self(xs, return_losses=True, return_reg_losses=return_reg_loss,
                   T_cond=T_cond, n_rolls=xs.shape[1] - T_cond)
        if return_reg_loss:
            _, losses, reg_losses = ret
        else:
            _, losses = ret
        total_loss = self.loss_reconst_coeff * losses['loss_reconst'] \
            + self.loss_pred_coeff * losses['loss_pred'] \
            + self.loss_latent_coeff * losses['loss_latent']
        return (total_loss, reg_losses) if return_reg_loss else total_loss


class SeqAENeuralTransition(SeqAELSTSQ):
    def __init__(self, dim_a, dim_m, ch_x=3, k=1.0, kernel_size=3,
                 T_cond=2, bottom_width=4, n_blocks=3, *args, **kwargs):
        super(SeqAELSTSQ, self).__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.T_cond = T_cond
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.ar = Conv1d1x1Encoder(dim_out=dim_a)
        self.dec = ResNetDecoder(
            ch_x, k=k, kernel_size=kernel_size, bottom_width=bottom_width, n_blocks=n_blocks)

    def loss(self, xs, return_reg_loss=False, T_cond=2, reconst=False):
        assert T_cond == self.T_cond
        xs_cond = xs[:, :T_cond]
        xs_pred = self(xs_cond, n_rolls=xs.shape[1] - T_cond, reconst=reconst)
        xs_target = xs if reconst else xs[:, T_cond:]
        loss = torch.mean(
            torch.sum((xs_target - xs_pred) ** 2, axis=[2, 3, 4]))
        if return_reg_loss:
            return loss, [torch.Tensor(np.array(0, dtype=np.float32)).to(xs.device)] * 3
        else:
            return loss

    def get_M(self, xs):
        T = xs.shape[1]
        xs = rearrange(xs, 'n t c h w -> (n t) c h w')
        H = self.enc(xs)
        H = rearrange(H, '(n t) c -> n (t c)', t=T)
        return H

    def __call__(self, xs, n_rolls=1, reconst=False):
        # == Estimate dynamics ==
        H = self.encode(xs)
        array = np.arange(n_rolls)
        H_preds = [H] if reconst else []
        # Create prediction for the unseen future
        for _ in array:
            H_pred = self.ar(rearrange(H, 'n t s a -> n (t a) s'))
            H_pred = rearrange(
                H_pred, 'n (t a) s -> n t s a', t=1, a=self.dim_a)
            H_preds.append(H_pred)
            H = torch.cat([H[:, 1:], H_pred], dim=1)
        H_preds = torch.cat(H_preds, axis=1)
        # Prediction in the observation space
        return self.decode(H_preds)


class CPC(SeqAELSTSQ):
    def __init__(self, dim_a, dim_m, k=1.0, kernel_size=3, temp=0.01,
                 normalize=True, loss_type='cossim', n_blocks=3,
                 *args, **kwargs):
        super(SeqAELSTSQ, self).__init__()
        self.dim_a = dim_a
        self.dim_m = dim_m
        self.normalize = normalize
        self.temp = temp
        self.loss_type = loss_type
        self.enc = ResNetEncoder(
            dim_a * dim_m, k=k, kernel_size=kernel_size, n_blocks=n_blocks)
        self.ar = Conv1d1x1Encoder(dim_out=dim_a * dim_m)

    def __call__(self, xs):
        H = self.encode(xs)  # [n, t, s, a]
        # Create prediction for the unseen future
        H = rearrange(H, 'n t s a -> n (t a) s')
        # Obtain c in CPC
        H_pred = self.ar(H)  # [n a s]
        H_pred = rearrange(H_pred, 'n a s -> n s a')
        return H_pred

    def get_M(self, xs):
        T = xs.shape[1]
        xs = rearrange(xs, 'n t c h w -> (n t) c h w')
        H = self.enc(xs)
        H = rearrange(H, '(n t) c -> n (t c)', t=T)
        return H

    def loss(self, xs, return_reg_loss=True, T_cond=2, reconst=False):
        T_pred = xs.shape[1] - T_cond
        assert T_pred == 1
        # Encoded latent. Num_ts x len_ts x dim_m x dim_a
        H = self.encode(xs)  # [n, t, s, a]
        # Create prediction for the unseen future
        H_cond = H[:, :T_cond]
        H_cond = rearrange(H_cond, 'n t s a -> n (t a) s')
        # Obtain c in CPC
        H_pred = self.ar(H_cond)  # [n a s]
        H_pred = rearrange(H_pred, 'n a s -> n s a')
        H_true = H[:, -1]  # n s a
        H_true = rearrange(H_true, 'n s a -> n (s a)')
        H_pred = rearrange(H_pred, 'n s a -> n (s a)')
        loss = simclr([H_pred, H_true], self.temp,
                      normalize=self.normalize, loss_type=self.loss_type)
        if return_reg_loss:
            reg_losses = [torch.Tensor(np.array(0, dtype=np.float32))] * 3
            return loss, reg_losses
        else:
            return loss
21,519
33.935065
151
py
meta_sequential_prediction
meta_sequential_prediction-main/models/base_networks.py
import numpy as np
import torch
from torch import nn
from models.resblock import Block, Conv1d1x1Block
from einops.layers.torch import Rearrange
from einops import repeat


class Conv1d1x1Encoder(nn.Sequential):
    def __init__(self, dim_out=16, dim_hidden=128, act=nn.ReLU()):
        super().__init__(
            nn.LazyConv1d(dim_hidden, 1, 1, 0),
            Conv1d1x1Block(dim_hidden, dim_hidden, act=act),
            Conv1d1x1Block(dim_hidden, dim_hidden, act=act),
            Rearrange('n c s -> n s c'),
            nn.LayerNorm((dim_hidden)),
            Rearrange('n s c -> n c s'),
            act,
            nn.LazyConv1d(dim_out, 1, 1, 0)
        )


class ResNetEncoder(nn.Module):
    def __init__(self, dim_latent=1024, k=1, act=nn.ReLU(),
                 kernel_size=3, n_blocks=3):
        super().__init__()
        self.phi = nn.Sequential(
            nn.LazyConv2d(int(32 * k), 3, 1, 1),
            *[Block(int(32 * k) * (2 ** i),
                    int(32 * k) * (2 ** (i + 1)),
                    int(32 * k) * (2 ** (i + 1)),
                    resample='down', activation=act, kernel_size=kernel_size)
              for i in range(n_blocks)],
            nn.GroupNorm(min(32, int(32 * k) * (2 ** n_blocks)),
                         int(32 * k) * (2 ** n_blocks)),
            act)
        self.linear = nn.LazyLinear(
            dim_latent) if dim_latent > 0 else lambda x: x

    def __call__(self, x):
        h = x
        h = self.phi(h)
        h = h.reshape(h.shape[0], -1)
        h = self.linear(h)
        return h


class ResNetDecoder(nn.Module):
    def __init__(self, ch_x, k=1, act=nn.ReLU(), kernel_size=3,
                 bottom_width=4, n_blocks=3):
        super().__init__()
        self.bottom_width = bottom_width
        self.linear = nn.LazyLinear(int(32 * k) * (2 ** n_blocks))
        self.net = nn.Sequential(
            *[Block(int(32 * k) * (2 ** (i + 1)),
                    int(32 * k) * (2 ** i),
                    int(32 * k) * (2 ** i),
                    resample='up', activation=act, kernel_size=kernel_size,
                    posemb=True)
              for i in range(n_blocks - 1, -1, -1)],
            nn.GroupNorm(min(32, int(32 * k)), int(32 * k)),
            act,
            nn.Conv2d(int(32 * k), ch_x, 3, 1, 1)
        )

    def __call__(self, x):
        x = self.linear(x)
        x = repeat(x, 'n c -> n c h w',
                   h=self.bottom_width, w=self.bottom_width)
        x = self.net(x)
        x = torch.sigmoid(x)
        return x
2,505
33.805556
125
py
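A minimal shape-check sketch for the encoder/decoder pair above; the 32x32 input size, batch size, and latent width are illustrative assumptions, and the import path assumes the repository root is on PYTHONPATH:

import torch
from models.base_networks import ResNetEncoder, ResNetDecoder

enc = ResNetEncoder(dim_latent=256, k=1, n_blocks=3)          # three 'down' blocks: 32x32 -> 4x4
dec = ResNetDecoder(ch_x=3, k=1, bottom_width=4, n_blocks=3)

x = torch.randn(8, 3, 32, 32)
z = enc(x)       # lazy layers materialize on this first call; z.shape == (8, 256)
x_hat = dec(z)   # x_hat.shape == (8, 3, 32, 32), values in [0, 1] after the sigmoid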
meta_sequential_prediction
meta_sequential_prediction-main/models/resblock.py
import sys
import os
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from utils.weight_standarization import WeightStandarization, WeightStandarization1d
import torch.nn.utils.parametrize as P
from utils.emb2d import Emb2D


def upsample_conv(x, conv):
    # Upsample -> Conv
    x = nn.Upsample(scale_factor=2, mode='nearest')(x)
    x = conv(x)
    return x


def conv_downsample(x, conv):
    # Conv -> Downsample
    x = conv(x)
    h = F.avg_pool2d(x, 2)
    return h


class Block(nn.Module):
    def __init__(self, in_channels, out_channels,
                 hidden_channels=None, kernel_size=3, padding=None,
                 activation=F.relu, resample=None, group_norm=True,
                 skip_connection=True, posemb=False):
        super(Block, self).__init__()
        if padding is None:
            padding = (kernel_size-1) // 2
        self.pe = Emb2D() if posemb else lambda x: x
        in_ch_conv = in_channels + self.pe.dim if posemb else in_channels
        self.skip_connection = skip_connection
        self.activation = activation
        self.resample = resample
        initializer = torch.nn.init.xavier_uniform_
        if self.resample is None or self.resample == 'up':
            hidden_channels = out_channels if hidden_channels is None else hidden_channels
        else:
            hidden_channels = in_channels if hidden_channels is None else hidden_channels
        self.c1 = nn.Conv2d(in_ch_conv, hidden_channels,
                            kernel_size=kernel_size, padding=padding)
        self.c2 = nn.Conv2d(hidden_channels, out_channels,
                            kernel_size=kernel_size, padding=padding)
        initializer(self.c1.weight, math.sqrt(2))
        initializer(self.c2.weight, math.sqrt(2))
        P.register_parametrization(
            self.c1, 'weight', WeightStandarization())
        P.register_parametrization(
            self.c2, 'weight', WeightStandarization())
        if group_norm:
            self.b1 = nn.GroupNorm(min(32, in_channels), in_channels)
            self.b2 = nn.GroupNorm(min(32, hidden_channels), hidden_channels)
        else:
            self.b1 = self.b2 = lambda x: x
        if self.skip_connection:
            self.c_sc = nn.Conv2d(in_ch_conv, out_channels,
                                  kernel_size=1, padding=0)
            initializer(self.c_sc.weight)

    def residual(self, x):
        x = self.b1(x)
        x = self.activation(x)
        if self.resample == 'up':
            x = nn.Upsample(scale_factor=2, mode='nearest')(x)
        x = self.pe(x)
        x = self.c1(x)
        x = self.b2(x)
        x = self.activation(x)
        x = self.c2(x)
        if self.resample == 'down':
            x = F.avg_pool2d(x, 2)
        return x

    def shortcut(self, x):
        # Upsample -> Conv
        if self.resample == 'up':
            x = nn.Upsample(scale_factor=2, mode='nearest')(x)
            x = self.pe(x)
            x = self.c_sc(x)
        elif self.resample == 'down':
            x = self.pe(x)
            x = self.c_sc(x)
            x = F.avg_pool2d(x, 2)
        else:
            x = self.pe(x)
            x = self.c_sc(x)
        return x

    def __call__(self, x):
        if self.skip_connection:
            return self.residual(x) + self.shortcut(x)
        else:
            return self.residual(x)


class Conv1d1x1Block(nn.Module):
    def __init__(self, in_channels, out_channels,
                 hidden_channels=None, act=F.relu):
        super().__init__()
        self.act = act
        initializer = torch.nn.init.xavier_uniform_
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.c1 = nn.Conv1d(in_channels, hidden_channels, 1, 1, 0)
        self.c2 = nn.Conv1d(hidden_channels, out_channels, 1, 1, 0)
        initializer(self.c1.weight, math.sqrt(2))
        initializer(self.c2.weight, math.sqrt(2))
        P.register_parametrization(
            self.c1, 'weight', WeightStandarization1d())
        P.register_parametrization(
            self.c2, 'weight', WeightStandarization1d())
        self.norm1 = nn.LayerNorm((in_channels))
        self.norm2 = nn.LayerNorm((hidden_channels))
        self.c_sc = nn.Conv1d(in_channels, out_channels, 1, 1, 0)
        initializer(self.c_sc.weight)

    def residual(self, x):
        x = self.norm1(x.transpose(-2, -1)).transpose(-2, -1)
        x = self.act(x)
        x = self.c1(x)
        x = self.norm2(x.transpose(-2, -1)).transpose(-2, -1)
        x = self.act(x)
        x = self.c2(x)
        return x

    def shortcut(self, x):
        x = self.c_sc(x)
        return x

    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)
4,914
32.209459
90
py
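A small usage sketch of Block's resampling modes; channel counts and the 16x16 spatial size are arbitrary assumptions:

import torch
from models.resblock import Block

down = Block(32, 64, resample='down')            # halves the spatial size
up = Block(64, 32, resample='up', posemb=True)   # doubles it, with a 2-D positional embedding

x = torch.randn(2, 32, 16, 16)
h = down(x)   # h.shape == (2, 64, 8, 8)
y = up(h)     # y.shape == (2, 32, 16, 16)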
meta_sequential_prediction
meta_sequential_prediction-main/models/simclr_models.py
import torch
import torch.nn as nn
from models.base_networks import ResNetEncoder
from einops import rearrange


class ResNetwProjHead(nn.Module):
    def __init__(self,
                 dim_mlp=512,
                 dim_head=128,
                 k=1,
                 act=nn.ReLU(),
                 n_blocks=3):
        super().__init__()
        self.enc = ResNetEncoder(
            dim_latent=0, k=k, n_blocks=n_blocks)
        self.projhead = nn.Sequential(
            nn.LazyLinear(dim_mlp),
            act,
            nn.LazyLinear(dim_head))

    def _encode_base(self, xs, enc):
        shape = xs.shape
        x = torch.reshape(xs, (shape[0] * shape[1], *shape[2:]))
        H = enc(x)
        H = torch.reshape(H, (shape[0], shape[1], *H.shape[1:]))
        return H

    def __call__(self, xs):
        return self._encode_base(xs, lambda x: self.projhead(self.enc(x)))

    def phi(self, xs):
        return self._encode_base(xs, self.enc.phi)

    def get_M(self, xs):
        T = xs.shape[1]
        xs = rearrange(xs, 'n t c h w -> (n t) c h w')
        H = self.enc(xs)
        H = rearrange(H, '(n t) c -> n (t c)', t=T)
        return H
1,078
28.162162
82
py
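A shape sketch for ResNetwProjHead; the two-view layout and 32x32 inputs are assumptions:

import torch
from models.simclr_models import ResNetwProjHead

net = ResNetwProjHead(dim_mlp=256, dim_head=64, n_blocks=3)
xs = torch.randn(4, 2, 3, 32, 32)   # [batch, views, c, h, w]
z = net(xs)                         # z.shape == (4, 2, 64): one projected embedding per view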
meta_sequential_prediction
meta_sequential_prediction-main/models/__init__.py
0
0
0
py
meta_sequential_prediction
meta_sequential_prediction-main/models/dynamics_models.py
import numpy as np
import torch
import torch.nn as nn
from utils.laplacian import make_identity_like, tracenorm_of_normalized_laplacian, make_identity, make_diagonal
import einops
import pytorch_pfn_extras as ppe


def _rep_M(M, T):
    return einops.repeat(M, "n a1 a2 -> n t a1 a2", t=T)


def _loss(A, B):
    return torch.sum((A-B)**2)


def _solve(A, B):
    ATA = A.transpose(-2, -1) @ A
    ATB = A.transpose(-2, -1) @ B
    return torch.linalg.solve(ATA, ATB)


def loss_bd(M_star, alignment):
    # Block Diagonalization Loss
    S = torch.abs(M_star)
    STS = torch.matmul(S.transpose(-2, -1), S)
    if alignment:
        laploss_sts = tracenorm_of_normalized_laplacian(
            torch.mean(STS, 0))
    else:
        laploss_sts = torch.mean(
            tracenorm_of_normalized_laplacian(STS), 0)
    return laploss_sts


def loss_orth(M_star):
    # Orthogonalization of M
    I = make_identity_like(M_star)
    return torch.mean(torch.sum((I-M_star @ M_star.transpose(-2, -1))**2, axis=(-2, -1)))


class LinearTensorDynamicsLSTSQ(nn.Module):

    class DynFn(nn.Module):
        def __init__(self, M):
            super().__init__()
            self.M = M

        def __call__(self, H):
            return H @ _rep_M(self.M, T=H.shape[1])

        def inverse(self, H):
            M = _rep_M(self.M, T=H.shape[1])
            return torch.linalg.solve(M, H.transpose(-2, -1)).transpose(-2, -1)

    def __init__(self, alignment=True):
        super().__init__()
        self.alignment = alignment

    def __call__(self, H, return_loss=False, fix_indices=None):
        # Regress M.
        # Note: backpropagation is disabled when fix_indices is not None.
        # H0.shape = H1.shape [n, t, s, a]
        H0, H1 = H[:, :-1], H[:, 1:]
        # num_ts x ([len_ts -1] * dim_s) x dim_a
        # The difference between the time shifted components
        loss_internal_0 = _loss(H0, H1)
        ppe.reporting.report({
            'loss_internal_0': loss_internal_0.item()
        })
        _H0 = H0.reshape(H0.shape[0], -1, H0.shape[-1])
        _H1 = H1.reshape(H1.shape[0], -1, H1.shape[-1])
        if fix_indices is not None:
            # Note: backpropagation is disabled.
            dim_a = _H0.shape[-1]
            active_indices = np.array(
                list(set(np.arange(dim_a)) - set(fix_indices)))
            _M_star = _solve(_H0[:, :, active_indices],
                             _H1[:, :, active_indices])
            M_star = make_identity(_H1.shape[0], _H1.shape[-1], _H1.device)
            M_star[:, active_indices[:, np.newaxis], active_indices] = _M_star
        else:
            M_star = _solve(_H0, _H1)
        dyn_fn = self.DynFn(M_star)
        loss_internal_T = _loss(dyn_fn(H0), H1)
        ppe.reporting.report({
            'loss_internal_T': loss_internal_T.item()
        })
        # M_star is returned in the form of a module, not a matrix
        if return_loss:
            losses = (loss_bd(dyn_fn.M, self.alignment),
                      loss_orth(dyn_fn.M), loss_internal_T)
            return dyn_fn, losses
        else:
            return dyn_fn


class HigherOrderLinearTensorDynamicsLSTSQ(LinearTensorDynamicsLSTSQ):

    class DynFn(nn.Module):
        def __init__(self, M):
            super().__init__()
            self.M = M

        def __call__(self, Hs):
            nHs = [None]*len(Hs)
            for l in range(len(Hs)-1, -1, -1):
                if l == len(Hs)-1:
                    nHs[l] = Hs[l] @ _rep_M(self.M, Hs[l].shape[1])
                else:
                    nHs[l] = Hs[l] @ nHs[l+1]
            return nHs

    def __init__(self, alignment=True, n_order=2):
        super().__init__(alignment)
        self.n_order = n_order

    def __call__(self, H, return_loss=False, fix_indices=None):
        assert H.shape[1] > self.n_order
        # H0.shape = H1.shape [n, t, s, a]
        H0, Hn = H[:, :-self.n_order], H[:, self.n_order:]
        loss_internal_0 = _loss(H0, Hn)
        ppe.reporting.report({
            'loss_internal_0': loss_internal_0.item()
        })
        Ms = []
        _H = H
        if fix_indices is not None:
            raise NotImplementedError
        else:
            for n in range(self.n_order):
                # H0.shape = H1.shape [n, t, s, a]
                _H0, _H1 = _H[:, :-1], _H[:, 1:]
                if n == self.n_order - 1:
                    _H0 = _H0.reshape(_H0.shape[0], -1, _H0.shape[-1])
                    _H1 = _H1.reshape(_H1.shape[0], -1, _H1.shape[-1])
                    _H = _solve(_H0, _H1)  # [N, a, a]
                else:
                    _H = _solve(_H0, _H1)[:, 1:]  # [N, T-n, a, a]
                Ms.append(_H)
        dyn_fn = self.DynFn(Ms[-1])
        loss_internal_T = _loss(dyn_fn([H0] + Ms[:-1])[0], Hn)
        ppe.reporting.report({
            'loss_internal_T': loss_internal_T.item()
        })
        # M_star is returned in the form of a module, not a matrix
        if return_loss:
            losses = (loss_bd(dyn_fn.M, self.alignment),
                      loss_orth(dyn_fn.M), loss_internal_T)
            return dyn_fn, Ms[:-1], losses
        else:
            return dyn_fn, Ms[:-1]


# The fixed block model
class MultiLinearTensorDynamicsLSTSQ(LinearTensorDynamicsLSTSQ):
    def __init__(self, dim_a, alignment=True, K=4):
        super().__init__(alignment=alignment)
        self.dim_a = dim_a
        self.alignment = alignment
        assert dim_a % K == 0
        self.K = K

    def __call__(self, H, return_loss=False, fix_indices=None):
        H0, H1 = H[:, :-1], H[:, 1:]
        # num_ts x ([len_ts -1] * dim_s) x dim_a
        # The difference between the time shifted components
        loss_internal_0 = _loss(H0, H1)
        _H0 = H0.reshape(H.shape[0], -1, H.shape[3])
        _H1 = H1.reshape(H.shape[0], -1, H.shape[3])
        ppe.reporting.report({
            'loss_internal_0': loss_internal_0.item()
        })
        M_stars = []
        for k in range(self.K):
            if fix_indices is not None and k in fix_indices:
                M_stars.append(make_identity(
                    H.shape[0], self.dim_a//self.K, H.device))
            else:
                st = k*(self.dim_a//self.K)
                ed = (k+1)*(self.dim_a//self.K)
                M_stars.append(_solve(_H0[:, :, st:ed], _H1[:, :, st:ed]))
        # Construct block diagonals
        for k in range(self.K):
            if k == 0:
                M_star = M_stars[0]
            else:
                M1 = M_star
                M2 = M_stars[k]
                _M1 = torch.cat(
                    [M1, torch.zeros(H.shape[0], M2.shape[1], M1.shape[2]).to(H.device)], axis=1)
                _M2 = torch.cat(
                    [torch.zeros(H.shape[0], M1.shape[1], M2.shape[2]).to(H.device), M2], axis=1)
                M_star = torch.cat([_M1, _M2], axis=2)
        dyn_fn = self.DynFn(M_star)
        loss_internal_T = _loss(dyn_fn(H0), H1)
        ppe.reporting.report({
            'loss_internal_T': loss_internal_T.item()
        })
        # M_star is returned in the form of a module, not a matrix
        if return_loss:
            losses = (loss_bd(dyn_fn.M, self.alignment),
                      loss_orth(dyn_fn.M), loss_internal_T)
            return dyn_fn, losses
        else:
            return dyn_fn
7,338
33.134884
111
py
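A toy run of the least-squares dynamics fit on random latents; all sizes are arbitrary assumptions, chosen so the normal equations inside _solve are well conditioned:

import torch
from models.dynamics_models import LinearTensorDynamicsLSTSQ

H = torch.randn(4, 5, 16, 8)                   # [batch, time, dim_s, dim_a]
dyn = LinearTensorDynamicsLSTSQ(alignment=True)
dyn_fn, (l_bd, l_orth, l_fit) = dyn(H, return_loss=True)
print(dyn_fn.M.shape)                          # (4, 8, 8): one transition matrix per sequence
H_next = dyn_fn(H[:, :-1])                     # rolls the latents one step forward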
meta_sequential_prediction
meta_sequential_prediction-main/datasets/three_dim_shapes.py
import numpy as np
import torch
import torchvision
from collections import OrderedDict
import os

_FACTORS_IN_ORDER = ['floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape',
                     'orientation']
_NUM_VALUES_PER_FACTOR = OrderedDict({'floor_hue': 10, 'wall_hue': 10, 'object_hue': 10,
                                      'scale': 8, 'shape': 4, 'orientation': 15})


def get_index(factors):
    """ Converts factors to indices in range(num_data)
    Args:
      factors: np array shape [6,batch_size].
               factors[i]=factors[i,:] takes integer values in
               range(_NUM_VALUES_PER_FACTOR[_FACTORS_IN_ORDER[i]]).

    Returns:
      indices: np array shape [batch_size].
    """
    indices = 0
    base = 1
    for factor, name in reversed(list(enumerate(_FACTORS_IN_ORDER))):
        indices += factors[factor] * base
        base *= _NUM_VALUES_PER_FACTOR[name]
    return indices


class ThreeDimShapesDataset(object):
    default_active_actions = [0, 1, 2, 3, 5]

    def __init__(self, root, train=True, T=3, label_velo=False,
                 transforms=torchvision.transforms.ToTensor(),
                 active_actions=None, force_moving=False,
                 shared_transition=False, rng=None):
        assert T <= 8
        self.images = torch.load(os.path.join(
            root, '3dshapes/images.pt')).astype(np.float32)
        self.label_velo = label_velo
        self.train = train
        self.T = T
        self.transforms = transforms
        self.active_actions = self.default_active_actions if active_actions is None else active_actions
        self.force_moving = force_moving
        self.rng = rng if rng is not None else np.random
        self.shared_transition = shared_transition
        if self.shared_transition:
            self.init_shared_transition_parameters()

    def init_shared_transition_parameters(self):
        vs = {}
        for kv in _NUM_VALUES_PER_FACTOR.items():
            key, value = kv[0], kv[1]
            vs[key] = self.gen_v(value)
        self.vs = vs

    def __len__(self):
        return 5000

    def gen_pos(self, max_n, v):
        _x = np.abs(v) * (self.T-1)
        if v < 0:
            return self.rng.randint(_x, max_n)
        else:
            return self.rng.randint(0, max_n-_x)

    def gen_v(self, max_n):
        v = self.rng.randint(1 if self.force_moving else 0, max_n//self.T + 1)
        if self.rng.uniform() > 0.5:
            v = -v
        return v

    def gen_factors(self):
        # initial state
        p_and_v_list = []
        sampled_indices = []
        for action_index, kv in enumerate(_NUM_VALUES_PER_FACTOR.items()):
            key, value = kv[0], kv[1]
            if key == 'shape':
                p_and_v_list.append([0, 0])
                if self.train:
                    shape = self.rng.choice([0])
                else:
                    shape = self.rng.choice([1, 2, 3])
                sampled_indices.append([shape]*self.T)
            else:
                if not(action_index in self.active_actions):
                    v = 0
                else:
                    if self.shared_transition:
                        v = self.vs[key]
                    else:
                        v = self.gen_v(value)
                p = self.gen_pos(value, v)
                p_and_v_list.append((p, v))
                indices = [p + t * v for t in range(self.T)]
                sampled_indices.append(indices)
        return np.array(p_and_v_list, dtype=np.uint8), np.array(sampled_indices, dtype=np.uint8).T

    def __getitem__(self, i):
        p_and_v_list, sample_indices = self.gen_factors()
        imgs = []
        for t in range(self.T):
            img = self.images[get_index(sample_indices[t])] / 255.
            img = self.transforms(img)
            imgs.append(img)
        if self.label_velo:
            return imgs, p_and_v_list[0][1][None], p_and_v_list[1][1][None], p_and_v_list[2][1][None], p_and_v_list[3][1][None], p_and_v_list[5][1][None]
        else:
            return imgs
4,023
33.991304
153
py
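A worked example of get_index's mixed-radix arithmetic (the factor values below are made up). The bases accumulate from the last factor: 1 for orientation, then 15, 60, 480, 4800, and 48000 for shape, scale, object_hue, wall_hue, and floor_hue, so a unit step in floor_hue moves the flat index by 10*10*8*4*15 = 48000:

import numpy as np
from datasets.three_dim_shapes import get_index

factors = np.array([1, 0, 0, 0, 0, 0])   # floor_hue=1, all other factors 0
print(get_index(factors))                # 48000
factors = np.array([0, 0, 0, 0, 0, 3])   # orientation=3, the fastest-varying factor
print(get_index(factors))                # 3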
meta_sequential_prediction
meta_sequential_prediction-main/datasets/small_norb.py
import numpy as np
import torch
import torchvision
from collections import OrderedDict
import os

_FACTORS_IN_ORDER = ['category', 'instance', 'lighting', 'elevation',
                     'azimuth']
_ELEV_V = [30, 35, 40, 45, 50, 55, 60, 65, 70]
_AZIM_V = np.arange(0, 350, 20)
assert len(_AZIM_V) == 18
_NUM_VALUES_PER_FACTOR = OrderedDict(
    {'category': 5, 'instance': 5, 'lighting': 6, 'elevation': 9, 'azimuth': 18})


def get_index(factors):
    """ Converts factors to indices in range(num_data)
    Args:
      factors: np array shape [6,batch_size].
               factors[i]=factors[i,:] takes integer values in
               range(_NUM_VALUES_PER_FACTOR[_FACTORS_IN_ORDER[i]]).

    Returns:
      indices: np array shape [batch_size].
    """
    indices = 0
    base = 1
    for factor, name in reversed(list(enumerate(_FACTORS_IN_ORDER))):
        indices += factors[factor] * base
        base *= _NUM_VALUES_PER_FACTOR[name]
    return indices


class SmallNORBDataset(object):
    default_active_actions = [3, 4]

    def __init__(self, root, train=True, T=3, label=False, label_velo=False,
                 force_moving=False, active_actions=None,
                 transforms=torchvision.transforms.ToTensor(),
                 shared_transition=False, rng=None):
        assert T <= 6
        self.data = torch.load(os.path.join(
            root, 'smallNORB/train.pt' if train else 'smallNORB/test.pt'))
        self.label = label
        self.label_velo = label_velo
        print(self.data.shape)
        self.T = T
        self.transforms = transforms
        self.active_actions = self.default_active_actions if active_actions is None else active_actions
        self.force_moving = force_moving
        self.rng = rng if rng is not None else np.random
        self.shared_transition = shared_transition
        if self.shared_transition:
            self.init_shared_transition_parameters()

    def init_shared_transition_parameters(self):
        self.vs = {}
        for kv in _NUM_VALUES_PER_FACTOR.items():
            key, value = kv[0], kv[1]
            self.vs[key] = self.gen_v(value)

    def __len__(self):
        return 5000

    def gen_pos(self, max_n, v):
        _x = np.abs(v) * (self.T-1)
        if v < 0:
            return self.rng.randint(_x, max_n)
        else:
            return self.rng.randint(0, max_n-_x)

    def gen_v(self, max_n):
        v = self.rng.randint(1 if self.force_moving else 0, max_n//self.T + 1)
        if self.rng.uniform() > 0.5:
            v = -v
        return v

    def gen_factors(self):
        # initial state
        p_and_v_list = []
        sampled_indices = []
        for action_index, kv in enumerate(_NUM_VALUES_PER_FACTOR.items()):
            key, value = kv[0], kv[1]
            if key == 'category' or key == 'instance' or key == 'lighting':
                p_and_v_list.append([0, 0])
                index = self.rng.randint(0, _NUM_VALUES_PER_FACTOR[key])
                sampled_indices.append([index]*self.T)
            else:
                if not(action_index in self.active_actions):
                    v = 0
                else:
                    if self.shared_transition:
                        v = self.vs[key]
                    else:
                        v = self.gen_v(value)
                p = self.gen_pos(value, v)
                p_and_v_list.append((p, v))
                indices = [p + t * v for t in range(self.T)]
                sampled_indices.append(indices)
        # print(p_and_v_list)
        return np.array(p_and_v_list, dtype=np.uint8), np.array(sampled_indices, dtype=np.uint8).T

    def __getitem__(self, i):
        p_and_v_list, sample_indices = self.gen_factors()
        imgs = []
        for t in range(self.T):
            ind = sample_indices[t]
            img = self.data[ind[0], ind[1], ind[2], ind[3], ind[4]]
            img = img/255.
            img = self.transforms(img[:, :, None])
            imgs.append(img)
        if self.T == 1:
            imgs = imgs[0]
        if self.label or self.label_velo:
            ret = [imgs]
            if self.label:
                ret += [sample_indices[0][0]]
            if self.label_velo:
                ret += [p_and_v_list[3][1][None], p_and_v_list[4][1][None]]
            return ret
        else:
            return imgs
4,475
32.402985
103
py
meta_sequential_prediction
meta_sequential_prediction-main/datasets/seq_mnist.py
import os
import numpy as np
import cv2
import torch
import torchvision
import math
import colorsys
from skimage.transform import resize
from copy import deepcopy
from utils.misc import get_RTmat
from utils.misc import freq_to_wave


class SequentialMNIST():
    # Rotate around z axis only.
    default_active_actions = [0, 1, 2]

    def __init__(
            self,
            root,
            train=True,
            transforms=torchvision.transforms.ToTensor(),
            T=3,
            max_angle_velocity_ratio=[-0.5, 0.5],
            max_angle_accl_ratio=[-0.0, 0.0],
            max_color_velocity_ratio=[-0.5, 0.5],
            max_color_accl_ratio=[-0.0, 0.0],
            max_pos=[-10, 10],
            max_trans_accl=[-0.0, 0.0],
            label=False,
            label_velo=False,
            label_accl=False,
            active_actions=None,
            max_T=9,
            only_use_digit4=False,
            backgrnd=False,
            shared_transition=False,
            color_off=False,
            rng=None
    ):
        self.T = T
        self.max_T = max_T
        self.rng = rng if rng is not None else np.random
        self.transforms = transforms
        self.data = torchvision.datasets.MNIST(root, train, download=True)
        self.angle_velocity_range = (-max_angle_velocity_ratio, max_angle_velocity_ratio) if isinstance(
            max_angle_velocity_ratio, (int, float)) else max_angle_velocity_ratio
        self.color_velocity_range = (-max_color_velocity_ratio, max_color_velocity_ratio) if isinstance(
            max_color_velocity_ratio, (int, float)) else max_color_velocity_ratio
        self.angle_accl_range = (-max_angle_accl_ratio, max_angle_accl_ratio) if isinstance(
            max_angle_accl_ratio, (int, float)) else max_angle_accl_ratio
        self.color_accl_range = (-max_color_accl_ratio, max_color_accl_ratio) if isinstance(
            max_color_accl_ratio, (int, float)) else max_color_accl_ratio
        self.color_off = color_off
        self.max_pos = max_pos
        self.max_trans_accl = max_trans_accl
        self.label = label
        self.label_velo = label_velo
        self.label_accl = label_accl
        self.active_actions = self.default_active_actions if active_actions is None else active_actions
        if backgrnd:
            print("""
            =============
            background ON
            =============
            """)
            fname = "MNIST/train_dat.pt" if train else "MNIST/test_dat.pt"
            self.backgrnd_data = torch.load(os.path.join(root, fname))
        if only_use_digit4:
            datas = []
            for pair in self.data:
                if pair[1] == 4:
                    datas.append(pair)
            self.data = datas
        self.shared_transition = shared_transition
        if self.shared_transition:
            self.init_shared_transition_parameters()

    def init_shared_transition_parameters(self):
        self.angles_v = self.rng.uniform(math.pi * self.angle_velocity_range[0],
                                         math.pi * self.angle_velocity_range[1], size=1)
        self.angles_a = self.rng.uniform(math.pi * self.angle_accl_range[0],
                                         math.pi * self.angle_accl_range[1], size=1)
        self.color_v = 0.5 * self.rng.uniform(self.color_velocity_range[0],
                                              self.color_velocity_range[1], size=1)
        self.color_a = 0.5 * \
            self.rng.uniform(
                self.color_accl_range[0], self.color_accl_range[1], size=1)
        pos0 = self.rng.uniform(self.max_pos[0], self.max_pos[1], size=[2])
        pos1 = self.rng.uniform(self.max_pos[0], self.max_pos[1], size=[2])
        self.pos_v = (pos1-pos0)/(self.max_T - 1)
        self.pos_a = self.rng.uniform(
            self.max_trans_accl[0], self.max_trans_accl[1], size=[2])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        image = np.array(self.data[i][0], np.float32).reshape(28, 28)
        image = resize(image, [24, 24])
        image = cv2.copyMakeBorder(
            image, 4, 4, 4, 4, cv2.BORDER_CONSTANT, value=(0, 0, 0))
        angles_0 = self.rng.uniform(0, 2 * math.pi, size=1)
        color_0 = self.rng.uniform(0, 1, size=1)
        pos0 = self.rng.uniform(self.max_pos[0], self.max_pos[1], size=[2])
        pos1 = self.rng.uniform(self.max_pos[0], self.max_pos[1], size=[2])
        if self.shared_transition:
            (angles_v, angles_a) = (self.angles_v, self.angles_a)
            (color_v, color_a) = (self.color_v, self.color_a)
            (pos_v, pos_a) = (self.pos_v, self.pos_a)
        else:
            angles_v = self.rng.uniform(math.pi * self.angle_velocity_range[0],
                                        math.pi * self.angle_velocity_range[1], size=1)
            angles_a = self.rng.uniform(math.pi * self.angle_accl_range[0],
                                        math.pi * self.angle_accl_range[1], size=1)
            color_v = 0.5 * self.rng.uniform(self.color_velocity_range[0],
                                             self.color_velocity_range[1], size=1)
            color_a = 0.5 * \
                self.rng.uniform(
                    self.color_accl_range[0], self.color_accl_range[1], size=1)
            pos_v = (pos1-pos0)/(self.max_T - 1)
            pos_a = self.rng.uniform(
                self.max_trans_accl[0], self.max_trans_accl[1], size=[2])
        images = []
        for t in range(self.T):
            angles_t = (0.5 * angles_a * t**2 + angles_v * t +
                        angles_0) if 0 in self.active_actions else angles_0
            color_t = ((0.5 * color_a * t**2 + t * color_v + color_0) %
                       1) if 1 in self.active_actions else color_0
            pos_t = (0.5 * pos_a * t**2 + pos_v * t +
                     pos0) if 2 in self.active_actions else pos0
            mat = get_RTmat(0, 0, float(angles_t), 32, 32, pos_t[0], pos_t[1])
            _image = cv2.warpPerspective(image.copy(), mat, (32, 32))
            rgb = np.asarray(colorsys.hsv_to_rgb(
                color_t, 1, 1), dtype=np.float32)
            _image = np.concatenate(
                [_image[:, :, None]] * 3, axis=-1) * rgb[None, None]
            _image = _image / 255.
            if hasattr(self, 'backgrnd_data'):
                _imagemask = (np.sum(_image, axis=2, keepdims=True) < 3e-1)
                _image = torch.tensor(
                    _image) + self.backgrnd_data[i].permute([1, 2, 0]) * (_imagemask)
                _image = np.array(torch.clip(_image, max=1.))
            images.append(self.transforms(_image.astype(np.float32)))
        if self.label or self.label_velo:
            ret = [images]
            if self.label:
                ret += [self.data[i][1]]
            if self.label_velo:
                ret += [
                    freq_to_wave(angles_v.astype(np.float32)),
                    freq_to_wave((2 * math.pi * color_v).astype(np.float32)),
                    pos_v.astype(np.float32)
                ]
            if self.label_accl:
                ret += [
                    freq_to_wave(angles_a.astype(np.float32)),
                    freq_to_wave((2 * math.pi * color_a).astype(np.float32)),
                    pos_a.astype(np.float32)
                ]
            return ret
        else:
            return images
7,439
42.508772
104
py
meta_sequential_prediction
meta_sequential_prediction-main/datasets/__init__.py
0
0
0
py
meta_sequential_prediction
meta_sequential_prediction-main/utils/misc.py
import math
import torch
from torch import nn
from einops import repeat
import numpy as np


def freq_to_wave(freq, is_radian=True):
    _freq_rad = 2 * math.pi * freq if not is_radian else freq
    return torch.hstack([torch.cos(_freq_rad), torch.sin(_freq_rad)])


def unsqueeze_at_the_end(x, n):
    return x[(...,) + (None,)*n]


def get_RTmat(theta, phi, gamma, w, h, dx, dy):
    d = np.sqrt(h ** 2 + w ** 2)
    f = d / (2 * np.sin(gamma) if np.sin(gamma) != 0 else 1)
    # Projection 2D -> 3D matrix
    A1 = np.array([[1, 0, -w / 2],
                   [0, 1, -h / 2],
                   [0, 0, 1],
                   [0, 0, 1]])
    # Rotation matrices around the X, Y, and Z axis
    RX = np.array([[1, 0, 0, 0],
                   [0, np.cos(theta), -np.sin(theta), 0],
                   [0, np.sin(theta), np.cos(theta), 0],
                   [0, 0, 0, 1]])
    RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],
                   [0, 1, 0, 0],
                   [np.sin(phi), 0, np.cos(phi), 0],
                   [0, 0, 0, 1]])
    RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],
                   [np.sin(gamma), np.cos(gamma), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    # Composed rotation matrix with (RX, RY, RZ)
    R = np.dot(np.dot(RX, RY), RZ)
    # Translation matrix
    T = np.array([[1, 0, 0, dx],
                  [0, 1, 0, dy],
                  [0, 0, 1, f],
                  [0, 0, 0, 1]])
    # Projection 3D -> 2D matrix
    A2 = np.array([[f, 0, w / 2, 0],
                   [0, f, h / 2, 0],
                   [0, 0, 1, 0]])
    return np.dot(A2, np.dot(T, np.dot(R, A1)))
1,664
29.272727
69
py
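A quick check of freq_to_wave's (cos, sin) encoding; the input tensor is an arbitrary assumption:

import math
import torch
from utils.misc import freq_to_wave

w = freq_to_wave(torch.tensor([0.0, math.pi / 2]))
print(w)   # cosines then sines: approximately [1, 0, 0, 1]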
meta_sequential_prediction
meta_sequential_prediction-main/utils/yaml_utils.py
import os
import sys
import functools
import argparse
import yaml
import pdb

sys.path.append('../')
sys.path.append('./')


# Originally created by @msaito
def load_module(fn, name):
    mod_name = os.path.splitext(os.path.basename(fn))[0]
    mod_path = os.path.dirname(fn)
    sys.path.insert(0, mod_path)
    return getattr(__import__(mod_name), name)


def load_component(config):
    class_fn = load_module(config['fn'], config['name'])
    return class_fn(**config['args']) if 'args' in config.keys() else class_fn()


def load_component_fxn(config):
    fxn = load_module(config['fn'], config['name'])
    return fxn


def make_function(module, name):
    fxn = getattr(module, name)
    return fxn


def make_instance(module, config=[], args=None):
    Class = getattr(module, config['name'])
    kwargs = config['args']
    if args is not None:
        kwargs.update(args)
    return Class(**kwargs)


'''
combines multiple configs
'''


def make_config(conf_dicts, attr_lists=None):
    def merge_dictionary(base, diff):
        for key, value in diff.items():
            if (key in base and isinstance(base[key], dict)
                    and isinstance(diff[key], dict)):
                merge_dictionary(base[key], diff[key])
            else:
                base[key] = diff[key]
    config = {}
    for diff in conf_dicts:
        merge_dictionary(config, diff)
    if attr_lists is not None:
        for attr in attr_lists:
            module, new_value = attr.split('=')
            keys = module.split('.')
            target = functools.reduce(dict.__getitem__, keys[:-1], config)
            target[keys[-1]] = yaml.load(new_value)
    return config


'''
argument parser that uses make_config
'''


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'infiles', nargs='+', type=argparse.FileType('r'), default=())
    parser.add_argument('-a', '--attrs', nargs='*', default=())
    parser.add_argument('-c', '--comment', default='')
    parser.add_argument('-w', '--warning', action='store_true')
    parser.add_argument('-o', '--output-config', default='')
    args = parser.parse_args()
    conf_dicts = [yaml.load(fp) for fp in args.infiles]
    config = make_config(conf_dicts, args.attrs)
    return config, args
2,281
25.534884
80
py
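A small illustration of make_config's recursive merge; the config keys are invented:

from utils.yaml_utils import make_config

base = {'model': {'name': 'SeqAELSTSQ', 'args': {'dim_a': 8, 'dim_m': 16}}}
override = {'model': {'args': {'dim_a': 4}}}
print(make_config([base, override]))
# {'model': {'name': 'SeqAELSTSQ', 'args': {'dim_a': 4, 'dim_m': 16}}} -- later dicts win per leaf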
meta_sequential_prediction
meta_sequential_prediction-main/utils/clr.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_pfn_extras as ppe


def simclr(zs, temperature=1.0, normalize=True, loss_type='cossim'):
    if normalize:
        zs = [F.normalize(z, p=2, dim=1) for z in zs]
    m = len(zs)
    n = zs[0].shape[0]
    device = zs[0].device
    mask = torch.eye(n * m, device=device)
    label0 = torch.fmod(n + torch.arange(0, m * n, device=device), n * m)
    z = torch.cat(zs, 0)
    if loss_type == 'euclid':
        sim = - torch.cdist(z, z)
    elif loss_type == 'sq':
        sim = - torch.cdist(z, z) ** 2
    elif loss_type == 'cossim':
        sim = torch.matmul(z, z.transpose(0, 1))
    else:
        raise NotImplementedError
    logit_zz = sim / temperature
    logit_zz += mask * -1e8
    loss = nn.CrossEntropyLoss()(logit_zz, label0)
    return loss
834
28.821429
73
py
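A minimal call of the InfoNCE-style loss above on two random views (sizes and temperature are assumptions); row i of the first view is the positive for row i of the second:

import torch
from utils.clr import simclr

z1, z2 = torch.randn(16, 128), torch.randn(16, 128)
loss = simclr([z1, z2], temperature=0.1)   # scalar cross-entropy over the 32 stacked embeddings
print(loss)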
meta_sequential_prediction
meta_sequential_prediction-main/utils/optimize_bd_cob.py
import torch
import torch.nn as nn
from einops import repeat
from utils.laplacian import tracenorm_of_normalized_laplacian, make_identity_like


def optimize_bd_cob(mats, batchsize=32, n_epochs=50, epochs_monitor=10):
    # Optimize change of basis matrix U by minimizing block diagonalization loss

    class ChangeOfBasis(torch.nn.Module):
        def __init__(self, d):
            super().__init__()
            self.U = nn.Parameter(torch.empty(d, d))
            torch.nn.init.orthogonal_(self.U)

        def __call__(self, mat):
            _U = repeat(self.U, "a1 a2 -> n a1 a2", n=mat.shape[0])
            n_mat = torch.linalg.solve(_U, mat) @ _U
            return n_mat

    change_of_basis = ChangeOfBasis(mats.shape[-1]).to(mats.device)
    dataloader = torch.utils.data.DataLoader(
        mats, batch_size=batchsize, shuffle=True, num_workers=0)
    optimizer = torch.optim.Adam(change_of_basis.parameters(), lr=0.1)
    for ep in range(n_epochs):
        total_loss, total_N = 0, 0
        for mat in dataloader:
            n_mat = change_of_basis(mat)
            n_mat = torch.abs(n_mat)
            n_mat = torch.matmul(n_mat.transpose(-2, -1), n_mat)
            loss = torch.mean(
                tracenorm_of_normalized_laplacian(n_mat))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item() * mat.shape[0]
            total_N += mat.shape[0]
        if ((ep+1) % epochs_monitor) == 0:
            print('ep:{} loss:{}'.format(ep, total_loss/total_N))
    return change_of_basis
1,579
37.536585
81
py
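A toy invocation on random matrices (all sizes and epoch counts are arbitrary assumptions); the returned module conjugates its input by the learned basis, U^{-1} M U:

import torch
from utils.optimize_bd_cob import optimize_bd_cob

mats = torch.randn(64, 8, 8)                               # a batch of 8x8 matrices
cob = optimize_bd_cob(mats, n_epochs=10, epochs_monitor=5)
aligned = cob(mats[:4])                                    # conjugated copies, shape (4, 8, 8)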
meta_sequential_prediction
meta_sequential_prediction-main/utils/emb2d.py
import math
import numpy as np
import torch
import torch.nn as nn


class Emb2D(nn.modules.lazy.LazyModuleMixin, nn.Module):
    def __init__(self, dim=64):
        super().__init__()
        self.dim = dim
        self.emb = torch.nn.parameter.UninitializedParameter()

    def __call__(self, x):
        if torch.nn.parameter.is_lazy(self.emb):
            _, h, w = x.shape[1:]
            self.emb.materialize((self.dim, h, w))
            self.emb.data = positionalencoding2d(self.dim, h, w)
        emb = torch.tile(self.emb[None].to(x.device), [x.shape[0], 1, 1, 1])
        x = torch.cat([x, emb], axis=1)
        return x


# Copied from https://github.com/wzlxjtu/PositionalEncoding2D/blob/master/positionalembedding2d.py
def positionalencoding2d(d_model, height, width):
    """
    :param d_model: dimension of the model
    :param height: height of the positions
    :param width: width of the positions
    :return: d_model*height*width position matrix
    """
    if d_model % 4 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with "
                         "odd dimension (got dim={:d})".format(d_model))
    pe = torch.zeros(d_model, height, width)
    # Each dimension use half of d_model
    d_model = int(d_model / 2)
    div_term = torch.exp(torch.arange(0., d_model, 2) *
                         -(math.log(10000.0) / d_model))
    pos_w = torch.arange(0., width).unsqueeze(1)
    pos_h = torch.arange(0., height).unsqueeze(1)
    pe[0:d_model:2, :, :] = torch.sin(
        pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    pe[1:d_model:2, :, :] = torch.cos(
        pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    pe[d_model::2, :, :] = torch.sin(
        pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    pe[d_model + 1::2, :, :] = torch.cos(
        pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    return pe
1,949
36.5
98
py
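A shape sketch for Emb2D (input sizes are assumptions); the sincos embedding is built lazily on the first call and concatenated along the channel axis:

import torch
from utils.emb2d import Emb2D

pe = Emb2D(dim=64)
x = torch.randn(2, 3, 16, 16)
y = pe(x)
print(y.shape)   # (2, 67, 16, 16): 3 input channels + 64 positional channels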
meta_sequential_prediction
meta_sequential_prediction-main/utils/__init__.py
0
0
0
py
meta_sequential_prediction
meta_sequential_prediction-main/utils/laplacian.py
import torch
import numpy as np
from einops import repeat


def make_identity(N, D, device):
    if N is None:
        return torch.Tensor(np.array(np.eye(D))).to(device)
    else:
        return torch.Tensor(np.array([np.eye(D)] * N)).to(device)


def make_identity_like(A):
    assert A.shape[-2] == A.shape[-1]  # Ensure A is a batch of square matrices
    device = A.device
    shape = A.shape[:-2]
    eye = torch.eye(A.shape[-1], device=device)[(None,)*len(shape)]
    return eye.repeat(*shape, 1, 1)


def make_diagonal(vecs):
    vecs = vecs[..., None].repeat(*([1, ]*len(vecs.shape)), vecs.shape[-1])
    return vecs * make_identity_like(vecs)


# Calculate Normalized Laplacian
def tracenorm_of_normalized_laplacian(A):
    D_vec = torch.sum(A, axis=-1)
    D = make_diagonal(D_vec)
    L = D - A
    inv_A_diag = make_diagonal(
        1 / torch.sqrt(1e-10 + D_vec))
    L = torch.matmul(inv_A_diag, torch.matmul(L, inv_A_diag))
    sigmas = torch.linalg.svdvals(L)
    return torch.sum(sigmas, axis=-1)
1,012
28.794118
79
py
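A hand-checkable case for the trace norm above: for an all-ones 4x4 affinity (self-loops included), the normalized Laplacian is I - J/4 with eigenvalues {0, 1, 1, 1}, so the value is 3:

import torch
from utils.laplacian import tracenorm_of_normalized_laplacian

A = torch.ones(2, 4, 4)
print(tracenorm_of_normalized_laplacian(A))   # approximately tensor([3., 3.])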
meta_sequential_prediction
meta_sequential_prediction-main/utils/weight_standarization.py
import torch.nn as nn
import torch.nn.functional as F


class WeightStandarization(nn.Module):
    def forward(self, weight):
        weight_mean = weight.mean(dim=1, keepdim=True).mean(
            dim=2, keepdim=True).mean(dim=3, keepdim=True)
        weight = weight - weight_mean
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        return weight


class WeightStandarization1d(nn.Module):
    def forward(self, weight):
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
        weight = weight - weight_mean
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        return weight


class WeightStandarization0d(nn.Module):
    def forward(self, weight):
        weight_mean = weight.mean(dim=1, keepdim=True)
        weight = weight - weight_mean
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        return weight
1,165
37.866667
81
py
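A sketch of wiring the standardization in as a torch parametrization, as the resblock module does; the conv shape is an arbitrary assumption:

import torch.nn as nn
import torch.nn.utils.parametrize as P
from utils.weight_standarization import WeightStandarization

conv = nn.Conv2d(3, 8, 3, padding=1)
P.register_parametrization(conv, 'weight', WeightStandarization())
w = conv.weight                # standardized on the fly per output filter
print(w.mean(dim=(1, 2, 3)))   # approximately zeros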
MosfireDRP
MosfireDRP-master/setup.py
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import glob
import os
import sys

import ah_bootstrap
from setuptools import setup

# A dirty hack to get around some early import/configuration ambiguities
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True

from astropy_helpers.setup_helpers import (
    register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py

# Get some values from the setup.cfg
import configparser as config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')

# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__

# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME

# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '1.0.dev'

# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION

if not RELEASE:
    VERSION += get_git_devstr(False)

# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)

# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)

# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))

# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']

# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()

# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')

# Define entry points for command-line scripts
entry_points = {}
entry_points['console_scripts'] = [
    'astropy-package-template-example = packagename.example_mod:main',
]

# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)

setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy'],
      install_requires=['astropy'],
      provides=[PACKAGENAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=True,
      entry_points=entry_points,
      **package_info
      )
3,746
31.025641
79
py
MosfireDRP
MosfireDRP-master/ez_setup.py
#!python
"""Bootstrap setuptools installation

If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::

    from ez_setup import use_setuptools
    use_setuptools()

If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.

This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform

from distutils import log

try:
    from site import USER_SITE
except ImportError:
    USER_SITE = None

DEFAULT_VERSION = "1.4.2"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"


def _python_cmd(*args):
    args = (sys.executable,) + args
    return subprocess.call(args) == 0


def _check_call_py24(cmd, *args, **kwargs):
    res = subprocess.call(cmd, *args, **kwargs)

    class CalledProcessError(Exception):
        pass
    if not res == 0:
        msg = "Command '%s' returned non-zero exit status %d" % (cmd, res)
        raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)


def _install(tarball, install_args=()):
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)


def _build_egg(egg, tarball, to_dir):
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')


def _do_download(version, download_base, to_dir, download_delay):
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']

    import setuptools
    setuptools.bootstrap_install_from = egg


def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)


def _clean_check(cmd, target):
    """
    Run the command to download target. If the command fails, clean up before
    re-raising the error.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise


def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    cmd = [
        'powershell',
        '-Command',
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
    ]
    _clean_check(cmd, target)


def has_powershell():
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except:
            return False
    finally:
        devnull.close()
    return True

download_file_powershell.viable = has_powershell


def download_file_curl(url, target):
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)


def has_curl():
    cmd = ['curl', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except:
            return False
    finally:
        devnull.close()
    return True

download_file_curl.viable = has_curl


def download_file_wget(url, target):
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)


def has_wget():
    cmd = ['wget', '--version']
    devnull = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except:
            return False
    finally:
        devnull.close()
    return True

download_file_wget.viable = has_wget


def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, "wb")
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()

download_file_insecure.viable = lambda: True


def get_best_downloader():
    downloaders = [
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    ]

    for dl in downloaders:
        if dl.viable():
            return dl


def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15,
                        downloader_factory=get_best_downloader):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)


def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
       directory and set owner, modification time and permissions on
       directories afterwards. `path' specifies a different directory
       to extract to. `members' is optional and must be a subset of the
       list returned by getmembers().
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)


def _build_install_args(options):
    """
    Build the arguments to 'python setup.py install' on the setuptools package
    """
    install_args = []
    if options.user_install:
        if sys.version_info < (2, 6):
            log.warn("--user requires Python 2.6 or later")
            raise SystemExit(1)
        install_args.append('--user')
    return install_args


def _parse_args():
    """
    Parse the command line for options
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options


def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    options = _parse_args()
    tarball = download_setuptools(download_base=options.download_base,
                                  downloader_factory=options.downloader_factory)
    return _install(tarball, _build_install_args(options))

if __name__ == '__main__':
    sys.exit(main())
12,155
30.738903
87
py
MosfireDRP
MosfireDRP-master/ah_bootstrap.py
""" This bootstrap module contains code for ensuring that the astropy_helpers package will be importable by the time the setup.py script runs. It also includes some workarounds to ensure that a recent-enough version of setuptools is being used for the installation. This module should be the first thing imported in the setup.py of distributions that make use of the utilities in astropy_helpers. If the distribution ships with its own copy of astropy_helpers, this module will first attempt to import from the shipped copy. However, it will also check PyPI to see if there are any bug-fix releases on top of the current version that may be useful to get past platform-specific bugs that have been fixed. When running setup.py, use the ``--offline`` command-line option to disable the auto-upgrade checks. When this module is imported or otherwise executed it automatically calls a main function that attempts to read the project's setup.cfg file, which it checks for a configuration section called ``[ah_bootstrap]`` the presences of that section, and options therein, determine the next step taken: If it contains an option called ``auto_use`` with a value of ``True``, it will automatically call the main function of this module called `use_astropy_helpers` (see that function's docstring for full details). Otherwise no further action is taken (however, ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the setup.py script). Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same names as the arguments to `use_astropy_helpers`, and can be used to configure the bootstrap script when ``auto_use = True``. See https://github.com/astropy/astropy-helpers for more details, and for the latest version of this module. """ import contextlib import errno import imp import io import locale import os import re import subprocess as sp import sys try: from ConfigParser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser if sys.version_info[0] < 3: _str_types = (str, unicode) _text_type = unicode PY3 = False else: _str_types = (str, bytes) _text_type = str PY3 = True # Some pre-setuptools checks to ensure that either distribute or setuptools >= # 0.7 is used (over pre-distribute setuptools) if it is available on the path; # otherwise the latest setuptools will be downloaded and bootstrapped with # ``ez_setup.py``. 
This used to be included in a separate file called # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py try: import pkg_resources _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') # This may raise a DistributionNotFound in which case no version of # setuptools or distribute is properly installed _setuptools = pkg_resources.get_distribution('setuptools') if _setuptools not in _setuptools_req: # Older version of setuptools; check if we have distribute; again if # this results in DistributionNotFound we want to give up _distribute = pkg_resources.get_distribution('distribute') if _setuptools != _distribute: # It's possible on some pathological systems to have an old version # of setuptools and distribute on sys.path simultaneously; make # sure distribute is the one that's used sys.path.insert(1, _distribute.location) _distribute.activate() imp.reload(pkg_resources) except: # There are several types of exceptions that can occur here; if all else # fails bootstrap and use the bootstrapped version from ez_setup import use_setuptools use_setuptools() from distutils import log from distutils.debug import DEBUG # In case it didn't successfully import before the ez_setup checks import pkg_resources from setuptools import Distribution from setuptools.package_index import PackageIndex from setuptools.sandbox import run_setup # Note: The following import is required as a workaround to # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this # module now, it will get cleaned up after `run_setup` is called, but that will # later cause the TemporaryDirectory class defined in it to stop working when # used later on by setuptools try: import setuptools.py31compat except ImportError: pass # TODO: Maybe enable checking for a specific version of astropy_helpers? DIST_NAME = 'astropy-helpers' PACKAGE_NAME = 'astropy_helpers' # Defaults for other options DOWNLOAD_IF_NEEDED = True INDEX_URL = 'https://pypi.python.org/simple' USE_GIT = True AUTO_UPGRADE = True def use_astropy_helpers(path=None, download_if_needed=None, index_url=None, use_git=None, auto_upgrade=None): """ Ensure that the `astropy_helpers` module is available and is importable. This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule, or will download it from PyPI if necessary. Parameters ---------- path : str or None, optional A filesystem path relative to the root of the project's source code that should be added to `sys.path` so that `astropy_helpers` can be imported from that path. If the path is a git submodule it will automatically be initialzed and/or updated. The path may also be to a ``.tar.gz`` archive of the astropy_helpers source distribution. In this case the archive is automatically unpacked and made temporarily available on `sys.path` as a ``.egg`` archive. If `None` skip straight to downloading. download_if_needed : bool, optional If the provided filesystem path is not found an attempt will be made to download astropy_helpers from PyPI. It will then be made temporarily available on `sys.path` as a ``.egg`` archive (using the ``setup_requires`` feature of setuptools. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. index_url : str, optional If provided, use a different URL for the Python package index than the main PyPI server. use_git : bool, optional If `False` no git commands will be used--this effectively disables support for git submodules. 
If the ``--no-git`` option is given at the command line the value of this argument is overridden to `False`. auto_upgrade : bool, optional By default, when installing a package from a non-development source distribution ah_boostrap will try to automatically check for patch releases to astropy-helpers on PyPI and use the patched version over any bundled versions. Setting this to `False` will disable that functionality. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. """ # True by default, unless the --offline option was provided on the command # line if '--offline' in sys.argv: download_if_needed = False auto_upgrade = False offline = True sys.argv.remove('--offline') else: offline = False if '--no-git' in sys.argv: use_git = False sys.argv.remove('--no-git') if path is None: path = PACKAGE_NAME if download_if_needed is None: download_if_needed = DOWNLOAD_IF_NEEDED if index_url is None: index_url = INDEX_URL # If this is a release then the .git directory will not exist so we # should not use git. git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) if use_git is None and not git_dir_exists: use_git = False if use_git is None: use_git = USE_GIT if auto_upgrade is None: auto_upgrade = AUTO_UPGRADE # Declared as False by default--later we check if astropy-helpers can be # upgraded from PyPI, but only if not using a source distribution (as in # the case of import from a git submodule) is_submodule = False if not isinstance(path, _str_types): if path is not None: raise TypeError('path must be a string or None') if not download_if_needed: log.debug('a path was not given and download from PyPI was not ' 'allowed so this is effectively a no-op') return elif not os.path.exists(path) or os.path.isdir(path): # Even if the given path does not exist on the filesystem, if it *is* a # submodule, `git submodule init` will create it is_submodule = _check_submodule(path, use_git=use_git, offline=offline) if is_submodule or os.path.isdir(path): log.info( 'Attempting to import astropy_helpers from {0} {1!r}'.format( 'submodule' if is_submodule else 'directory', path)) dist = _directory_import(path) else: dist = None if dist is None: msg = ( 'The requested path {0!r} for importing {1} does not ' 'exist, or does not contain a copy of the {1} package. 
' 'Attempting download instead.'.format(path, PACKAGE_NAME)) if download_if_needed: log.warn(msg) else: raise _AHBootstrapSystemExit(msg) elif os.path.isfile(path): # Handle importing from a source archive; this also uses setup_requires # but points easy_install directly to the source archive try: dist = _do_download(find_links=[path]) except Exception as e: if download_if_needed: log.warn('{0}\nWill attempt to download astropy_helpers from ' 'PyPI instead.'.format(str(e))) dist = None else: raise _AHBootstrapSystemExit(e.args[0]) else: msg = ('{0!r} is not a valid file or directory (it could be a ' 'symlink?)'.format(path)) if download_if_needed: log.warn(msg) dist = None else: raise _AHBootstrapSystemExit(msg) if dist is not None and auto_upgrade and not is_submodule: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = _do_upgrade(dist, index_url) if upgrade is not None: dist = upgrade elif dist is None: # Last resort--go ahead and try to download the latest version from # PyPI try: if download_if_needed: log.warn( "Downloading astropy_helpers; run setup.py with the " "--offline option to force offline installation.") dist = _do_download(index_url=index_url) else: raise _AHBootstrapSystemExit( "No source for the astropy_helpers package; " "astropy_helpers must be available as a prerequisite to " "installing this package.") except Exception as e: if DEBUG: raise else: raise _AHBootstrapSystemExit(e.args[0]) if dist is not None: # Otherwise we found a version of astropy-helpers so we're done # Just activate the found distribibution on sys.path--if we did a # download this usually happens automatically but do it again just to # be sure # Note: Adding the dist to the global working set also activates it by # default pkg_resources.working_set.add(dist) def _do_download(version='', find_links=None, index_url=None): try: if find_links: allow_hosts = '' index_url = None else: allow_hosts = None # Annoyingly, setuptools will not handle other arguments to # Distribution (such as options) before handling setup_requires, so it # is not straightfoward to programmatically augment the arguments which # are passed to easy_install class _Distribution(Distribution): def get_option_dict(self, command_name): opts = Distribution.get_option_dict(self, command_name) if command_name == 'easy_install': if find_links is not None: opts['find_links'] = ('setup script', find_links) if index_url is not None: opts['index_url'] = ('setup script', index_url) if allow_hosts is not None: opts['allow_hosts'] = ('setup script', allow_hosts) return opts if version: req = '{0}=={1}'.format(DIST_NAME, version) else: req = DIST_NAME attrs = {'setup_requires': [req]} if DEBUG: dist = _Distribution(attrs=attrs) else: with _silence(): dist = _Distribution(attrs=attrs) # If the setup_requires succeeded it will have added the new dist to # the main working_set return pkg_resources.working_set.by_key.get(DIST_NAME) except Exception as e: if DEBUG: raise msg = 'Error retrieving astropy helpers from {0}:\n{1}' if find_links: source = find_links[0] elif index_url: source = index_url else: source = 'PyPI' raise Exception(msg.format(source, repr(e))) def _do_upgrade(dist, index_url): # Build up a requirement for a higher bugfix release but a lower minor # release (so API compatibility is guaranteed) # sketchy version parsing--maybe come up with something a bit more # robust for this major, minor = (int(part) for part in dist.parsed_version[:2]) next_minor = 
'.'.join([str(major), str(minor + 1), '0']) req = pkg_resources.Requirement.parse( '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_minor)) package_index = PackageIndex(index_url=index_url) upgrade = package_index.obtain(req) if upgrade is not None: return _do_download(version=upgrade.version, index_url=index_url) def _directory_import(path): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(path) # Use an empty WorkingSet rather than the man pkg_resources.working_set, # since on older versions of setuptools this will invoke a VersionConflict # when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... return dist return dist def _check_submodule(path, use_git=True, offline=False): """ Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for futher details. """ if use_git: return _check_submodule_using_git(path, offline) else: return _check_submodule_no_git(path) def _check_submodule_using_git(path, offline): """ Check if the given path is a git submodule. If so, attempt to initialize and/or update the submodule if needed. This function makes calls to the ``git`` command in subprocesses. The ``_check_submodule_no_git`` option uses pure Python to check if the given path looks like a git submodule, but it cannot perform updates. """ if PY3 and not isinstance(path, _text_type): fs_encoding = sys.getfilesystemencoding() path = path.decode(fs_encoding) try: p = sp.Popen(['git', 'submodule', 'status', '--', path], stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: # The git command simply wasn't found; this is most likely the # case on user systems that don't have git and are simply # trying to install the package from PyPI or a source # distribution. Silently ignore this case and simply don't try # to use submodules return False else: raise _AHBoostrapSystemExit( 'An unexpected error occurred when running the ' '`git submodule status` command:\n{0}'.format(str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' if p.returncode != 0 or stderr: # Unfortunately the return code alone cannot be relied on, as # earlier versions of git returned 0 even if the requested submodule # does not exist stderr = stderr.decode(stdio_encoding) # This is a warning that occurs in perl (from running git submodule) # which only occurs with a malformatted locale setting which can # happen sometimes on OSX. 
See again # https://github.com/astropy/astropy/issues/2749 perl_warning = ('perl: warning: Falling back to the standard locale ' '("C").') if not stderr.strip().endswith(perl_warning): # Some other uknown error condition occurred log.warn('git submodule command failed ' 'unexpectedly:\n{0}'.format(stderr)) return False stdout = stdout.decode(stdio_encoding) # The stdout should only contain one line--the status of the # requested submodule m = _git_submodule_status_re.match(stdout) if m: # Yes, the path *is* a git submodule _update_submodule(m.group('submodule'), m.group('status'), offline) return True else: log.warn( 'Unexpected output from `git submodule status`:\n{0}\n' 'Will attempt import from {1!r} regardless.'.format( stdout, path)) return False def _check_submodule_no_git(path): """ Like ``_check_submodule_using_git``, but simply parses the .gitmodules file to determine if the supplied path is a git submodule, and does not exec any subprocesses. This can only determine if a path is a submodule--it does not perform updates, etc. This function may need to be updated if the format of the .gitmodules file is changed between git versions. """ gitmodules_path = os.path.abspath('.gitmodules') if not os.path.isfile(gitmodules_path): return False # This is a minimal reader for gitconfig-style files. It handles a few of # the quirks that make gitconfig files incompatible with ConfigParser-style # files, but does not support the full gitconfig syntaix (just enough # needed to read a .gitmodules file). gitmodules_fileobj = io.StringIO() # Must use io.open for cross-Python-compatible behavior wrt unicode with io.open(gitmodules_path) as f: for line in f: # gitconfig files are more flexible with leading whitespace; just # go ahead and remove it line = line.lstrip() # comments can start with either # or ; if line and line[0] in (':', ';'): continue gitmodules_fileobj.write(line) gitmodules_fileobj.seek(0) cfg = RawConfigParser() try: cfg.readfp(gitmodules_fileobj) except Exception as exc: log.warn('Malformatted .gitmodules file: {0}\n' '{1} cannot be assumed to be a git submodule.'.format( exc, path)) return False for section in cfg.sections(): if not cfg.has_option(section, 'path'): continue submodule_path = cfg.get(section, 'path').rstrip(os.sep) if submodule_path == path.rstrip(os.sep): return True return False def _update_submodule(submodule, status, offline): if status == ' ': # The submodule is up to date; no action necessary return elif status == '-': if offline: raise _AHBootstrapSystemExit( "Cannot initialize the {0} submodule in --offline mode; this " "requires being able to clone the submodule from an online " "repository.".format(submodule)) cmd = ['update', '--init'] action = 'Initializing' elif status == '+': cmd = ['update'] action = 'Updating' if offline: cmd.append('--no-fetch') elif status == 'U': raise _AHBoostrapSystemExit( 'Error: Submodule {0} contains unresolved merge conflicts. ' 'Please complete or abandon any changes in the submodule so that ' 'it is in a usable state, then try again.'.format(submodule)) else: log.warn('Unknown status {0!r} for git submodule {1!r}. 
Will ' 'attempt to use the submodule as-is, but try to ensure ' 'that the submodule is in a clean state and contains no ' 'conflicts or errors.\n{2}'.format(status, submodule, _err_help_msg)) return err_msg = None cmd = ['git', 'submodule'] + cmd + ['--', submodule] log.warn('{0} {1} submodule with: `{2}`'.format( action, submodule, ' '.join(cmd))) try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() except OSError as e: err_msg = str(e) else: if p.returncode != 0: stderr_encoding = locale.getdefaultlocale()[1] err_msg = stderr.decode(stderr_encoding) if err_msg: log.warn('An unexpected error occurred updating the git submodule ' '{0!r}:\n{1}\n{2}'.format(submodule, err_msg, _err_help_msg)) class _DummyFile(object): """A noop writeable object.""" errors = '' # Required for Python 3.x encoding = 'utf-8' def write(self, s): pass def flush(self): pass @contextlib.contextmanager def _silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() exception_occurred = False try: yield except: exception_occurred = True # Go ahead and clean up so that exception handling can work normally sys.stdout = old_stdout sys.stderr = old_stderr raise if not exception_occurred: sys.stdout = old_stdout sys.stderr = old_stderr _err_help_msg = """ If the problem persists consider installing astropy_helpers manually using pip (`pip install astropy_helpers`) or by manually downloading the source archive, extracting it, and installing by running `python setup.py install` from the root of the extracted source code. """ class _AHBootstrapSystemExit(SystemExit): def __init__(self, *args): if not args: msg = 'An unknown problem occurred bootstrapping astropy_helpers.' else: msg = args[0] msg += '\n' + _err_help_msg super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) if sys.version_info[:2] < (2, 7): # In Python 2.6 the distutils log does not log warnings, errors, etc. to # stderr so we have to wrap it to ensure consistency at least in this # module import distutils class log(object): def __getattr__(self, attr): return getattr(distutils.log, attr) def warn(self, msg, *args): self._log_to_stderr(distutils.log.WARN, msg, *args) def error(self, msg): self._log_to_stderr(distutils.log.ERROR, msg, *args) def fatal(self, msg): self._log_to_stderr(distutils.log.FATAL, msg, *args) def log(self, level, msg, *args): if level in (distutils.log.WARN, distutils.log.ERROR, distutils.log.FATAL): self._log_to_stderr(level, msg, *args) else: distutils.log.log(level, msg, *args) def _log_to_stderr(self, level, msg, *args): # This is the only truly 'public' way to get the current threshold # of the log current_threshold = distutils.log.set_threshold(distutils.log.WARN) distutils.log.set_threshold(current_threshold) if level >= current_threshold: if args: msg = msg % args sys.stderr.write('%s\n' % msg) sys.stderr.flush() log = log() # Output of `git submodule status` is as follows: # # 1: Status indicator: '-' for submodule is uninitialized, '+' if submodule is # initialized but is not at the commit currently indicated in .gitmodules (and # thus needs to be updated), or 'U' if the submodule is in an unstable state # (i.e. has merge conflicts) # # 2. SHA-1 hash of the current commit of the submodule (we don't really need # this information but it's useful for checking that the output is correct) # # 3. 
The output of `git describe` for the submodule's current commit hash (this # includes for example what branches the commit is on) but only if the # submodule is initialized. We ignore this information for now _git_submodule_status_re = re.compile( '^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) (?P<submodule>\S+)( .*)?$') # Implement the auto-use feature; this allows use_astropy_helpers() to be used # at import-time automatically so long as the correct options are specified in # setup.cfg _CFG_OPTIONS = [('auto_use', bool), ('path', str), ('download_if_needed', bool), ('index_url', str), ('use_git', bool), ('auto_upgrade', bool)] def _main(): if not os.path.exists('setup.cfg'): return cfg = ConfigParser() try: cfg.read('setup.cfg') except Exception as e: if DEBUG: raise log.error( "Error reading setup.cfg: {0!r}\nastropy_helpers will not be " "automatically bootstrapped and package installation may fail." "\n{1}".format(e, _err_help_msg)) return if not cfg.has_section('ah_bootstrap'): return kwargs = {} for option, type_ in _CFG_OPTIONS: if not cfg.has_option('ah_bootstrap', option): continue if type_ is bool: value = cfg.getboolean('ah_bootstrap', option) else: value = cfg.get('ah_bootstrap', option) kwargs[option] = value if kwargs.pop('auto_use', False): use_astropy_helpers(**kwargs) _main()
27,951
35.538562
84
py
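The `_main()` hook that closes the bootstrap module above only fires when setup.cfg contains an `[ah_bootstrap]` section; the recognized option names are exactly those listed in `_CFG_OPTIONS` (auto_use, path, download_if_needed, index_url, use_git, auto_upgrade). A minimal sketch of such a section, with illustrative values:

[ah_bootstrap]
auto_use = True
path = astropy_helpers
download_if_needed = True
auto_upgrade = True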
MosfireDRP
MosfireDRP-master/drivers/Driver.py
import os, time, logging
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength
from MOSFIRE.MosfireDrpLog import info, debug, warning, error
logger = logging.getLogger(__name__)

import numpy as np
from matplotlib import pyplot as pl
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf

np.seterr(all='ignore')

flatops = Options.flat
waveops = Options.wavelength

#Driver file automatically generated on Sat Jul 25 17:33:42 2015
#For questions and comments, email mosfiredrp@gmail.com, submit a ticket on the ticketing system, or contact Luca Rizzi @ WMKO

maskname = 'maskname'
band = 'band'

#Set noninteractive to True to autofit wavelength solution instead of manually fitting.
noninteractiveflag=False

obsfiles=['Offset_1.25.txt','Offset_-1.25.txt']

Flats.handle_flats('Flat.txt', maskname, band, flatops)

Wavelength.imcombine(obsfiles, maskname, band, waveops)
Wavelength.fit_lambda_interactively(maskname, band, obsfiles,waveops, noninteractive=noninteractiveflag)
Wavelength.fit_lambda(maskname, band, obsfiles, obsfiles,waveops)
Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops)

# modify this variable to point to the correct wavelength file created on the previous step
Wavelength_file = 'lambda_solution_wave_stack_H_m141130_0323-0338.fits'

Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops)

redfiles = ["eps_" + file + ".fits" for file in obsfiles]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops)
1,568
37.268293
126
py
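For reference, the `redfiles` comprehension at the end of the driver above only decorates the offset-file names; a minimal sketch of what it evaluates to (`f` is used here instead of the driver's `file`, which shadows a Python builtin):

obsfiles = ['Offset_1.25.txt', 'Offset_-1.25.txt']
redfiles = ["eps_" + f + ".fits" for f in obsfiles]
print(redfiles)  # -> ['eps_Offset_1.25.txt.fits', 'eps_Offset_-1.25.txt.fits']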
MosfireDRP
MosfireDRP-master/drivers/K_driver.py
import os, time, logging
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength
from MOSFIRE.MosfireDrpLog import info, debug, warning, error
logger = logging.getLogger(__name__)

import numpy as np
from matplotlib import pyplot as pl
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf

np.seterr(all='ignore')

flatops = Options.flat
waveops = Options.wavelength

#Driver file automatically generated on Sat Jul 25 17:46:43 2015
#For questions and comments, email mosfiredrp@gmail.com, submit a ticket on the ticketing system, or contact Luca Rizzi @ WMKO

maskname = 'maskname'
band = 'band'

#Set noninteractive to True to autofit wavelength solution instead of manually fitting.
noninteractiveflag=False

obsfiles=['Offset_1.25.txt','Offset_-1.25.txt']

Flats.handle_flats('Flat.txt', maskname, band, flatops,lampOffList='FlatThermal.txt')

Wavelength.imcombine(obsfiles, maskname, band, waveops)
# if you have Ar
Wavelength.imcombine('Ar.txt', maskname, band, waveops)
# if you have Ne
Wavelength.imcombine('Ne.txt', maskname, band, waveops)
Wavelength.fit_lambda_interactively(maskname, band, obsfiles,waveops, noninteractive=noninteractiveflag)
Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ar.txt', argon=True)
Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ne.txt', neon=True)
Wavelength.fit_lambda(maskname, band, obsfiles, obsfiles,waveops)
Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt',waveops, wavenames2='Ar.txt')
LROI = [[21000,22800]]*1
LROIs = Wavelength.check_wavelength_roi(maskname, band, obsfiles, 'Ne.txt', LROI, waveops)
Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops)
Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ne.txt', LROIs, waveops)

Wavelength_file = 'merged_lambda_solution_wave_stack_K_*.fits'

Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops)

redfiles = ["eps_" + file + ".fits" for file in obsfiles]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops)
2,127
39.150943
126
py
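The K-band driver above leaves `Wavelength_file` as a literal wildcard string that the user must replace by hand. A small sketch that resolves it with the standard library instead, assuming the merge step has produced exactly one matching file in the working directory:

import glob

matches = glob.glob('merged_lambda_solution_wave_stack_K_*.fits')
if len(matches) != 1:
    raise RuntimeError('expected one merged wavelength solution, found %d' % len(matches))
Wavelength_file = matches[0]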
MosfireDRP
MosfireDRP-master/drivers/Long2pos.py
import os, time, logging
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength
from MOSFIRE.MosfireDrpLog import info, debug, warning, error
logger = logging.getLogger(__name__)

import numpy as np
from matplotlib import pyplot as pl
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf

np.seterr(all='ignore')

flatops = Options.flat
waveops = Options.wavelength

#Driver file automatically generated on Wed Jul 29 15:04:02 2015
#For questions and comments, email mosfiredrp@gmail.com, submit a ticket on the ticketing system, or contact Luca Rizzi @ WMKO

# THIS DRIVER IS SETUP TO REDUCE A STAR CALLED HIP85871_7.25, change this string on the entries below
maskname = 'long2pos_specphot (align)'
band = 'H'

#Set noninteractive to True to autofit wavelength solution instead of manually fitting.
noninteractiveflag=False

# these are the narrow slits
obsfiles_posCnarrow = ['Offset_-21_HIP85871_7.25_PosC.txt', 'Offset_-7_HIP85871_7.25_PosC.txt']
target_posCnarrow = "HIP85871_7.25_POSC_NARROW"
IO.fix_long2pos_headers(obsfiles_posCnarrow)
obsfiles_posAnarrow = ['Offset_7_HIP85871_7.25_PosA.txt', 'Offset_21_HIP85871_7.25_PosA.txt']
target_posAnarrow = "HIP85871_7.25_POSA_NARROW"
IO.fix_long2pos_headers(obsfiles_posAnarrow)
# these are the wide slits, comment out if you are not using specphot
obsfiles_posCwide = ['Offset_-14_HIP85871_7.25_PosC.txt', 'Offset_-7_HIP85871_7.25_PosC.txt']
target_posCwide = "HIP85871_7.25_POSC_WIDE"
IO.fix_long2pos_headers(obsfiles_posCwide)
obsfiles_posAwide = ['Offset_14_HIP85871_7.25_PosA.txt', 'Offset_21_HIP85871_7.25_PosA.txt']
target_posAwide = "HIP85871_7.25_POSA_WIDE"
IO.fix_long2pos_headers(obsfiles_posAwide)

# Note: for long2pos, the row position is ignored, and the middle point of the slit is used
longslit = {'yrange': [[1062,1188],[887,1010]], 'row_position': 0, 'mode':'long2pos'}
Flats.handle_flats('Flat.txt', maskname, band, flatops,longslit=longslit)

# in this case, we are using the argon lines.
# replace this with neon=['Ne.txt'] if you prefer to use Ne, and edit the following lines accordingly
argon = ['Ar.txt']
Wavelength.imcombine(argon, maskname, band, waveops)
Wavelength.fit_lambda_interactively(maskname, band, argon,waveops,longslit=longslit, argon=True, noninteractive=noninteractiveflag)
Wavelength.fit_lambda(maskname, band, argon,argon,waveops,longslit=longslit)
Wavelength.apply_lambda_simple(maskname, band, argon, waveops, longslit=longslit, smooth=True)

# make sure you use the correct wavelength file generated before
Wavelength_file = 'lambda_solution_wave_stack_H_m150428_0091-0091.fits'

# narrow
Background.handle_background(obsfiles_posAnarrow,Wavelength_file,maskname,band,waveops, target=target_posAnarrow)
Background.handle_background(obsfiles_posCnarrow,Wavelength_file,maskname,band,waveops, target=target_posCnarrow)
# wide
Background.handle_background(obsfiles_posAwide,Wavelength_file,maskname,band,waveops, target=target_posAwide)
Background.handle_background(obsfiles_posCwide,Wavelength_file,maskname,band,waveops, target=target_posCwide)

# narrow
redfiles = ["eps_" + file + ".fits" for file in obsfiles_posAnarrow]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_posAnarrow,waveops, target=target_posAnarrow)
redfiles = ["eps_" + file + ".fits" for file in obsfiles_posCnarrow]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_posCnarrow,waveops, target=target_posCnarrow)
# wide
redfiles = ["eps_" + file + ".fits" for file in obsfiles_posAwide]
redfiles = [redfiles[0]]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_posAwide,waveops, target=target_posAwide)
redfiles = ["eps_" + file + ".fits" for file in obsfiles_posCwide]
redfiles = [redfiles[0]]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_posCwide,waveops, target=target_posCwide)
3,944
48.936709
131
py
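The four offset groups in the long2pos driver above follow the naming convention that AutoDriver.py (later in this dump) keys on: offsets of ±7 and ±21 belong to the narrow slits, while ±7 and ±14 mark the wide (specphot) slits. A minimal sketch mirroring those set tests:

def classify_long2pos(offsets):
    """Mirror the subset tests in AutoDriver.SetupFiles for the old long2pos layout."""
    offsets = set(offsets)
    kinds = []
    if {-7, -21, 7, 21} <= offsets:
        kinds.append('narrow')   # Offset_+/-7 and Offset_+/-21 files
    if {-7, -14, 7, 14} <= offsets:
        kinds.append('wide')     # Offset_+/-7 and Offset_+/-14 files (specphot)
    return kinds

print(classify_long2pos([-21, -14, -7, 7, 14, 21]))  # -> ['narrow', 'wide']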
MosfireDRP
MosfireDRP-master/drivers/Longslit_driver.py
import os, time, logging
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength
from MOSFIRE.MosfireDrpLog import info, debug, warning, error
logger = logging.getLogger(__name__)

import numpy as np
from matplotlib import pyplot as pl
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf

np.seterr(all='ignore')

flatops = Options.flat
waveops = Options.wavelength

#Driver file automatically generated on Wed Jul 29 15:11:22 2015
#For questions and comments, email mosfiredrp@gmail.com, submit a ticket on the ticketing system, or contact Luca Rizzi @ WMKO

maskname = 'LONGSLIT-3x0.7'
band = 'H'

#Set noninteractive to True to autofit wavelength solution instead of manually fitting.
noninteractiveflag=False

# modify the target name to match your observations
obsfiles=['Offset_5_HIP17971.txt','Offset_-5_HIP17971.txt']
target="HIP17971"

# modify the yrange to match the size of your longslit
# row position is the extraction line used for the initial wavelength solution. It should be away from your target
longslit = {'yrange':[968,1100],'row_position':1034,'mode':'longslit'}

Flats.handle_flats('Flat.txt', maskname, band, flatops,longslit=longslit)

Wavelength.imcombine(obsfiles, maskname, band, waveops)
Wavelength.fit_lambda_interactively(maskname, band, obsfiles,waveops,longslit=longslit, noninteractive=noninteractiveflag)
Wavelength.fit_lambda(maskname, band, obsfiles, obsfiles,waveops,longslit=longslit)
Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops,longslit=longslit)

# make sure you use the file generated on the previous step
Wavelength_file = 'lambda_solution_wave_stack_H_m121227_0162-0311.fits'

Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops,target=target)

redfiles = ["eps_" + file + ".fits" for file in obsfiles]
Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops, target=target)
1,964
40.808511
126
py
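The hard-coded `yrange` and `row_position` in this driver are not arbitrary: AutoDriver.py (later in this dump) derives them from the mask name, and for LONGSLIT-3x0.7 the formula reproduces exactly the [968,1100] and 1034 above. A worked sketch of that computation (Python 2 floor division and Python 3 true division give the same final numbers here):

maskname = 'LONGSLIT-3x0.7'
numberOfSlits = int(maskname.lstrip('LONGSLIT-').split('x')[0])  # 3
verticalOffset = 10                                # empirical upward shift on the detector
slitSizePixels = int(numberOfSlits * (2048 / 46))  # 133 (Py3) or 132 (Py2); halved to 66 either way
slitTop = 1024 + slitSizePixels // 2 + verticalOffset     # 1100
slitBottom = 1024 - slitSizePixels // 2 + verticalOffset  # 968
rowPosition = 1024 + verticalOffset                       # 1034
longslit = {'yrange': [slitBottom, slitTop], 'row_position': rowPosition, 'mode': 'longslit'}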
MosfireDRP
MosfireDRP-master/drivers/Long2pos_K_driver.py
# Help, bugs to: http://mosfire.googlecode.com
#
# Instructions
# 1. edit band = '' to band = 'Y' or 'J' or 'H' or 'K'
#    e.g. band = 'J'
# 2. edit [709, 1350] to be the pixel values at the beginning and end
#    of the long slit. Look at the raw data.
# 3. edit row_position to be a location where the standard star is not.
# 4. Decide if you want to use sky lines or Neon lamps for lambda calibration
# 5. Uncomment one line at a time and run mospy on the driver file
#
import os, time

import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify
from MOSFIRE import Wavelength, Longslit

import numpy as np
from matplotlib import pyplot as pl
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf

np.seterr(all="ignore")

maskname = 'long2pos'
band = 'K'

flatops = Options.flat
waveops = Options.wavelength

# Note: for long2pos, the row position is ignored, and the middle point of the slit is used
longslit = {'yrange': [[1062,1188],[887,1010]], 'row_position': 0, 'mode':'long2pos'}

# SETUP FILES FOR DATES before June 10, 2015
#obsfiles_posC_narrow = ['Offset_-21_HIP85871_7.25.txt', 'Offset_-7_HIP85871_7.25.txt']
#targetCnarrow = "HIP85871_posC_narrow"
#obsfiles_posA_narrow = ['Offset_7_HIP85871_7.25.txt', 'Offset_21_HIP85871_7.25.txt']
#targetAnarrow = "HIP85871_posA_narrow"
#obsfiles_posC_wide = ['Offset_-14_HIP85871_7.25.txt','Offset_-7_HIP85871_7.25.txt']
#targetCwide = "HIP85871_posC_wide"
#obsfiles_posA_wide = ['Offset_14_HIP85871_7.25.txt','Offset_21_HIP85871_7.25.txt']
#targetAwide = "HIP85871_posA_wide"

# SETUP FILES for DATES after June 10,2015
obsfiles_posC_narrow = ['Offset_7_FS134_posC.txt','Offset_-7_FS134_PosC.txt']
targetCnarrow = "FS134_posC_narrow"
obsfiles_posA_narrow = ['Offset_7_FS134_posA.txt','Offset_-7_FS134_PosA.txt']
targetAnarrow = "FS134_posA_narrow"

# Argon files
argon = ['Ar.txt']

#Flats.handle_flats('Flat.txt', maskname, band, flatops, longslit = longslit)
#Flats.handle_flats('Flat.txt', maskname, band, flatops,lampOffList='FlatThermal.txt', longslit=longslit)

# Uses the argon calibration taken in the afternoon with long2pos for the wavelength calibration
#Wavelength.imcombine(argon, maskname, band, waveops)
#Wavelength.fit_lambda_interactively(maskname, band, argon, waveops, longslit=longslit, argon=True)
#Wavelength.fit_lambda(maskname, band, argon, argon, waveops, longslit=longslit)
#Wavelength.apply_lambda_simple(maskname, band, argon, waveops, longslit=longslit, smooth=True)

########### NARROW SLITS ############

# Long2POS: NARROW SLITS: use the -7 and -21 for posC and 7 and 21 for posA
obsfiles = obsfiles_posC_narrow
target = targetCnarrow
#IO.fix_long2pos_headers(obsfiles)
#Background.handle_background(obsfiles,
#    'lambda_solution_wave_stack_K_m150610_0168-0170.fits',
#    maskname, band, waveops, plan=[["A","B"]], target=target)

redfiles = ["eps_" + file + ".fits" for file in obsfiles]
#update the "lambda_solution_wave_stack_K*.fits" file name
# to the output file name from the apply_lambda process above.
# Update the name of the first file in the offset file (use the full path name.
# e.g. "/Users/user1/MOSFIRE/DRP_CODE/DATA/2014may08/m130114_0451.fits",
#Rectify.handle_rectification(maskname, redfiles,
#    "lambda_solution_wave_stack_K_m150610_0168-0170.fits",
#    band,
#    obsfiles,
#    waveops, target=target)

obsfiles = obsfiles_posA_narrow
target = targetAnarrow
#IO.fix_long2pos_headers(obsfiles)
#Background.handle_background(obsfiles,
#    'lambda_solution_wave_stack_K_m150610_0168-0170.fits',
#    maskname, band, waveops, plan=[["A","B"]], target=target)

redfiles = ["eps_" + file + ".fits" for file in obsfiles]
#update the "lambda_solution_wave_stack_K*.fits" file name
# to the output file name from the apply_lambda process above.
# Update the name of the first file in the offset file (use the full path name.
# e.g. "/Users/user1/MOSFIRE/DRP_CODE/DATA/2014may08/m130114_0451.fits",
Rectify.handle_rectification(maskname, redfiles,
    "lambda_solution_wave_stack_K_m150610_0168-0170.fits",
    band,
    obsfiles,
    waveops, target=target)

########### WIDE SLITS ############

# SPECTROPHOTOMETRIC Long2POS: THIS SECTION IS FOR THE REDUCTION OF THE WIDE SLITS.
#obsfiles = obsfiles_posC_wide
#target = targetCwide
#IO.fix_long2pos_headers(obsfiles)
#Background.handle_background(obsfiles,
#    'lambda_solution_wave_stack_H_m150428_0091-0091.fits',
#    maskname, band, waveops, plan=[["A","B"]], target=target)

#obsfiles = obsfiles_posA_wide
#target = targetAwide
#IO.fix_long2pos_headers(obsfiles)
#Background.handle_background(obsfiles,
#    'lambda_solution_wave_stack_H_m150428_0091-0091.fits',
#    maskname, band, waveops, plan=[["A","B"]], target=target)

# NEON
# Use neon for wavelength calibrations
#Wavelength.imcombine('Ne.txt', maskname, band, waveops)
#Wavelength.fit_lambda_interactively(maskname, band, 'Ne.txt', waveops, longslit=longslit, neon=True)
#Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt', waveops, longslit=longslit)
#Wavelength.apply_lambda_simple(maskname, band, 'Ne.txt', waveops, longslit=longslit, smooth=True)

#print redfiles
#obsfiles = ['eps_off_-10.txt', 'eps_off_10.txt']
# Update the following line after the apply_lambda_simple step
#Longslit.go(maskname, band, obsfiles,
#    'lambda_solution_wave_stack_H_m150112_0199-0201.fits',
#    waveops, longslit, extension='/Volumes/PromiseRAID/MOSFIRE/DRP_CODE/DATA/2015jan12/m150112_0199.fits')
5,542
32.79878
107
py
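The two commented SETUP FILES blocks above document the change in long2pos offset-file naming around June 10, 2015. A small sketch generating both conventions for a hypothetical target name, matching the literal filenames in those comments:

target = 'HIP85871_7.25'  # hypothetical target name

# Before June 10, 2015: plain offsets, no bar-position suffix
old_style = ['Offset_%g_%s.txt' % (off, target) for off in (-21, -14, -7, 7, 14, 21)]

# After June 10, 2015: +/-7 offsets with an explicit PosA/PosC suffix
new_style = ['Offset_%g_%s_Pos%s.txt' % (off, target, pos)
             for pos in ('A', 'C') for off in (7, -7)]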
MosfireDRP
MosfireDRP-master/apps/db.py
import time import traceback import getpass import os import pdb import pprint import sets import sqlite3 import sys import textwrap from operator import itemgetter from itertools import groupby import MOSFIRE from MOSFIRE import Options, IO def load_db(): indir = Options.indir outname = os.path.join(Options.outdir, "mosfire_files.db") print("Database: {0}".format(outname)) conn = sqlite3.connect(outname) return conn def create(cursor): cursor.execute(''' CREATE TABLE if not exists files (id integer primary key, path text, fdate text, number integer) ''') keys = [] def append_column(cursor, name, typename): qry = "alter table files\nadd {0} {1}".format(name, typename) try: cursor.execute(qry) print("Added {0} as {1}".format(name, typename)) except sqlite3.OperationalError: pass def make(): """Make the database""" db = load_db() c = db.cursor() create(c) dirs = os.walk(Options.indir) Options.indir = Options.indir.rstrip("/") for root, dirs, files in dirs: if root == Options.indir: continue ignore, path = root.split(Options.indir) if len(path.split("/")) != 2: continue try: date = int(path.split("/")[1][0:4]) except: continue if (date < 2012) or (date > 2030): continue for file in files: if len(file) != 17: continue p = os.path.join(root, file) num = db.execute('select count(*) from files where path = "%s"' % p).fetchall() if num[0][0] > 0: print("Skipping: " + p + " [already in db]") continue print(p) hdr = IO.readheader(p) try: fdate = file.split("_")[0][1:] number = file.split("_")[1][:-5] except: print("Skipping: " + p) continue insert_sql = "insert into files(path,fdate,number," vals = "?,?,?," values = [p, fdate, number] for key in list(hdr.keys()): if key == 'COMMENT': continue value = hdr[key] T = type(value) key = key.replace("-","_") insert_sql += key + "," vals += "?," values.append(value) if key in keys: continue keys.append(key) if T == int: typename = 'integer' if T == float: typename = 'real' else: typename = 'text' append_column(c, key, typename) insert_sql = insert_sql[:-1] + ") values (" + vals[:-1] + ")" try: c.execute(insert_sql, tuple(values)) except: print("Query failed on:") print(insert_sql) traceback.print_exc() #sys.exit() db.commit() def find_continuous(data): '''Find all continuous numbers in a list''' # http://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list ranges = [] for k, g in groupby(enumerate(data), lambda (i,x):i-x): group = list(map(itemgetter(1), g)) ranges.append((group[0], group[-1])) return ranges def underline_ansi(str): return chr(27) + '[4m' + str + chr(27) + '[0m' def bold_ansi(str): return chr(27) + '[1m' + str + chr(27) + '[0m' def boldunderline_ansi(str): return chr(27) + '[1m' + chr(27) + '[4m' + str + chr(27) + '[0m' def sql_for_mask_group_filter(db, maskname): cur = db.execute( ''' select count(filter), filter, itime/1000.0, yoffset from files where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy" group by filter'''. 
format(maskname)) return cur.fetchall() def sql_for_mask_filter_flats(db, maskname, filter): query = ''' select path, fdate, number from files where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy" and filter = "{1}" and (el-45) < .1 and flatspec = 1 order by fdate, number '''.format(maskname, filter) print("Flat Query is:", query) cursor = db.execute(query) return cursor.fetchall() def sql_for_mask_filter_spectra(db, maskname, filter): query = ''' select fdate from files where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy" and filter = "{1}" and (itime/1000.0) > 30 and flatspec = 0 and (domestat = "tracking" or domestat = 0) and aborted = 0 group by fdate '''.format(maskname, filter) #print("DB Query is: ", query cur = db.execute(query) return cur.fetchall() def sql_for_mask_filter_date(db, maskname, filter, date): query = ''' select path, fdate, number, yoffset, itime/1000.0 from files where maskname = "{0}" and filter = "{1}" and (itime/1000.0) > 30 and fdate = {2} and flatspec = 0 and (domestat = "tracking" or domestat = 0) and aborted = 0 order by fdate, number '''.format(maskname, filter, date) print("DB Query is: ", query) cur = db.execute(query) return cur.fetchall() def plan_to_fname(plan): return "%s_%s.py" % (plan["maskname"], plan["filter"]) longslit_plan_file =''' # This file was automatically generated by the mospy db application # The application was executed by {uid} on {createdate} # # Help, bugs to: http://mosfire.googlecode.com # # Instructions # 1. edit band = 'fixme' to band = 'Y' or 'J' or 'H' or 'K' # e.g. band = 'J' # 2. edit range(a,b) to be a list of flat names # 3. edit range(c,d) to be a list of long names # Note for steps 2&3 most likely these will be a continuous sequence # 4. edit [709, 1350] to be the pixel values at the beginning and end # of the long slit. Look at the raw data. 
import os, time import MOSFIRE from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify from MOSFIRE import Wavelength, Longslit import numpy as np from matplotlib import pyplot as pl try: from astropy.io import fits as pf except: import pyfits as pf np.seterr(all="ignore") maskname = '{maskname}' band = '{band}' flatnames = {flatnames} longnames = {longnames} flatops = Options.flat waveops = Options.wavelength {lslitoptions} Flats.handle_flats(flatnames, maskname, band, flatops) Wavelength.imcombine(longnames, maskname, band, waveops) Wavelength.fit_lambda_interactively(maskname, band, longnames, waveops) Wavelength.fit_lambda(maskname, band, longnames, longnames, waveops, longslit=longslit) Wavelength.apply_lambda_simple(maskname, band, longnames, waveops, longslit=longslit, smooth=True) Longslit.go(maskname, band, longnames, waveops, longslit) ''' plan_file =''' # This file was automatically generated by the mospy db application # The application was executed by {uid} on {createdate} # # Help, bugs to: http://mosfire.googlecode.com import os, time import MOSFIRE from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify from MOSFIRE import Wavelength import numpy as np from matplotlib import pyplot as pl try: from astropy.io import fits as pf except: import pyfits as pf np.seterr(all="ignore") maskname = '{maskname}' band = '{band}' num_dates = {num_dates} flatnames = {flatnames} sciframes = {sciframes} wavenames = {wavenames} flatops = Options.flat waveops = Options.wavelength Flats.handle_flats(flatnames, maskname, band, flatops) {wavecombine} Combine.handle_combine(wavenames, maskname, band, waveops) ''' def plan_to_python(plans): '''Convert the python list/dictionary created by masks() into a python script''' ''' A plan is a structure that looks something like Plan { filter -> Filter name (string) maskname -> maskname (string) flatlist -> ["YYmmDD_####"...] list of flats dates -> [{ date -> YYmmDD (string) observations -> [{ observation -> (a,b) (tuple of file number range) offsets [{ "name" -> ["YYmmDD_####" ...] list of sci frames at offset "name" }] }] }] } This function unpacks the above structure into a python program that will produce a data reduction plan file. 
''' for plan in plans: fname = plan_to_fname(plan) if os.path.exists(fname): #print("Plan '%s' already exists, remove the plan file " #"to overwrite" % fname) os.remove(fname) #REMOVE COMMENT BELOW: #continue outf = open(fname, "w") num_dates = len(plan["dates"]) waves = [] scis = [] for date in plan["dates"]: for observation in date["observations"]: obs_wave = [] obs_sci = {} offsets = list(observation["offsets"].keys()) if (len(offsets) == 1) and offsets[0] is 'Unknown': fnames = observation["offsets"]['Unknown']['fname'] obs_sci["A"] = fnames[0:-1:2] obs_sci["B"] = fnames[1:-1:2] obs_wave.extend(fnames) else: for offset in offsets: fnames = observation["offsets"][offset]['fname'] obs_sci[offset] = fnames obs_wave.extend(fnames) scis.append(obs_sci) waves.append(obs_wave) wavecombine = "" for i in range(len(waves)): wavecombine += "Wavelength.imcombine(wavenames[%i], maskname, " \ "band, waveops)\n" % (i) if i == 0: wavecombine += "Wavelength.fit_lambda_interactively(" \ "maskname, band, wavenames[0], waveops)\n" wavecombine += "Wavelength.fit_lambda(" \ "maskname, band, wavenames[%i], wavenames[0], " \ " waveops)\n" % i wavecombine += "Wavelength.apply_lambda_simple(maskname, band, " \ " wavenames[%i], waveops)\n" % i pos = list(scis[i].keys()) if len(pos) != 2: print("Only handling A/B subtraction currently") continue wavecombine += \ "Background.handle_background(sciframes[%i]['%s'], " \ "sciframes[%i]['%s'], wavenames[%i], maskname, band, " \ "waveops)\n" % (i, pos[0], i, pos[1], i) wavecombine += \ "Rectify.handle_rectification(maskname, ['A', 'B'], " \ "wavenames[%i], band, waveops)" % (i) wavecombine += "\n" res = { "uid": getpass.getuser(), "createdate": time.asctime(), "maskname": plan["maskname"], "band": plan["filter"], "flatnames": plan["flatlist"], "sciframes": scis, "wavenames": waves, "wavecombine": wavecombine, "num_dates": num_dates} outf.write(plan_file.format(**res)) outf.close() def longslits(): """List all longslits""" if len(sys.argv) == 4: db = load_db() fdate = int(sys.argv[3]) query = """ select object, path, fdate, number, filter, yoffset, maskname, gratmode, itime, el from files where substr(maskname,0,9) == 'LONGSLIT' and fdate = "{0}" order by number """.format(fdate, fdate) cur = db.execute(query) ress = cur.fetchall() if len(ress) == 0: raise Exception("No such objects in database. Query:\n%s" % query) return print("{0}".format(ress[0][-1])) print("{0}".format(object)) print("{0:6s} {1:6s} {2:3s} {3:6s} {4:4s} {5:15s}".format("type", "date", "num", "band", "offset", "object")) objs = {} for res in ress: obj, path, fdate, number, filter, yoffset, maskname, gratmode, exptime, el = res guess = '?' 
if gratmode == 'imaging': guess = "align" elif filter == 'Dark': guess = 'dark' elif filter == 'Moving': guess = 'bad' elif len(obj) > 4 and obj[0:4] == 'Flat': guess = 'flat' key = "flat_{0}".format(filter) else: guess = "sci" key = "{0}_{1}".format(obj,filter) if guess == 'flat' or guess == 'sci': if objs.has_key(key): objs[key].append(path) else: objs[key] = [path] if res[5] is None: offset = -999 else: offset = float(res[5]) print("{0:6s} {1:6s} {2:3g} {3:6s} {4:5.1f} {5:15s}".format(guess, res[2], res[3], res[4], offset, obj)) print("") print("--- SUMMARY ---") for key, value in objs.items(): print("{0:10s}: {1:5g} frames".format(key, len(value))) else: print("Not enough arguments") sys.exit() res = { "uid": getpass.getuser(), "createdate": time.asctime(), 'maskname': "longslit_%s" % (fdate), 'band' : 'fixme', 'flatnames': "['m%s_%%4.4i.fits' %% i for i in range(a,b)]" % (fdate), 'longnames': "['m%s_%%4.4i.fits' %% i for i in range(c,d)]" % (fdate), 'lslitoptions': "longslit = {'yrange': [709, 1350]}" } fout = "%s_longslit.py" % fdate try: f = open(fout, "w") f.write(longslit_plan_file.format(**res)) f.close() except: print("Could not open and write to {0}".format(fout)) def masks(): """List all slit masks""" db = load_db() if len(sys.argv) == 3: cur = db.execute("select maskname, count(maskname) from files group by maskname") ress = cur.fetchall() print("{0:74s} {1:5s}".format("Mask Name", "Count")) print("-"*80) bold_on = False for res in ress: output = "{0:74s} {1:5g}".format(res[0], res[1]) if bold_on: print bold_ansi(output) else: print output bold_on = not bold_on print print('''Execute: mospy db masks [maskname] to generate a mask plan''') if len(sys.argv) == 4: maskname = sys.argv[3] FILTERS = sql_for_mask_group_filter(db, maskname) plans = [] for res in FILTERS: num_frames, filter, itime, yoffset = res if yoffset is None: yoffset='Unknown' this_plan = {"maskname": maskname, "filter": filter} print print(boldunderline_ansi("{0:45s} {1:4s}".format(maskname, filter))) if filter == 'Dark': print(" Dark frames not fully supported yet") continue FL = sql_for_mask_filter_flats(db, maskname, filter) print("%i flats on %i nights " % (len(FL), len(set([str(S[1]) for S in FL])))) this_plan["flatlist"] = [str("m%s_%4.4i.fits" % (f[1],f[2])) for f in FL] DATES = sql_for_mask_filter_spectra(db, maskname, filter) this_plan["dates"] = [] for date in DATES: date = date[0] this_date = {"date": date} FRAMES = sql_for_mask_filter_date(db, maskname, filter, date) print(underline_ansi("{0}: {1} frames:".format(date, len(FRAMES)))) nums = [int(S[2]) for S in FRAMES] observations = find_continuous(nums) this_date["observations"] = [] for observation in observations: this_observation = {"observation": observation} offsets = {} for frame in FRAMES: path, fdate, number, yoffset, itime = frame if yoffset is None: yoffset = "Unknown" if (number < observation[0]) or (number > observation[1]): continue if float(yoffset) == 0: pdb.set_trace() if offsets.has_key(yoffset): offsets[yoffset]["fname"].append( str("m%s_%4.4i.fits" % (fdate,number))) offsets[yoffset]["itime"] += itime else: offsets[yoffset] = {} offsets[yoffset]["fname"] = [ str("m%s_%4.4i.fits" % (fdate, number))] offsets[yoffset]["itime"] = itime offsets[yoffset]["start/stop"] = observation this_observation["offsets"] = offsets this_date["observations"].append(this_observation) for observation in this_date["observations"]: for k,v in observation["offsets"].items(): print("\tOffset {0:5s} has {1:3g} frames ({2}-{3}) " "total exptime is {4:5g} 
s".format(str(k), len(v["fname"]), v["start/stop"][0], v["start/stop"][1], v["itime"])) this_plan["dates"].append(this_date) plans.append(this_plan) plan_to_python(plans) commands = [make, masks, longslits] def usage(): print(""") Commands: """ for command in commands: print("\t" + command.__name__ + ": " + command.__doc__) print("\n") if __name__ == '__main__': if len(sys.argv) < 3: usage() sys.exit() if sys.argv[2] == 'make': print("Making database") make() if sys.argv[2] == 'masks': masks() if sys.argv[2] == 'longslits': longslits() else: usage() sys.exit()
18,595
27.653313
96
py
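`find_continuous()` in db.py above relies on the classic groupby-on-(index minus value) idiom, written with a Python 2 tuple-parameter lambda that is a syntax error under Python 3. A Python 3-compatible sketch of the same function, with a usage example:

from itertools import groupby
from operator import itemgetter

def find_continuous(data):
    """Collapse sorted integers into (start, end) runs of consecutive numbers."""
    ranges = []
    # Consecutive values share a constant (index - value), so groupby splits on gaps.
    for _, g in groupby(enumerate(data), lambda pair: pair[0] - pair[1]):
        group = list(map(itemgetter(1), g))
        ranges.append((group[0], group[-1]))
    return ranges

print(find_continuous([2, 3, 4, 7, 8, 12]))  # -> [(2, 4), (7, 8), (12, 12)]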
MosfireDRP
MosfireDRP-master/apps/AutoDriver.py
#!/usr/local/bin/python import MOSFIRE from MOSFIRE import IO, Wavelength from MOSFIRE.IO import fname_to_path import os try: from astropy.io import fits as pf except: import pyfits as pf import time import sys import glob class Driver(object): def __init__(self,outputFile,type): self.outputFile = outputFile self.type = type self.offsetFiles = [] allowedTypes = ['slitmask', 'longslit', 'long2pos', 'long2pos_specphot'] if self.type not in allowedTypes: print("Unknown driver type") else: print("Generating automatic driver file "+outputFile) self.target = open(outputFile,'w') self.import_section() def addLine(self, line): self.target.write(line+"\n") def import_section(self): self.addLine("import matplotlib") self.addLine("matplotlib.use('TkAgg') # Force TkAgg backend for interactivity. This is") self.addLine(" # critical to bypass a bug in the MacOSX backend.") self.addLine("import os") self.addLine("import time") self.addLine("import logging") self.addLine("logger = logging.getLogger(__name__)") self.addLine("") self.addLine("import MOSFIRE") self.addLine("from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength, Extract") self.addLine("from MOSFIRE.MosfireDrpLog import info, debug, warning, error") self.addLine("") self.addLine("import numpy as np") self.addLine("np.seterr(all='ignore')") # self.addLine("") # self.addLine("from matplotlib import pyplot as pl") # self.addLine("from astropy.io import fits as pf") self.addLine("") self.addLine("flatops = Options.flat") self.addLine("waveops = Options.wavelength") self.addLine("") def addOffsetFiles(self,offsetFiles, resetList=False): # might not be needed if resetList: self.offsetFiles = [] for offsetfile in offsetFiles: self.offsetFiles.append(offsetfile) def printObsfiles(self,obsfiles): for obsfile in obsfiles: self.addLine(obsfile) self.addLine("") def printnoninteractive(self,noninteractive=False): self.addLine("#Set noninteractive to True to autofit wavelenth solution instead of manually fitting.") self.addLine("noninteractiveflag="+str(noninteractive)) def printMaskAndBand(self): offsetfile = self.offsetFiles[0] fname = IO.list_file_to_strings(offsetfile) if os.path.isabs(fname[0]): path = fname[0] else: path = os.path.join(fname_to_path(fname[0]), fname[0]) hdulist = pf.open(path) header = hdulist[0].header self.maskName = header['maskname'] self.band = header['filter'] self.addLine("maskname = '"+str(self.maskName)+"'") self.addLine("band = '"+str(self.band)+"'") self.addLine("") def isEmpty(self,file): if not os.path.exists(file): return True fname = IO.list_file_to_strings(file) if len(fname): return False else: return True def printFlat(self): longslit="" if self.type is 'long2pos' or self.type is 'long2pos_specphot' or self.type is 'longslit': longslit=",longslit=longslit" # using only Flat.txt if os.path.isfile('Flat.txt'): if self.isEmpty('Flat.txt') is True: self.addLine("### WARNING: Flat.txt is empty! ###") flatLine = "Flats.handle_flats('Flat.txt', maskname, band, flatops"+longslit+")" # using both Flat.txt and FlatThermal.txt if os.path.isfile('FlatThermal.txt') and self.band is 'K': if self.isEmpty('FlatThermal.txt') is True: self.addLine("### WARNING: FlatThermal.txt is empty! 
###") flatLine = "Flats.handle_flats('Flat.txt', maskname, band, flatops,lampOffList='FlatThermal.txt'"+longslit+")" # write the resulting line self.addLine(flatLine) self.addLine("") def addLongslit(self): if self.type is 'long2pos' or self.type is 'long2pos_specphot': self.addLine("# Note: for long2pos, the row position is ignored, and the middle point of the slit is used") self.addLine("longslit = {'yrange': [[1062,1188],[887,1010]], 'row_position': 0, 'mode':'long2pos'}") if self.type is 'longslit': # use the slitname to determine the range (such as LONGSLIT-3x0.7) numberOfSlits = int(self.maskName.lstrip("LONGSLIT-").split("x")[0]) verticalOffset = 10 # this is the vertical offset to apply to each measurement to shift the position up in the detector. It seems to be around 10 slitSizePixels = int(numberOfSlits*(2048/46)) slitTop = 1024+slitSizePixels//2+verticalOffset slitBottom = 1024-slitSizePixels//2+verticalOffset RowPosition = 1024+verticalOffset self.addLine("longslit = {'yrange':["+str(slitBottom)+","+str(slitTop)+"],'row_position':"+str(RowPosition)+",'mode':'longslit'}") def printWavelengthFit(self): if self.type is 'longslit' or self.type is 'long2pos': addLongSlit = ",longslit=longslit" else: addLongSlit = "" if self.type is 'slitmask' or self.type is 'longslit': self.useNeon = False self.useArgon = False # determine is Argon and Neon files contain data for K bands if self.isEmpty('Ar.txt') is False and self.band is 'K': self.useArgon = True if self.isEmpty('Ne.txt') is False and self.band is 'K': self.useNeon = True self.addLine("Wavelength.imcombine(obsfiles, maskname, band, waveops)") if self.useArgon: self.addLine("Wavelength.imcombine('Ar.txt', maskname, band, waveops)") if self.useNeon: self.addLine("Wavelength.imcombine('Ne.txt', maskname, band, waveops)") self.addLine("Wavelength.fit_lambda_interactively(maskname, band, obsfiles,waveops"+addLongSlit+", noninteractive=noninteractiveflag)") if self.useArgon: self.addLine("Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ar.txt', argon=True)") if self.useNeon: self.addLine("Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ne.txt', neon=True)") self.addLine("Wavelength.fit_lambda(maskname, band, obsfiles, obsfiles,waveops"+addLongSlit+")") if self.useArgon and self.useNeon: self.addLine("Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt',waveops, wavenames2='Ar.txt'"+addLongSlit+")") if self.useArgon and not self.useNeon: self.addLine("Wavelength.fit_lambda(maskname, band, 'Ar.txt', 'Ar.txt',waveops"+addLongSlit+")") if self.useNeon and not self.useArgon: self.addLine("Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt',waveops"+addLongSlit+")") if self.useNeon or self.useArgon: self.addLine("LROI = [[21000,22800]]*1") if self.useNeon: self.addLine("LROIs = Wavelength.check_wavelength_roi(maskname, band, obsfiles, 'Ne.txt', LROI, waveops)") if self.useArgon and not self.useNeon: self.addLine("LROIs = Wavelength.check_wavelength_roi(maskname, band, obsfiles, 'Ar.txt', LROI, waveops)") self.addLine("Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops"+addLongSlit+")") if self.useArgon and self.useNeon: self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ne.txt', LROIs, waveops)") if self.useArgon and not self.useNeon: self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ar.txt', LROIs, waveops)") if self.useNeon and not self.useArgon: self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, 
obsfiles, 'Ne.txt', LROIs, waveops)") # determine waveleng name files = IO.list_file_to_strings(self.offsetFiles) if self.useNeon: neon_files = IO.list_file_to_strings('Ne.txt') self.waveName = "merged_lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")).rstrip(".fits")+"_and_"+str(Wavelength.filelist_to_wavename(neon_files, self.band, self.maskName,"")) elif self.useArgon and not self.useNeon: argon_files = IO.list_file_to_strings('Ar.txt') self.waveName = "merged_lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")).rstrip(".fits")+"_and_"+str(Wavelength.filelist_to_wavename(argon_files, self.band, self.maskName,"")) else: self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")) if self.type is 'long2pos' or self.type is 'long2pos_specphot': calibWith = "" if self.isEmpty('Ar.txt') is False: self.addLine("argon = ['Ar.txt']") calibWith = "argon" waveFiles = IO.list_file_to_strings('Ar.txt') if self.isEmpty('Ne.txt') is False: self.addLine("neon = ['Ne.txt']") calibWith = "neon" waveFiles = IO.list_file_to_strings('Ne.txt') if calibWith: # we have either Argon, or Neon, or both, so we can use arcs for the reduction self.addLine("Wavelength.imcombine("+str(calibWith)+", maskname, band, waveops)") self.addLine("Wavelength.fit_lambda_interactively(maskname, band, "+str(calibWith)+",waveops,longslit=longslit, "+str(calibWith)+"=True, noninteractive=noninteractiveflag)") self.addLine("Wavelength.fit_lambda(maskname, band, "+str(calibWith)+","+str(calibWith)+",waveops,longslit=longslit)") self.addLine("Wavelength.apply_lambda_simple(maskname, band, "+str(calibWith)+", waveops, longslit=longslit, smooth=True)") self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(waveFiles, self.band, self.maskName,"")) else: # we have no arcs. For the time being, we can try with sky lines but this only works with long2pos specphot print("#####################################################################################################") print("WARNING: There are no arc calibration files") print(" The pipeline will try to use sky lines but this only works if the observation is long enough") print(" and if you are only using long2pos. 
It will NOT work on long2pos_specphot") print(" Please contact the MosfireDRP team to obtain a standard wavelength solution") print("#####################################################################################################" ) self.addLine("obsfiles = obsfiles_posAnarrow + obsfiles_posCnarrow") self.addLine("Wavelength.imcombine(obsfiles, maskname, band, waveops)") self.addLine("Wavelength.fit_lambda_interactively(maskname, band, obsfiles ,waveops,longslit=longslit, noninteractive=noninteractiveflag)") self.addLine("Wavelength.fit_lambda(maskname, band, obsfiles,obsfiles ,waveops,longslit=longslit)") self.addLine("Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops, longslit=longslit, smooth=True)") files = IO.list_file_to_strings(self.offsetFiles) self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")) self.addLine("") self.addLine("Wavelength_file = '"+str(self.waveName)+"'") self.addLine("") def printBackground(self): if self.type is 'long2pos_specphot': for slit in ['posAnarrow','posCnarrow','posAwide','posCwide']: self.addLine("Background.handle_background(obsfiles_"+str(slit)+",Wavelength_file,maskname,band,waveops, target=target_"+str(slit)+")") if self.type is 'long2pos': for slit in ['posAnarrow','posCnarrow']: self.addLine("Background.handle_background(obsfiles_"+str(slit)+",Wavelength_file,maskname,band,waveops, target=target_"+str(slit)+")") if self.type is 'slitmask': self.addLine("Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops)") if self.type is 'longslit': self.addLine("Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops,target=target)") self.addLine("") def printRectification(self): if self.type is 'slitmask': self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles]') self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops)') if self.type is 'longslit': self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles]') self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops, target=target)') if self.type is 'long2pos' or self.type is 'long2pos_specphot': for slit in ['posAnarrow','posCnarrow']: self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles_'+str(slit)+']') self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_'+str(slit)+',waveops, target=target_'+str(slit)+')') if self.type is 'long2pos_specphot': for slit in ['posAwide','posCwide']: self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles_'+str(slit)+']') self.addLine('redfiles = [redfiles[0]]') self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_'+str(slit)+',waveops, target=target_'+str(slit)+')') self.addLine("") def printExtraction(self): if self.type is 'slitmask': self.addLine('Extract.extract_spectra(maskname, band, width=10, interactive=(not noninteractiveflag))') elif self.type is 'longslit': self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target, interactive=(not noninteractiveflag))') elif self.type is 'long2pos': self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target_posAnarrow, interactive=(not noninteractiveflag))') self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target_posCnarrow, interactive=(not noninteractiveflag))') elif self.type is 'long2pos_specphot': 
        self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target, interactive=(not noninteractiveflag))')

    def printHeader(self):
        now = time.strftime("%c")
        self.addLine("# Driver file automatically generated on " + str(now))
        self.addLine("")
        self.addLine("# If you have questions, please submit a ticket on the github issue page:")
        self.addLine("# https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues")
        self.addLine("# Alternatively, email the developers at mosfiredrp@gmail.com")
        self.addLine("")

    def CloseFile(self):
        self.target.close()


def OffsetPairs():
    offsetFiles = glob.glob("Offset_*.txt")
    # remove the Offset_ prefix and the .txt suffix
    tmpOffsets = [off.replace("Offset_", "").replace(".txt", "") for off in offsetFiles]
    if len(tmpOffsets) == 0:
        return {}, ''
    else:
        # for each name, separate using _ as a separator
        slitmaskOffset = []
        processedTargets = []
        targets_and_offsets = {}
        for off in tmpOffsets:
            # separate using _
            off_array = off.split('_')
            # the first value of the array is the offset value
            # if the array has only one element, this is a slitmask file
            # (e.g. Offset_1.5.txt): add this value to the slitmask offsets
            if len(off_array) == 1:
                type = 'slitmask'
                if "slitmask" in targets_and_offsets:
                    tmp = targets_and_offsets["slitmask"]
                    tmp.append(float(off_array[0]))
                    targets_and_offsets["slitmask"] = tmp
                else:
                    targets_and_offsets["slitmask"] = [float(off_array[0]), ]
            else:
                # if the array has more than one element, we are in a
                # long2pos or longslit mode; if the last element is PosA
                # or PosC, then it's long2pos
                if off_array[-1] in ['PosA', 'PosC']:
                    type = 'long2pos'
                    # rejoin the middle fields: this handles cases such as
                    # Offset_-7_HIP87_7.25_PosA.txt, where a _ inside the
                    # target name would otherwise break the split
                    tname = "_".join(off_array[1:-1])
                else:
                    type = 'longslit'
                    tname = "_".join(off_array[1:])
                # check if we have already seen this target
                if tname not in processedTargets:
                    # add the new target to the list
                    processedTargets.append(tname)
                # add the current offset to the list of offset files for this target
                if tname in targets_and_offsets:
                    tmp = targets_and_offsets[tname]
                    tmp.append(float(off_array[0]))
                    targets_and_offsets[tname] = tmp
                else:
                    targets_and_offsets[tname] = [float(off_array[0]), ]

        return targets_and_offsets, type


def SetupFiles(target=None, offsets=None, type=None):
    # convert numbers such as 1.0 to 1, but leave 1.5 as 1.5
    offsets = [int(f) if f % 1 == 0 else f for f in offsets]
    setupLines = []
    obsFiles = []
    specphot = False

    # slitmask: offsets are assumed to come in positive/negative pairs
    if type == 'slitmask':
        offsets = [f for f in offsets if f > 0]
        for off in offsets:
            obsFiles.append("Offset_" + str(off) + ".txt")
            obsFiles.append("Offset_" + str(off * -1) + ".txt")
        setupLines.append("obsfiles=['" + str("','".join(obsFiles)) + "']")
    elif type == 'longslit':
        # files are assumed to be in pairs; drop the "0" offset if present
        # remove negative and 0 offsets
        offsets = [f for f in offsets if f > 0]
        for off in offsets:
            obsFiles.append("Offset_" + str(off) + "_" + str(target) + ".txt")
            obsFiles.append("Offset_" + str(off * -1) + "_" + str(target) + ".txt")
        setupLines.append("obsfiles=['" + str("','".join(obsFiles)) + "']")
        setupLines.append('target="' + str(target) + '"')
    elif type == 'long2pos' or type == 'long2pos_specphot':
        # old long2pos (-7,-14,-21, 7,14,21)
        # narrow slits
        if set([-7, -21, 7, 21]).issubset(offsets):
            setupLines.append("obsfiles_posCnarrow = ['Offset_-21_" + str(target) + "_PosC.txt', 'Offset_-7_" + str(target) + "_PosC.txt']")
            obsFiles.append("Offset_7_" + str(target) + "_PosA.txt")   # we are using this to determine maskname and band
            obsFiles.append("Offset_-7_" + str(target) + "_PosC.txt")  # we are using this to determine maskname and band
            setupLines.append('target_posCnarrow = "' + str(target) + '_POSC_NARROW"')
            setupLines.append("IO.fix_long2pos_headers(obsfiles_posCnarrow)")
            setupLines.append("obsfiles_posAnarrow = ['Offset_7_" + str(target) + "_PosA.txt', 'Offset_21_" + str(target) + "_PosA.txt']")
            setupLines.append('target_posAnarrow = "' + str(target) + '_POSA_NARROW"')
            setupLines.append("IO.fix_long2pos_headers(obsfiles_posAnarrow)")
        # wide slits
        if set([-7, -14, 7, 14]).issubset(offsets):
            setupLines.append("obsfiles_posCwide = ['Offset_-14_" + str(target) + "_PosC.txt', 'Offset_-7_" + str(target) + "_PosC.txt']")
            setupLines.append('target_posCwide = "' + str(target) + '_POSC_WIDE"')
            setupLines.append("IO.fix_long2pos_headers(obsfiles_posCwide)")
            setupLines.append("obsfiles_posAwide = ['Offset_14_" + str(target) + "_PosA.txt', 'Offset_-7_" + str(target) + "_PosA.txt']")
            setupLines.append('target_posAwide = "' + str(target) + '_POSA_WIDE"')
            setupLines.append("IO.fix_long2pos_headers(obsfiles_posAwide)")
            specphot = True
        # new long2pos (-7,0,7)
        # narrow slits
        # (the guard below read set([21,21]) in the original, i.e. just {21};
        # +/-21 is presumably what was meant, to exclude the old pattern)
        if set([-7, 7]).issubset(offsets) and not set([-21, 21]).issubset(offsets):
            setupLines.append("obsfiles_posCnarrow = ['Offset_7_" + str(target) + "_PosC.txt', 'Offset_-7_" + str(target) + "_PosC.txt']")
            obsFiles.append("Offset_7_" + str(target) + "_PosA.txt")
            obsFiles.append("Offset_-7_" + str(target) + "_PosC.txt")  # we are using this to determine maskname and band
            setupLines.append('target_posCnarrow = "' + str(target) + '_POSC_NARROW"')
            setupLines.append("obsfiles_posAnarrow = ['Offset_7_" + str(target) + "_PosA.txt', 'Offset_-7_" + str(target) + "_PosA.txt']")
            setupLines.append('target_posAnarrow = "' + str(target) + '_POSA_NARROW"')
        # wide slits
        if set([-7, 0, 7]).issubset(offsets):
            setupLines.append("obsfiles_posCwide = ['Offset_0_" + str(target) + "_PosC.txt', 'Offset_-7_" + str(target) + "_PosC.txt']")
            setupLines.append('target_posCwide = "' + str(target) + '_POSC_WIDE"')
            setupLines.append("obsfiles_posAwide = ['Offset_0_" + str(target) + "_PosA.txt', 'Offset_-7_" + str(target) + "_PosA.txt']")
            setupLines.append('target_posAwide = "' + str(target) + '_POSA_WIDE"')
            specphot = True

    return setupLines, obsFiles, specphot


# set the noninteractive variable from the command line
if len(sys.argv) > 3:
    print("Usage: mospy AutoDriver [True|False]")
    sys.exit()

noninteractiveval = False
if len(sys.argv) == 3:
    # note: the original tuples were missing commas ("T" "true"), which
    # silently concatenated to "Ttrue" and made those spellings unmatchable
    if str(sys.argv[2]) in ("t", "T", "true", "True"):
        noninteractiveval = True
    elif str(sys.argv[2]) in ("f", "F", "false", "False"):
        noninteractiveval = False
    else:
        print("Usage: mospy AutoDriver [True|False]")
        sys.exit()

targets_and_offsets, type = OffsetPairs()

if 'slitmask' in targets_and_offsets:
    print("slitmask mode")
    mydriver = Driver("Driver.py", "slitmask")
    mydriver.printHeader()
    obsLines, obsFiles, specphot = SetupFiles('slitmask', targets_and_offsets['slitmask'], type)
    mydriver.addOffsetFiles(obsFiles)
    mydriver.printMaskAndBand()
    mydriver.printnoninteractive(noninteractive=noninteractiveval)
    mydriver.printObsfiles(obsLines)
    mydriver.printFlat()
    mydriver.printWavelengthFit()
    mydriver.printBackground()
    mydriver.printRectification()
    mydriver.printExtraction()
    mydriver.CloseFile()
elif type == 'long2pos' or type == 'longslit':
    Targets = list(targets_and_offsets.keys())
    for target in Targets:
        print(str(type) + " mode")
        obsLines, obsFiles, specphot = SetupFiles(target, targets_and_offsets[target], type)
        if type == 'longslit':
            mydriver = Driver("Longslit_" + str(target) + ".py", "longslit")
        elif specphot:
            mydriver = Driver("Long2pos_" + str(target) + ".py", "long2pos_specphot")
        else:
            mydriver = Driver("Long2pos_" + str(target) + ".py", "long2pos")
        mydriver.printHeader()
        mydriver.addOffsetFiles(obsFiles)
        mydriver.printMaskAndBand()
        mydriver.printnoninteractive(noninteractive=noninteractiveval)
        mydriver.printObsfiles(obsLines)
        mydriver.addLongslit()
        mydriver.printFlat()
        mydriver.printWavelengthFit()
        mydriver.printBackground()
        mydriver.printRectification()
        mydriver.printExtraction()
        mydriver.CloseFile()
else:
    print('No data found in Offset_*.txt files. No driver file generated')
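# Illustrative Offset_*.txt names and how OffsetPairs() above classifies
# them (hypothetical file names, shown only to document the parser):
#   Offset_1.5.txt            -> slitmask mode, offset 1.5
#   Offset_7_HIP87.txt        -> longslit mode, target "HIP87", offset 7
#   Offset_-7_HIP87_PosA.txt  -> long2pos mode, target "HIP87", position A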
24849
51.536998
244
py
MosfireDRP
MosfireDRP-master/apps/CSU_plot_confirm.py
import numpy as np
from matplotlib import pyplot as pl
import scipy as sp
import scipy.io
import os

from MOSFIRE import CSU
from matplotlib.backends.backend_pdf import PdfPages

from IPython.Shell import IPShellEmbed
start_shell = IPShellEmbed()

def fit_line_with_sigclip(xs, data, i=0):
    ps = np.polyfit(xs, data, 1)
    pf = np.poly1d(ps)

    residual = np.abs(pf(xs) - data)
    sd = np.std(residual)
    ok = np.where(residual < 3.0 * sd)[0]

    ps = np.polyfit(xs[ok], data[ok], 1)
    pf = np.poly1d(ps)
    return [pf, ok]

path = "/users/npk/dropbox/mosfire/cooldown 9/csu_meas/"
proto = "m11031%1.1i_%4.4i.fits.sav.mat"

pl.ion()

rs = []
for i in range(1754, 1793):
    if os.path.exists(path + proto % (6, i)):
        ld = scipy.io.loadmat(path + proto % (6, i))
    elif os.path.exists(path + proto % (7, i)):
        ld = scipy.io.loadmat(path + proto % (7, i))
    elif os.path.exists(path + proto % (8, i)):
        ld = scipy.io.loadmat(path + proto % (8, i))
    elif os.path.exists(path + proto % (9, i)):
        ld = scipy.io.loadmat(path + proto % (9, i))
    else:
        print("No luck")
        continue

    ld["img_num"] = i
    rs.append(ld)

bar_posns = [[] for i in range(92)]
for r in rs:
    for i in range(92):
        assert(r["bars"][i] == i + 1)
        p = r["poss_mm"][i]
        d = r["deltas_mm"][i]
        if not np.isfinite(p): continue
        if not np.isfinite(d): continue
        if p < 20: continue
        if p > 200: continue
        bar_posns[i].append([p, d, r["bars"][i]])

even = np.arange(1, 92, 2)
odd = np.arange(0, 92, 2)

ds = []
ff = np.poly1d([.08 / 45, -0.04])

pl.figure(1)
pl.clf()
fits = []
for r in rs:
    pm = r["poss_mm"]
    ok = np.where(np.isfinite(pm))[0]
    if len(ok) < 1: continue
    b = r["bars"][ok]
    #pl.plot(r["deltas_mm"][ok] + 0.0416 - 0.0010392*b, 'o')
    pl.plot(r["deltas_mm"][ok], 'o')
    fits.append(np.polyfit(r["bars"][ok].ravel(), r["deltas_mm"][ok].ravel(), 1))
    #ds.extend(r["deltas_mm"][ok] + 0.0416 - 0.0010392*b)
    ds.extend(r["deltas_mm"][ok])

ds = np.array(ds).ravel()

# Measured by hand (the second assignment supersedes the first)
groups = [75, 100, 125, 150, 172, 196]
groups = [25, 50, 75, 100, 125, 150, 172, 196, 220, 244]

pl.figure(2)
pl.clf()
pl.axis("scaled")
pl.ylim(-140, 140)
pl.xlim(-140, 140)
pl.xlabel("Keck Field Position (mm)")
pl.ylabel("Keck Field Position (mm)")
pl.grid()
scale = 300
mn = ds.mean()
for bar in range(92):
    bv = np.array(bar_posns[bar])
    if len(bv) == 0: continue
    pos = bv[:, 0]
    delts = bv[:, 1]
    yshift = (bar % 2) * 1.
    for group in groups:
        p = np.where(np.isfinite(pos) & (np.abs(pos - group) < 5))[0]
        position = pos[p].mean() - mn - 137.4
        delta = delts[p].mean() - mn
        ypos = (np.round(bar / 2.) - 23.0) * 5.8 * CSU.tempscale + yshift
        if delta > 0:
            pl.arrow(position, ypos, delta * scale, 0, color='red', head_width=.8)
        else:
            pl.arrow(position, ypos, delta * scale, 0, color='blue', head_width=.8)

pl.text(-132, -117, "0.1 arcsecond")
pl.arrow(-120, -120, scale * .725 * .1, 0, head_width=.9)

pl.figure(3)
pl.clf()
arc = (ds - mn) / .725
pl.hist(arc, 40, color='w')
pl.title("(Achieved - Requested) Bar Positions")
pl.xlabel("Bar Offsets [arcsecond]")
pl.ylabel("N (total = %i)" % len(arc))
pl.text(-0.1, 50, "SD: %3.2f arcsecond\nP-V: %3.2f arcsecond" % (arc.std(), arc.max() - arc.min()))

print()
print("Mean offset of: %4.3f" % mn)

pdf = PdfPages("CSU_confirmation_mar_16_2011.pdf")
pl.figure(2).savefig(pdf, format="pdf")
pl.figure(3).savefig(pdf, format="pdf")
pdf.close()
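# Note on units, inferred from the code above rather than from any
# documentation: the bar offsets (ds) are in CSU millimeters, and the
# recurring factor of .725 acts as a mm-per-arcsecond plate scale, so
# arc = (ds - mn) / .725 expresses the achieved-minus-requested positions
# in arcseconds for the histogram and the 0.1-arcsecond scale arrow.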
3824
26.517986
98
py
MosfireDRP
MosfireDRP-master/apps/check_CSU_pos.py
'''
    Written March 18th 2011 by npk
'''
import sys, datetime, getpass, os

import numpy as np
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf
from matplotlib import pyplot as pl
import scipy.ndimage.filters, scipy.io

from pyraf import iraf

from MOSFIRE import CSU, Detector, IO, Fit

from IPython.Shell import IPShellEmbed
start_shell = IPShellEmbed()

NaN = np.nan

def save_region(bs, fname):
    s = bs.to_ds9_region()
    try:
        f = open(fname, "w")
        f.writelines(s)
        f.close()
    except:
        return Exception("Could not write barset region file to: %s" % fname)

def fit_line_with_sigclip(xs, data, i=0):
    ps = Fit.do_fit_edge(xs, data)
    pf = lambda x: Fit.fit_bar_edge(ps, x)
    residual = np.abs(pf(xs) - data)
    sd = np.std(residual)
    ok = np.where(residual < 2.5 * sd)[0]

    if len(ok) == 0:
        return [lambda x: NaN, []]

    ps = Fit.do_fit_edge(xs[ok], data[ok])
    pf = lambda x: Fit.fit_bar_edge(ps, x)
    return [pf, ok]

# disabled alternative implementation, kept from the original source
if False:
    def fit_line_with_sigclip(xs, data, i=0):
        ps = np.polyfit(xs, data, 1)
        pf = np.poly1d(ps)
        residual = np.abs(pf(xs) - data)
        sd = np.std(residual)
        ok = np.where(residual < 2.5 * sd)[0]
        ps = np.polyfit(xs[ok], data[ok], 1)
        pf = np.poly1d(ps)
        return [pf, ok]
        # unreachable: the sigma-clipping recursion below is dead code
        if len(ok) == len(residual):
            return [pf, ok]
        elif i > 2:
            return [pf, ok]
        else:
            return fit_line_with_sigclip(xs[ok], data[ok], i + 1)

def median_tails(v):
    a = np.median(v[0:2])
    b = np.median(v[-3:-1])
    t = v - np.float(a + b) / 2.
    return t

def make_slice(pos, offset, w, h):
    '''Returns [[xslice, yslice], [x0, x1, y0, y1], y] where xslice is used
    as Array[x0:x1] and yslice is used as Array[y0:y1]'''
    x0 = round(pos[0] - w + offset)
    if x0 < 0: x0 = 0
    x1 = round(pos[0] + w + offset)
    if x1 > 2047: x1 = 2047
    if x0 > x1: x0 = x1
    xs = slice(x0, x1)

    y0 = round(pos[1] - h)
    if y0 < 0: y0 = 0
    y1 = round(pos[1] + h)
    if y1 > 2047: y1 = 2047
    if y0 > y1: y0 = y1
    ys = slice(y0, y1)

    return [[xs, ys], [x0, x1, y0, y1], round(pos[1])]

deg = np.pi / 180.
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)

reload(CSU)
reload(IO)
reload(Fit)
reload(Detector)

def sigclip(data, low=4, high=4):
    c = data.ravel()
    delta = 1
    while delta:
        s = c.std()
        m = c.mean()
        size = c.size
        c = c[(c > (m - s * low)) & (c < (m + s * high))]
        delta = size - c.size
    return c.mean()

def is_in_bounds(extent):
    if extent[1] > 2047: return False
    if extent[0] < 0: return False
    if extent[2] > 2047: return False
    if extent[2] < 0: return False  # the original repeated extent[0] < 0 here; y0 < 0 was presumably intended
    if extent[0] == extent[1]: return False
    if extent[2] == extent[3]: return False
    for i in [0, 1]:
        for j in [2, 3]:
            if not CSU.in_field(extent[i], extent[j]): return False
    return True

def is_odd(n):
    return (n % 2) == 1

def go(fname):
    global plot_arr
    print(fname)
    (header, data) = IO.readfits(fname)
    bs = CSU.Barset()
    bs.set_header(header)

    means = []
    sds = []
    deltas = []
    deltas_mm = []
    poss = []
    poss_mm = []
    poss_y = []
    request = []
    qs = []

    bars = range(1, 93)
    fit_fun = Fit.residual_single
    for bar in bars:
        pos = bs.get_bar_pix(bar)
        if bar % 8 == 0:
            print("%2.0i: (%7.2f, %7.2f)" % (bar, pos[0], pos[1]))

        if is_odd(bar):
            if (bs.pos[bar] - bs.pos[bar - 1]) < 2.7:
                fit_fun = Fit.residual_pair
            else:
                fit_fun = Fit.residual_single

        width = 19
        [[xslice, yslice], extent, ystart] = make_slice(pos, 0, width, 30)

        if not is_in_bounds(extent):
            fits = [0, 0, 0, 0, 0]
            [ff, ok] = [np.poly1d(0, 0), []]
            means.append(fits)
            sds.append(fits)
            drop_this = True
        else:
            drop_this = False
            fits = []
            ys = np.arange(-10, 10, dtype=np.float32)
            for i in ys:
                tofit = data[ystart - i, xslice]
                y = median_tails(tofit)
                ps = Fit.do_fit(y, fit_fun)
                fits.append(ps[0])
            fits = np.array(fits)
            fits[:, 1] += 1

            # fit to the ridgeline
            [ff, ok] = fit_line_with_sigclip(ys, fits[:, 1])

            m = [np.mean(fits[:, i]) for i in range(5)]
            s = [np.std(fits[:, i]) for i in range(5)]
            means.append(m)
            sds.append(s)

        slit_center_offset = pos[1] - ystart
        fc = ff(slit_center_offset)
        slit_center_pos = np.float(extent[0] + fc)

        if drop_this:
            poss.append(NaN)
            poss_y.append(NaN)
            poss_mm.append(NaN)
        else:
            poss.append(slit_center_pos)
            poss_y.append(ystart)
            poss_mm.append(CSU.csu_pix_to_mm_poly(slit_center_pos, ystart)[0])

        delta = np.float(slit_center_pos - pos[0])
        if drop_this:
            deltas.append(NaN)
            deltas_mm.append(NaN)
        else:
            deltas.append(delta)
            b = CSU.csu_pix_to_mm_poly(slit_center_pos + delta, ystart)[0]
            deltas_mm.append(b - poss_mm[-1])

        q = np.float(np.degrees(np.tan(ff(1) - ff(0))))
        if drop_this:
            qs.append(NaN)
        else:
            # the original appended q unconditionally after the NaN,
            # which would misalign the arrays for dropped bars
            qs.append(q)

    means = np.array(means)
    f = lambda x: np.array(x).ravel()
    sds = f(sds)
    deltas = f(deltas)
    poss = f(poss)
    poss_y = f(poss_y)
    poss_mm = f(poss_mm)
    deltas_mm = f(deltas_mm)
    qs = f(qs)
    bars = f(bars)

    fout = "/users/npk/dropbox/mosfire/cooldown 9/csu_meas/" + fname.split("/")[-1] + ".sav"
    print("saving")
    tosav = {"bars": bars, "request": bs.pos, "deltas_mm": deltas_mm, "poss": poss, "poss_mm": poss_mm, "deltas": deltas, "means": means, "qs": qs}
    scipy.io.savemat(fout, tosav)
    save_region(bs, "/users/npk/dropbox/mosfire/cooldown 9/csu_meas/" + fname.split("/")[-1] + ".reg")
    print("saved")

    regout = "/users/npk/dropbox/mosfire/cooldown 9/csu_meas/" + fname.split("/")[-1] + ".meas.reg"
    pairs = np.array([poss, poss_y]).transpose()
    s = CSU.to_ds9_region(pairs, dash=0, color="blue", label=False)
    try:
        f = open(regout, "w")
        f.writelines(s)
        f.close()
    except:
        print("Couldn't write: %s" % regout)

    return [tosav, bs]

path = "/users/npk/desktop/c9/"
def generate_fname(num):
    global path
    files = os.listdir(path)
    for fn in files:
        if fn.find("12_%4.4i" % num) > 0: return fn
        if fn.find("13_%4.4i" % num) > 0: return fn
        if fn.find("16_%4.4i" % num) > 0: return fn
        if fn.find("17_%4.4i" % num) > 0: return fn
        if fn.find("18_%4.4i" % num) > 0: return fn
        if fn.find("19_%4.4i" % num) > 0: return fn
        if fn.find("20_%4.4i" % num) > 0: return fn
        if fn.find("21_%4.4i" % num) > 0: return fn
        if fn.find("24_%4.4i" % num) > 0: return fn

[ts, bs] = go(path + generate_fname(2846))
8772
27.669935
151
py
MosfireDRP
MosfireDRP-master/apps/show_img.py
#!/usr/bin/python

'''
    Written March 15th 2011 by npk
'''
import os, numpy, scipy, sys

path = "/users/npk/desktop/c9/"
def generate_fname(num):
    global path
    files = os.listdir(path)
    for fn in files:
        if fn.find("12_%4.4i" % num) > 0: return fn
        if fn.find("13_%4.4i" % num) > 0: return fn
        if fn.find("14_%4.4i" % num) > 0: return fn
        if fn.find("15_%4.4i" % num) > 0: return fn
        if fn.find("16_%4.4i" % num) > 0: return fn
        if fn.find("17_%4.4i" % num) > 0: return fn
        if fn.find("18_%4.4i" % num) > 0: return fn
        if fn.find("19_%4.4i" % num) > 0: return fn
        if fn.find("20_%4.4i" % num) > 0: return fn
        if fn.find("21_%4.4i" % num) > 0: return fn
        if fn.find("24_%4.4i" % num) > 0: return fn

fn = generate_fname(int(sys.argv[1]))
print(fn)
regpath = "/users/npk/dropbox/mosfire/cooldown\ 9/csu_meas/"
os.system("ds9 %s -regions %s -regions %s" % (path + fn, regpath + fn + ".reg", regpath + fn + ".meas.reg"))
#os.system("ds9 %s -regions %s" % (path + fn, regpath + fn + ".reg"))
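# Hypothetical invocation (the script takes a frame number as argv[1]):
#   python show_img.py 2846
# This finds the matching m1103*_2846.fits frame under `path` and opens it
# in ds9 with the predicted (.reg) and measured (.meas.reg) overlays.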
1424
29.978261
97
py
MosfireDRP
MosfireDRP-master/apps/CSU_Check.py
'''
    Written March 3rd 2011 by npk
'''
import numpy as np
try:
    from astropy.io import fits as pf
except:
    import pyfits as pf
from matplotlib import pyplot as pl

from pyraf import iraf

from MOSFIRE import CSU, Detector, IO, Fit

def fit_line_with_sigclip(xs, data, i=0):
    ps = np.polyfit(xs, data, 1)
    pf = np.poly1d(ps)

    residual = np.abs(pf(xs) - data)
    sd = np.std(residual)
    ok = np.where(residual < 2 * sd)[0]

    ps = np.polyfit(xs[ok], data[ok], 1)
    pf = np.poly1d(ps)
    return [pf, ok]
    # unreachable: the sigma-clipping recursion below is dead code in the original
    if len(ok) == len(residual):
        return [pf, ok]
    elif i > 2:
        return [pf, ok]
    else:
        return fit_line_with_sigclip(xs[ok], data[ok], i + 1)

def median_tails(v):
    a = np.median(v[0:2])
    b = np.median(v[-3:-1])
    t = v - np.float(a + b) / 2.
    return t

def make_slice(pos, w, h):
    '''Returns [[xslice, yslice], [x0, x1, y0, y1]] where xslice is used
    as Array[x0:x1] and yslice is used as Array[y0:y1]'''
    x0 = pos[0] - w
    if x0 < 0: x0 = 0
    x1 = pos[0] + w
    if x1 > 2047: x1 = 2047
    if x0 > x1: x0 = x1
    xs = slice(x0, x1)

    y0 = pos[1] - h
    if y0 < 0: y0 = 0
    y1 = pos[1] + h
    if y1 > 2047: y1 = 2047
    if y0 > y1: y0 = y1
    ys = slice(y0, y1)

    return [[xs, ys], [x0, x1, y0, y1]]

(header, data) = IO.readfits("/users/npk/desktop/c8/m101029_0233.ref.fits")
(header2, data2) = IO.readfits("/users/npk/desktop/c8/m101029_0425.ref.fits")
(header3, data3) = IO.readfits("/users/npk/desktop/c8/m101029_0427.ref.fits")
data = data3

deg = np.pi / 180.
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)

reload(CSU)
reload(IO)
reload(Fit)
reload(Detector)

pl.ion()

bs = CSU.Barset()
bs.set_header(header)

pl.figure(1)
pl.clf()

means = []
sds = []
deltas = []
poss = []
qs = []

cnt = 1
cntfit = 1
pl.figure(5)
pl.clf()
pl.subplot(7, 7, cntfit)
pl.figure(6)
pl.clf()
pl.subplot(7, 7, cntfit)
for bar in range(4, 92, 2):
    print(bar)
    pos = bs.get_bar_pix(bar)
    [[xslice, yslice], extent] = make_slice(pos, 6, 25)
    if extent[0] == extent[1]:
        cnt += 1
        continue
    if extent[2] == extent[3]:
        cnt += 1
        continue
    cnt += 1

    fits = []
    xs = np.arange(-10, 10)
    for i in xs:
        tofit = data[pos[1] - i, xslice]
        y = median_tails(tofit)
        ps = Fit.do_fit(y, Fit.residual_pair)
        fits.append(ps[0])
    fits = np.array(fits)

    m = [np.mean(fits[:, i]) for i in range(5)]
    s = [np.std(fits[:, i]) for i in range(5)]
    means.append(m)
    sds.append(s)

    [ff, ok] = fit_line_with_sigclip(xs, fits[:, 1])

    pl.figure(5)
    pl.subplot(7, 7, cntfit)
    pl.plot(xs, fits[:, 1] - ff(xs), '*-')
    pl.plot(xs[ok], fits[ok, 1] - ff(xs[ok]), 'or-')
    pl.ylim([-.1, .1])
    pl.title("%2i" % bar)

    pl.figure(6)
    pl.subplot(7, 7, cntfit)
    pl.plot(xs, fits[:, 4], '*-')
    pl.plot(xs[ok], fits[ok, 4], 'or-')
    pl.ylim([2.9, 4])

    cntfit += 1

    delta = (extent[0] + ff[0]) - pos[0]
    poss.append(extent[0] + ff[0])
    deltas.append(delta)
    q = np.degrees(ff[1]) - np.degrees(CSU.rotation)
    qs.append(q)
    pl.figure(1)
    pl.text(pos[0], pos[1], 'b%2.0i: w=%3.2f p=%5.2f q=%3.2f d=%1.3f' % (bar, np.mean(fits[:, 4]), extent[0] + ff[0], q, delta), fontsize=11, family='monospace', horizontalalignment='center')

pl.xlim([0, 2048])
pl.ylim([0, 2048])

means = np.array(means)
sds = np.array(sds)

pl.draw()
3761
21.662651
192
py
MosfireDRP
MosfireDRP-master/apps/what.py
#!/usr/local/bin/python

'''
MOSFIRE 'what' command:

Spits out an informative summary of files in the current
directory, or of files selected via a glob, i.e. what *0311.fits

npk - March 23 2011
'''

import MOSFIRE
import MOSFIRE.IO
import glob
import sys

files = []
if len(sys.argv) == 1:
    files = glob.iglob('*')
else:
    for i in range(1, len(sys.argv)):
        files.extend(glob.iglob(sys.argv[i]))

#print("filename object exptime maskname lamp filt Turret")
for fname in files:
    try:
        header = MOSFIRE.IO.readheader(fname)
    except IOError as err:  # the original "except IOError, err:" is Python-2-only syntax
        print("Couldn't IO %s" % fname)
        continue
    except:
        print("%s is unreadable" % fname)
        continue

    lamps = ""
    try:
        if header["pwstata7"] == 1:
            lamps += header["pwloca7"][0:2]
        if header["pwstata8"] == 1:
            lamps += header["pwloca8"][0:2]
    except KeyError:
        lamps = "???"

    header.update("lamps", lamps)

    try:
        if header["aborted"]:
            header.update("object", "ABORTED")
    except:
        print("Missing header file in: %s" % fname)

    try:
        print("%(datafile)12s %(object)40s %(truitime)6.1f s %(maskname)35s %(lamps)3s %(filter)4s %(mgtname)7s" % (header))
    except:
        try:
            print("%(datafile)12s %(object)25s %(truitime)6.1f s %(lamps)3s %(filter)6s %(mgtname)7s" % (header))
        except:
            print("%s Skipped" % fname)
1491
21.268657
124
py
MosfireDRP
MosfireDRP-master/apps/plot_scale.py
from MOSFIRE import CSU
from matplotlib import pyplot as pl
import numpy as np
from numpy import sin, cos

reload(CSU)

pl.ion()
pl.figure(1)
pl.clf()
pl.xlim([-100, 2200])
pl.ylim([-200, 2200])

dxs = []
for s in range(4):
    for x in range(0, 281, 280):
        p1 = CSU.csu_mm_to_pix_poly(x, s)
        p2 = CSU.csu_mm_to_pix(x, s, Please_Use=True)
        dxs.append(p1[0] - p2[0])
        dx = p1[0] - p2[0]
        dy = p1[1] - p2[1]
        scale = 50
        pl.arrow(p1[0], p1[1], dx * scale, dy * scale)
        if s == 0:
            pl.axvline(p2[0], color='black', ls='-.', lw=.5)

pl.figure(2)
pl.clf()

try:
    f = open("../platescale/finalshift.xyXrotYrot.4.972.120k.dat")
    lines = f.readlines()
    f.close()
except:
    print("Failed to read file")

pl.figure(3)
pl.clf()
pl.xlim([-100, 2200])
pl.ylim([-100, 2200])
for line in lines:
    [px, py, xmm, ymm] = map(np.float, line.split())
    px_lin = 7.670726 * xmm
    py_lin = 7.670726 * ymm
    d = np.radians(360 - 359.762)
    # rotate (px_lin, py_lin) by d; the original computed py_lin from the
    # already-rotated px_lin, so temporaries are used here instead
    px_rot = cos(d) * px_lin - sin(d) * py_lin
    py_rot = sin(d) * px_lin + cos(d) * py_lin
    px_lin = px_rot + 1042.986
    py_lin = py_rot + 1035.879

    dx = px - px_lin
    dy = py - py_lin
    scale = 200
    print("%3.2f %3.2f" % (dx, dy))
    pl.arrow(px_lin, py_lin, scale * dx, scale * dy)
    pl.plot([px_lin], [py_lin], 'ok')

pl.arrow(100, 1800, scale / 2., 0)
pl.title("Distortion Map: ccs solution - linear solution")
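# A minimal sketch of the intended 2D rotation, written as a hypothetical
# helper for clarity (the loop above inlines the same arithmetic):
#   def rotate(x, y, d):
#       return cos(d) * x - sin(d) * y, sin(d) * x + cos(d) * y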
1546
20.486111
70
py
MosfireDRP
MosfireDRP-master/apps/audit.py
import time
import traceback
import getpass
import os
import pdb
import pprint
# import sets  # dropped: Python-2-only module, unused in this script
import sqlite3
import sys
import textwrap

import numpy as np
from matplotlib import pyplot as pl

from operator import itemgetter
from itertools import groupby

import MOSFIRE
from MOSFIRE import Options, IO, Wavelength

def audit(filename):
    header, data = IO.readfits(filename)

    ll0 = header['crval1']
    dlam = header['cd1_1']
    ls = ll0 + dlam * np.arange(data.shape[1])
    linelist = Wavelength.pick_linelist(header)

    deltas = []
    sigs = []
    xpos = []
    ys = []
    for y in np.linspace(750, 1100, 30):
        # for y in np.linspace(5, 640, 50):
        sp = np.ma.array(data[y, :])
        xs, sxs, sigmas = Wavelength.find_known_lines(linelist, ls, sp, Options.wavelength)
        xpos.append(xs)
        ys.append([y] * len(xs))
        deltas.append(xs - (linelist - ll0) / dlam)
        sigs.append(sxs)

    xpos, ys, deltas, sigs = map(np.array, [xpos, ys, deltas, sigs])
    deltas[np.abs(deltas) > .75] = np.nan
    sigs[np.abs(sigs) > .001] = np.nan

    pl.clf()
    size = 0.003 / sigs
    size[size > 30] = 30
    size[size < 1] = 1
    pl.scatter(xpos, ys, c=deltas, s=size)
    pl.xlim([0, data.shape[1]])
    pl.ylim([0, data.shape[0]])
    pl.xlabel("Spectral pixel")
    pl.ylabel("Spatial pixel")
    pl.title("Night sky line deviation from solution [pixel]")
    pl.colorbar()
    pl.savefig("audit.pdf")

    pdb.set_trace()

def usage():
    print("""
audit [filename]

Commands:

""")
    print("\n")

if __name__ == '__main__':
    if len(sys.argv) != 3:
        usage()
        sys.exit()

    audit(sys.argv[-1])
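# Hypothetical invocation, inferred from the argv-length check of 3 (which
# matches dispatch through the 'mospy' wrapper rather than direct use):
#   mospy audit combined_spectrum.fits
# The input is assumed to be a rectified 2D spectrum whose CRVAL1/CD1_1
# header cards define the wavelength grid sampled above.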
1697
17.456522
71
py
MosfireDRP
MosfireDRP-master/apps/handle.py
#!/usr/local/bin/python

'''
MOSFIRE 'handle' command:

(c) npk - Dec 2013
'''
import MOSFIRE.IO as IO
import os
import numpy as np
import sys
import glob

from MOSFIRE.MosfireDrpLog import debug, info, warning, error

if len(sys.argv) < 3:
    print('''Usage: mospy handle [target]''')
    sys.exit()

## Output the file list to a text file for later examination
if os.path.exists('filelist.txt'):
    debug('Removing old filelist.txt')
    os.remove('filelist.txt')
fl = open('filelist.txt', 'w')

files = []
for i in range(1, len(sys.argv)):
    files.extend(glob.iglob(sys.argv[i]))

masks = {}

info('Examining {} files'.format(len(files)))
for fname in files:
    try:
        header = IO.readheader(fname)
    except IOError:
        fl.write("Couldn't IO %s\n" % fname)
        continue
    except:
        fl.write("%s is unreadable\n" % fname)
        continue

    lamps = ""
    try:
        if header["pwstata7"] == 1:
            lamps += header["pwloca7"][0:2]
        if header["pwstata8"] == 1:
            lamps += header["pwloca8"][0:2]
    except KeyError:
        lamps = "???"

    header['lamps'] = lamps

    try:
        if header["aborted"]:
            header['object'] = 'ABORTED'
    except:
        fl.write("Missing header file in: %s\n" % fname)

    try:
        fl.write("%(datafile)12s %(object)35s %(truitime)6.1fs %(maskname)35s %(lamps)3s %(filter)4s %(mgtname)7s\n" % (header))
    except:
        try:
            fl.write("%(datafile)12s %(object)25s %(truitime)6.1fs %(lamps)3s %(filter)6s %(mgtname)7s\n" % (header))
        except:
            fl.write("%s Skipped\n" % fname)
            continue

    datafile = header['datafile'] + '.fits'
    maskname = str(header['maskname'])
    target = str(header['targname'])
    filter = header['filter']
    yr, mn, dy = IO.fname_to_date_tuple(datafile)
    date = str(yr) + mn + str(dy)
    object = header['object']
    frameid = header['FRAMEID'].strip()
    itime = header['truitime']
    grating_turret = header['mgtname']

    if object.find("MIRA") == -1:
        mira = False
    else:
        mira = True
    # note: the original compared with "is not 'mirror'", an identity test
    # that is effectively always true; a value comparison is what was meant
    if header['MGTNAME'] != 'mirror':
        mira = False

    if maskname.find(" (align)") == -1:
        align = False
    else:
        maskname = maskname.replace(" (align)", "")
        align = True

    if maskname.find('LONGSLIT') != -1:
        # print("longslit file")
        align = False

    if maskname.find('long2pos') != -1:
        if grating_turret != 'mirror':
            align = False

    empty_files = {'Align': [], 'Ne': [], 'Ar': [], 'Flat': [],
                   'FlatThermal': [], 'Dark': [], 'Aborted': [],
                   'Image': [], 'MIRA': [], 'Unknown': []}

    if maskname not in masks:
        masks[maskname] = {date: {filter: empty_files}}

    if date not in masks[maskname]:
        masks[maskname][date] = {filter: empty_files}

    if filter not in masks[maskname][date]:
        masks[maskname][date][filter] = empty_files

    # convert numbers such as 1.0 to 1, but leave 1.5 as 1.5
    # - added to match AutoDriver.py code
    offset_hdr = float(header['YOFFSET'])
    if offset_hdr % 1 == 0:
        offsetvalue = int(offset_hdr)
    else:
        offsetvalue = offset_hdr

    offset = 'Offset_' + str(offsetvalue)

    if (maskname.find('long2pos') != -1 and align is False) or maskname.find('LONGSLIT') != -1:
        # if the target name contains a /, replace it with _
        target_name = target.replace("/", "_")
        # if the target name contains a space, remove it
        target_name = target_name.replace(" ", "")
        # add a PosC or PosA suffix to the offset names
        position = ''
        if header['XOFFSET'] > 0:
            position = 'PosC'
        if header['XOFFSET'] < 0:
            position = 'PosA'
        offset = offset + '_' + str(target_name)
        if position != '':
            offset = offset + '_' + position

    if mira:
        masks[maskname][date][filter]['MIRA'].append(fname)
    elif align:
        masks[maskname][date][filter]['Align'].append(fname)
    elif 'Ne' in header['lamps']:
        masks[maskname][date][filter]['Ne'].append(fname)
    elif 'Ar' in header['lamps']:
        masks[maskname][date][filter]['Ar'].append(fname)
    elif header['ABORTED']:
        masks[maskname][date][filter]['Aborted'].append(fname)
    elif header['FILTER'] == 'Dark':
        masks[maskname][date][filter]['Dark'].append(fname)
    elif header['FLATSPEC'] == 1:
        masks[maskname][date][filter]['Flat'].append(fname)
    elif object.find("Flat:") != -1 and (object.find("lamps off") != -1 or object.find("Flat:Off") != -1):
        # note: the original closed the parenthesis before "!= -1", so the
        # comparison applied to the whole boolean expression by mistake
        masks[maskname][date][filter]['FlatThermal'].append(fname)
    elif header['mgtname'] == 'mirror':
        masks[maskname][date][filter]['Image'].append(fname)
    elif offset != 0:
        # print("offset is now: " + str(offset))
        if frameid in ["A", "B", "A'", "B'", "D", "C", "E"]:
            if offset in masks[maskname][date][filter]:
                masks[maskname][date][filter][offset].append((fname, itime))
                # print("adding file to existing offset file")
            else:
                masks[maskname][date][filter][offset] = [(fname, itime)]
                # print("creating new offset file")
        else:
            fl.write('{} has unexpected FRAMEID: {}\n'.format(fname, frameid))
    else:
        masks[maskname][date][filter]['Unknown'].append(fname)


##### Now handle mask dictionary

def descriptive_blurb():
    import getpass, time

    uid = getpass.getuser()
    date = time.asctime()

    return "# Created by '%s' on %s\n" % (uid, date)

# Write out the list of files in filepath
# list = ['/path/to/mYYmmDD_####.fits' ...]
# filepath is the absolute path to the file name to write to
#
# The result is a file called filepath containing the fits files in the list.
def handle_file_list(output_file, files):
    '''Write a list of paths to MOSFIRE file to output_file.'''

    if os.path.isfile(output_file):
        # note: despite this message, the original proceeds to overwrite below
        print("%s: already exists, skipping" % output_file)
        # pass

    if len(files) > 0:
        with open(output_file, "w") as f:
            # (the original reopened output_file here, leaking a file handle)
            f.write(descriptive_blurb())

            picker = lambda x: x
            if len(files[0]) == 2:
                picker = lambda x: x[0]

            # Identify unique path to files:
            paths = [os.path.dirname(picker(file)) for file in files]
            paths = list(set(paths))

            if len(paths) == 1:
                path_to_all = paths[0]
                converter = os.path.basename
                f.write("%s # Abs. path to files [optional]\n" % path_to_all)
            else:
                converter = lambda x: x

            info('Writing {} files to {}'.format(len(files), output_file))
            for path in files:
                if len(path) == 2:
                    to_write = "%s # %s s\n" % (converter(path[0]), path[1])
                else:
                    to_write = "%s\n" % converter(path)
                f.write("%s" % to_write)

def handle_date_and_filter(mask, date, filter, mask_info):
    path = os.path.join(mask, date, filter)
    try:
        os.makedirs(path)
    except OSError:
        pass

    for type in list(mask_info.keys()):
        handle_file_list(os.path.join(path, type + ".txt"), mask_info[type])

for mask in list(masks.keys()):
    for date in list(masks[mask].keys()):
        for filter in list(masks[mask][date].keys()):
            handle_date_and_filter(mask, date, filter, masks[mask][date][filter])
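# Hypothetical invocation and resulting layout (names are examples only):
#   mospy handle /path/to/raw/m*.fits
# For each mask/date/filter combination this writes plain-text file lists,
# e.g. MY_MASK/<date>/K/Flat.txt and MY_MASK/<date>/K/Offset_7.txt, which
# the generated driver scripts later consume as `obsfiles`.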
7565
29.508065
128
py