diff --git a/cas/tests/cas_tests.py b/cas/tests/cas_tests.py
index d546786a6..32d006329 100755
--- a/cas/tests/cas_tests.py
+++ b/cas/tests/cas_tests.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# Shell python script to test a live SSO set up - Ed Crewe 26 Nov 2010
# It can be really fiddly testing out SSO proxy auth via typing in URLs etc
# see Dave Spencer's guide at https://wiki.jasig.org/display/CAS/Proxy+CAS+Walkthrough
@@ -65,55 +66,55 @@ def test_cas(self):
NB cant put these into separate tests since tickets
are required to be passed between tests
"""
- print 'Testing with following URLs'
- print '---------------------------'
- print 'CAS server = %s' % self.urls['cas']
- print 'Application server = %s' % self.urls['app']
- print 'Proxy CAS server = %s' % self.urls['proxy']
- print ''
- print 'Test ordinary CAS login'
- print '-----------------------'
+ print('Testing with following URLs')
+ print('---------------------------')
+ print('CAS server = %s' % self.urls['cas'])
+ print('Application server = %s' % self.urls['app'])
+ print('Proxy CAS server = %s' % self.urls['proxy'])
+ print('')
+ print('Test ordinary CAS login')
+ print('-----------------------')
self.ticket = self.login()
self.get_restricted(opener=self.opener)
self.logout()
- print ''
- print 'Test get proxy ticket'
- print '---------------------'
+ print('')
+ print('Test get proxy ticket')
+ print('---------------------')
self.ticket = self.login()
iou = self.proxy1_iou()
if iou.startswith('PGT'):
- print 'PASS: Got IOU - %s for %s' % (iou, self.urls['proxy'])
+ print('PASS: Got IOU - %s for %s' % (iou, self.urls['proxy']))
else:
- print iou
+ print(iou)
pgt = self.proxy2_pgt(iou)
if pgt.startswith('PGT'):
- print 'PASS: Got PGT - %s' % pgt
+ print('PASS: Got PGT - %s' % pgt)
else:
- print pgt
+ print(pgt)
pt = self.proxy3_pt(pgt)
if pt.startswith('PT'):
- print 'PASS: Got PT - %s' % pt
+ print('PASS: Got PT - %s' % pt)
else:
- print pt
+ print(pt)
# NB: Dont logout proxy app, but test proxy auth with new openers
# for the tests to be valid...
- print ''
- print 'Test SSO server login with proxy ticket'
- print '---------------------------------------'
+ print('')
+ print('Test SSO server login with proxy ticket')
+ print('---------------------------------------')
proxy = self.proxy4_login(pt)
if proxy:
- print 'PASS: Got Success response for app %s using proxy %s' % (self.urls['app'], proxy)
+ print('PASS: Got Success response for app %s using proxy %s' % (self.urls['app'], proxy))
else:
- print 'FAIL: The proxy login to %s via %s has failed' % (self.urls['app'], self.urls['proxy'])
+ print('FAIL: The proxy login to %s via %s has failed' % (self.urls['app'], self.urls['proxy']))
- print ''
- print 'Test direct proxy login'
- print '-----------------------'
+ print('')
+ print('Test direct proxy login')
+ print('-----------------------')
new_pt = self.proxy3_pt(pgt)
self.proxy5_login(new_pt)
return
@@ -189,12 +190,12 @@ def login(self):
token = self.get_token(url)
if token:
if token.startswith('FAIL'):
- print token
+ print(token)
return ticket
else:
self.auth[TOKEN] = token
else:
- print 'FAIL: CSRF Token could not be found on page'
+ print('FAIL: CSRF Token could not be found on page')
return ticket
self.auth['service'] = self.urls['app']
data = urllib.urlencode(self.auth)
@@ -204,9 +205,9 @@ def login(self):
sso_resp.close()
if found:
ticket = self.get_ticket(sso_page, self.urls['app'])
- print 'PASS: CAS logged in to %s' % url
+ print('PASS: CAS logged in to %s' % url)
else:
- print 'FAIL: Couldnt login to %s' % url
+ print('FAIL: Couldnt login to %s' % url)
return ticket
def logout(self):
@@ -215,7 +216,7 @@ def logout(self):
app_resp = self.opener.open(url)
app_resp.close()
self.cj.clear()
- print 'Logged out'
+ print('Logged out')
return
def get_restricted(self, ticket='', opener=None, print_page=False):
@@ -230,19 +231,19 @@ def get_restricted(self, ticket='', opener=None, print_page=False):
app_resp = opener.open(url)
ok = app_resp.code == 200
except:
- print 'FAIL: couldnt log in to restricted app at %s' % url
+ print('FAIL: couldnt log in to restricted app at %s' % url)
return
page = app_resp.read()
if ok:
token = self.get_token(page=page)
if token and not token.startswith('FAIL'):
- print 'FAIL: couldnt log in to restricted app at %s' % url
+ print('FAIL: couldnt log in to restricted app at %s' % url)
else:
- print 'PASS: logged in to restricted app at %s' % url
+ print('PASS: logged in to restricted app at %s' % url)
else:
- print 'FAIL: couldnt log in to restricted app at %s' % url
+ print('FAIL: couldnt log in to restricted app at %s' % url)
if print_page:
- print page
+ print(page)
app_resp.close()
def proxy1_iou(self):
@@ -298,8 +299,8 @@ def proxy3_pt(self, pgt):
'cas:proxyTicket'])
return pt_ticket
except:
- print url
- print page
+ print(url)
+ print(page)
return ''
return None
@@ -316,7 +317,7 @@ def proxy4_login(self, pt):
except:
return 'FAIL: PTURL=%s not found' % url
page = login.read()
- print page
+ print(page)
if page.find('cas:authenticationSuccess') > -1:
proxy = self.find_in_dom(page,['cas:proxies',
'cas:proxy'])
diff --git a/cas/tests/cas_tests.py.bak b/cas/tests/cas_tests.py.bak
new file mode 100755
index 000000000..d546786a6
--- /dev/null
+++ b/cas/tests/cas_tests.py.bak
@@ -0,0 +1,334 @@
+# Shell python script to test a live SSO set up - Ed Crewe 26 Nov 2010
+# It can be really fiddly testing out SSO proxy auth via typing in URLs etc
+# see Dave Spencer's guide at https://wiki.jasig.org/display/CAS/Proxy+CAS+Walkthrough
+# This script does it for you against the deployed servers
+
+# Run via python 2.4 or above ...
+# python cas_tests.py [username]
+# You will need to edit the constants below to match your setup ...
+
+import unittest
+import sys
+import commands
+import getpass
+import urllib2
+import urllib
+from urlparse import urljoin
+import cookielib
+from xml.dom import minidom
+
+# Add in a separate test_config file if you wish of the following format
+try:
+ from test_config import *
+except:
+ # Please edit these urls to match your cas server, proxy and app server urls
+ CAS_SERVER_URL = 'https://signin.k-state.edu/WebISO/login'
+ APP_URL = 'http://webdev.labs.ome.ksu.edu/'
+ APP_RESTRICTED = 'connect'
+ PROXY_URL = 'https://webdev.labs.ome.ksu.edu/accounts/login/casProxyCallback/'
+ # Depending on your cas login form you may need to adjust these field name keys
+ TOKEN = '_eventID' # CSRF token field name
+ CAS_SUCCESS = 'Login successful' # CAS server successful login flag (find string in html page)
+ AUTH = {'username' : 'garrett', # user field name
+ 'password' : 'password', # password field name
+ 'submit' : 'submit' # login submit button
+ }
+ SCRIPT = 'manage.py shell --plain < get_pgt.py' # A script to extract the PGT from your proxying server
+
+class TestCAS(unittest.TestCase):
+ """ A class for testing a CAS setup both for standard and proxy authentication """
+
+ opener = None
+ auth = {}
+ urls = {}
+
+ def setUp(self):
+ self.cj = cookielib.CookieJar()
+ opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
+ urllib2.install_opener(opener)
+ self.opener = opener
+ self.get_auth()
+ self.set_url('cas', CAS_SERVER_URL)
+ self.set_url('app', APP_URL)
+ self.set_url('proxy', PROXY_URL)
+
+ def set_url(self, name, url):
+ """ Make sure valid url with query string appended """
+ for end in ['/','.html','.htm']:
+ if url.endswith(end):
+ self.urls[name] = url
+ return
+ self.urls[name] = '%s/' % url
+
+ def test_cas(self):
+ """ Test ordinary and proxy CAS login
+ NB cant put these into separate tests since tickets
+ are required to be passed between tests
+ """
+ print 'Testing with following URLs'
+ print '---------------------------'
+ print 'CAS server = %s' % self.urls['cas']
+ print 'Application server = %s' % self.urls['app']
+ print 'Proxy CAS server = %s' % self.urls['proxy']
+ print ''
+ print 'Test ordinary CAS login'
+ print '-----------------------'
+ self.ticket = self.login()
+ self.get_restricted(opener=self.opener)
+ self.logout()
+
+ print ''
+ print 'Test get proxy ticket'
+ print '---------------------'
+ self.ticket = self.login()
+ iou = self.proxy1_iou()
+ if iou.startswith('PGT'):
+ print 'PASS: Got IOU - %s for %s' % (iou, self.urls['proxy'])
+ else:
+ print iou
+
+ pgt = self.proxy2_pgt(iou)
+ if pgt.startswith('PGT'):
+ print 'PASS: Got PGT - %s' % pgt
+ else:
+ print pgt
+
+ pt = self.proxy3_pt(pgt)
+ if pt.startswith('PT'):
+ print 'PASS: Got PT - %s' % pt
+ else:
+ print pt
+
+ # NB: Dont logout proxy app, but test proxy auth with new openers
+ # for the tests to be valid...
+
+ print ''
+ print 'Test SSO server login with proxy ticket'
+ print '---------------------------------------'
+ proxy = self.proxy4_login(pt)
+ if proxy:
+ print 'PASS: Got Success response for app %s using proxy %s' % (self.urls['app'], proxy)
+ else:
+ print 'FAIL: The proxy login to %s via %s has failed' % (self.urls['app'], self.urls['proxy'])
+
+ print ''
+ print 'Test direct proxy login'
+ print '-----------------------'
+ new_pt = self.proxy3_pt(pgt)
+ self.proxy5_login(new_pt)
+ return
+
+
+ def get_auth(self):
+ """ Get authentication by passing to this script on the command line """
+ if len(sys.argv) > 1:
+ self.auth['username'] = sys.argv[1]
+ else:
+ self.auth['username'] = getpass.getuser()
+ self.auth['password'] = getpass.getpass('CAS Password for user %s:' % AUTH['username'])
+ return
+
+ def get_token(self, url=None, token=TOKEN, page=''):
+ """ Get CSRF token """
+ if url:
+ try:
+ r = self.opener.open(url)
+ except:
+ return 'FAIL: URL not found %s' % url
+ page = r.read()
+ if not page:
+ return 'FAIL: Page is empty'
+ starts = ['-1:
+ start += point
+ else:
+ return "FAIL: Couldnt find '%s' in page" % part
+ pagepart = pagepart[start:]
+ start = start + len(part)
+ end = page[start:].find(stop)
+ if end == -1:
+ end = len(page[start:])
+ end = start + end
+ found = page[start:end]
+ return found.strip()
+
+ def login(self):
+ """ Login to CAS server """
+ url = '%slogin?service=%s' % (self.urls['cas'], self.urls['app'])
+ ticket = ''
+ token = self.get_token(url)
+ if token:
+ if token.startswith('FAIL'):
+ print token
+ return ticket
+ else:
+ self.auth[TOKEN] = token
+ else:
+ print 'FAIL: CSRF Token could not be found on page'
+ return ticket
+ self.auth['service'] = self.urls['app']
+ data = urllib.urlencode(self.auth)
+ sso_resp = self.opener.open(url, data)
+ sso_page = sso_resp.read()
+ found = sso_page.find(CAS_SUCCESS) > -1
+ sso_resp.close()
+ if found:
+ ticket = self.get_ticket(sso_page, self.urls['app'])
+ print 'PASS: CAS logged in to %s' % url
+ else:
+ print 'FAIL: Couldnt login to %s' % url
+ return ticket
+
+ def logout(self):
+ """ Logout inbetween tests """
+ url = '%slogout' % self.urls['cas']
+ app_resp = self.opener.open(url)
+ app_resp.close()
+ self.cj.clear()
+ print 'Logged out'
+ return
+
+ def get_restricted(self, ticket='', opener=None, print_page=False):
+ """ Access a restricted URL and see if its accessible
+ Use token to check if this page has redirected to SSO login
+ ie. success for get_token is a fail for get restricted
+ """
+ url = '%s%s' % (self.urls['app'], APP_RESTRICTED)
+ if ticket:
+ url = '%s?ticket=%s' % (url, ticket)
+ try:
+ app_resp = opener.open(url)
+ ok = app_resp.code == 200
+ except:
+ print 'FAIL: couldnt log in to restricted app at %s' % url
+ return
+ page = app_resp.read()
+ if ok:
+ token = self.get_token(page=page)
+ if token and not token.startswith('FAIL'):
+ print 'FAIL: couldnt log in to restricted app at %s' % url
+ else:
+ print 'PASS: logged in to restricted app at %s' % url
+ else:
+ print 'FAIL: couldnt log in to restricted app at %s' % url
+ if print_page:
+ print page
+ app_resp.close()
+
+ def proxy1_iou(self):
+ """ Use login ticket to get proxy iou
+ NB: SSO server installation may require self.urls['proxy']/?pgtIou be called at the root
+ """
+ url_args = (self.urls['cas'], self.ticket, self.urls['app'], self.urls['proxy'])
+ url = '%sserviceValidate?ticket=%s&service=%s&pgtUrl=%s' % url_args
+ try:
+ iou = self.opener.open(url)
+ except:
+ return 'FAIL: service validate url=%s not found' % url
+ page = iou.read()
+ if page.find('cas:authenticationSuccess') > -1:
+ iou_ticket = self.find_in_dom(page,['cas:serviceResponse',
+ 'cas:authenticationSuccess',
+ 'cas:proxyGrantingTicket'])
+ if iou_ticket:
+ return iou_ticket
+ else:
+ if page:
+ return "FAIL: NO PGIOU\n\n%s" % page
+ else:
+ return 'FAIL: PGIOU Empty response from %s' % url
+ else:
+ return 'FAIL: PGIOU Response failed authentication'
+ return None
+
+ def proxy2_pgt(self, iou):
+ """ Dig out the proxy granting ticket using shell script so this test class
+ is independent of CAS implementation - eg. can substitute this function
+ to get proxy ticket from Java CAS instead of django-cas for example
+
+ For a django-cas implementation this can be read from the ORM
+ by calling the django shell environment
+ """
+ out = commands.getoutput(SCRIPT)
+ pgt = self.find_in_page(out, ['PGT',], ' ')
+ return 'PGT%s' % pgt
+
+ def proxy3_pt(self, pgt):
+ """ Use granting ticket to get proxy """
+ url_args = (self.urls['cas'], self.urls['app'], pgt)
+ url = '%sproxy?targetService=%s&pgt=%s' % url_args
+ try:
+ pt = self.opener.open(url)
+ except:
+ return 'FAIL: PTURL=%s not found' % url
+ page = pt.read()
+ if page.find('cas:serviceResponse') > -1:
+ try:
+ pt_ticket = self.find_in_dom(page,['cas:proxySuccess',
+ 'cas:proxyTicket'])
+ return pt_ticket
+ except:
+ print url
+ print page
+ return ''
+ return None
+
+
+ def proxy4_login(self, pt):
+ """ Check proxy ticket for service
+ Use a new opener so its not got any cookies / auth already
+ """
+ opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
+ url_args = (self.urls['cas'], self.urls['app'], pt)
+ url = '%sproxyValidate?service=%s&ticket=%s' % url_args
+ try:
+ login = opener.open(url)
+ except:
+ return 'FAIL: PTURL=%s not found' % url
+ page = login.read()
+ print page
+ if page.find('cas:authenticationSuccess') > -1:
+ proxy = self.find_in_dom(page,['cas:proxies',
+ 'cas:proxy'])
+ return proxy
+ return None
+
+ def proxy5_login(self, pt):
+ """ Use proxy ticket to login directly to app
+ Use a new opener so its not got any cookies / auth already
+ """
+ opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
+ return self.get_restricted(ticket=pt, opener=opener)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/cas/tests/get_pgt.py b/cas/tests/get_pgt.py
index a0e88280b..ef0732413 100755
--- a/cas/tests/get_pgt.py
+++ b/cas/tests/get_pgt.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# Run via bin/django shell --plain < get_pgt.py
# to pick up all the django environment
# Allows main test class to be independent of CAS implementation platform
@@ -9,8 +10,8 @@
def lookup_pgt():
pgt = PgtIOU.objects.latest('created')
if pgt:
- print pgt.tgt
+ print(pgt.tgt)
else:
- print 'FAIL'
+ print('FAIL')
diff --git a/cas/tests/get_pgt.py.bak b/cas/tests/get_pgt.py.bak
new file mode 100755
index 000000000..a0e88280b
--- /dev/null
+++ b/cas/tests/get_pgt.py.bak
@@ -0,0 +1,16 @@
+# Run via bin/django shell --plain < get_pgt.py
+# to pick up all the django environment
+# Allows main test class to be independent of CAS implementation platform
+# TODO: pass in iou - if cant take args write to file and read here
+import atexit
+from cas.models import PgtIOU
+
+@atexit.register
+def lookup_pgt():
+ pgt = PgtIOU.objects.latest('created')
+ if pgt:
+ print pgt.tgt
+ else:
+ print 'FAIL'
+
+
diff --git a/cas/utils.py b/cas/utils.py
index 3863a47a1..fc18765d2 100755
--- a/cas/utils.py
+++ b/cas/utils.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
def cas_response_callbacks(tree):
from django.conf import settings
callbacks = []
@@ -8,9 +9,9 @@ def cas_response_callbacks(tree):
try:
mod = __import__(module, fromlist=[''])
except ImportError as e:
- print "Import Error: %s" % e
+ print("Import Error: %s" % e)
try:
func = getattr(mod, callback)
- except AttributeError, e:
- print "Attribute Error: %s" % e
+ except AttributeError as e:
+ print("Attribute Error: %s" % e)
func(tree)
\ No newline at end of file
diff --git a/cas/utils.py.bak b/cas/utils.py.bak
new file mode 100755
index 000000000..3863a47a1
--- /dev/null
+++ b/cas/utils.py.bak
@@ -0,0 +1,16 @@
+def cas_response_callbacks(tree):
+ from django.conf import settings
+ callbacks = []
+ callbacks.extend(settings.CAS_RESPONSE_CALLBACKS)
+ for path in callbacks:
+ i = path.rfind('.')
+ module, callback = path[:i], path[i+1:]
+ try:
+ mod = __import__(module, fromlist=[''])
+ except ImportError as e:
+ print "Import Error: %s" % e
+ try:
+ func = getattr(mod, callback)
+ except AttributeError, e:
+ print "Attribute Error: %s" % e
+ func(tree)
\ No newline at end of file
diff --git a/course_selection/cos333_scraper.py b/course_selection/cos333_scraper.py
index bd6d1d8ea..a846904b3 100644
--- a/course_selection/cos333_scraper.py
+++ b/course_selection/cos333_scraper.py
@@ -13,6 +13,7 @@
Useful functions are scrape_page() and scrape_all().
"""
+from __future__ import print_function
from datetime import datetime
import json
@@ -184,8 +185,8 @@ def scrape_all():
for course in scrape_all():
if first:
first = False
- print '['
+ print('[')
else:
- print ','
+ print(',')
json.dump(course, sys.stdout)
- print ']'
+ print(']')
diff --git a/course_selection/cos333_scraper.py.bak b/course_selection/cos333_scraper.py.bak
new file mode 100644
index 000000000..bd6d1d8ea
--- /dev/null
+++ b/course_selection/cos333_scraper.py.bak
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+"""
+Python routines for scraping data from Princeton's registrar.
+by Alex Ogier '13.
+
+Kept limping along by Brian Kernighan, with bandaids every year
+as the registrar makes format changes.
+
+If run as a python script, the module will dump information on all the courses available
+on the registrar website as a JSON format.
+
+Check out LIST_URL to adjust what courses are scraped.
+
+Useful functions are scrape_page() and scrape_all().
+"""
+
+from datetime import datetime
+import json
+import re
+import string
+import sqlite3
+import sys
+import urllib2
+from bs4 import BeautifulSoup
+
+TERM_CODE = 1122 # seems to be fall 11-12
+TERM_CODE = 1124 # so 1124 would be spring 11-12
+                 # 1134 is definitely spring 13 (course offerings link)
+TERM_CODE = 1134
+TERM_CODE = 1142 # fall 2013; spring 2014 will be 1144
+TERM_CODE = 1144 # spring 2014
+TERM_CODE = 1154 # spring 2015
+
+URL_PREFIX = "http://registrar.princeton.edu/course-offerings/"
+LIST_URL = URL_PREFIX + "search_results.xml?term={term}"
+COURSE_URL = URL_PREFIX + "course_details.xml?courseid={courseid}&term={term}"
+
+COURSE_URL_REGEX = re.compile(r'courseid=(?P\d+)')
+PROF_URL_REGEX = re.compile(r'dirinfo\.xml\?uid=(?P\d+)')
+LISTING_REGEX = re.compile(r'(?P[A-Z]{3})\s+(?P\d{3})')
+
+def get_course_list(search_page):
+ "Grep through the document for a list of course ids."
+ soup = BeautifulSoup(search_page)
+ links = soup('a', href=COURSE_URL_REGEX)
+ courseids = [COURSE_URL_REGEX.search(a['href']).group('id') for a in links]
+ return courseids
+
+def clean(str):
+ "Return a string with leading and trailing whitespace gone and all other whitespace condensed to a single space."
+ return re.sub('\s+', ' ', str.strip())
+
+def get_course_details(soup):
+ "Returns a dict of {courseid, area, title, descrip, prereqs}."
+
+# print "DEBUG"
+# s = soup('h2')
+# print "s = "
+# print s
+# s1 = s[0].string
+# print "s1 = "
+# print s1
+# print "END DEBUG"
+
+ area = clean(soup('strong')[1].findAllNext(text=True)[1]) # balanced on a pinhead
+ if re.match(r'^\((LA|SA|HA|EM|EC|QR|STN|STL)\)$', area):
+ area = area[1:-1]
+ else:
+ area = ''
+
+ match = re.match(r'\(([A-Z]+)\)', clean(soup('strong')[1].findNext(text=True)))
+ pretitle = soup.find(text="Prerequisites and Restrictions:")
+ descrdiv = soup.find('div', id='descr')
+ return {
+ 'courseid': COURSE_URL_REGEX.search(soup.find('a', href=COURSE_URL_REGEX)['href']).group('id'),
+ 'area': area, #bwk: this was wrong[1:-1], # trim parens # match.group(1) if match != None else ''
+ 'title': clean(soup('h2')[0].string), # was [1]
+ ###'descrip': clean(descrdiv.contents[0] if descrdiv else ''),
+ 'descrip': clean(flatten(descrdiv)),
+ 'prereqs': clean(pretitle.parent.findNextSibling(text=True)) if pretitle != None else ''
+ }
+
+def flatten(dd):
+ s = ""
+ try:
+ for i in dd.contents:
+ try:
+ s += i
+ except:
+ s += flatten(i)
+ except:
+ s += "oh, dear"
+ return s
+
+def get_course_listings(soup):
+ "Return a list of {dept, number} dicts under which the course is listed."
+ listings = soup('strong')[1].string
+ return [{'dept': match.group('dept'), 'number': match.group('num')} for match in LISTING_REGEX.finditer(listings)]
+
+def get_course_profs(soup):
+ "Return a list of {uid, name} dicts for the professors teaching this course."
+ prof_links = soup('a', href=PROF_URL_REGEX)
+ return [{'uid': PROF_URL_REGEX.search(link['href']).group('id'), 'name': clean(link.string)} for link in prof_links]
+
+def get_single_class(row):
+ "Helper function to turn table rows into class tuples."
+ cells = row('td')
+ time = cells[2].string.split("-")
+ bldg_link = cells[4].strong.a
+
+ # | Enrolled:0
+ # Limit:11 |
+ enroll = ''
+ limit = ''
+ if cells[5] != None: # bwk
+ enroll = cells[5].strong.nextSibling.string.strip()
+
+ limit = cells[5].strong.nextSibling.nextSibling.nextSibling
+ if limit != None:
+ limit = limit.string.strip()
+ else:
+ limit = "0"
+
+ return {
+ 'classnum': cells[0].strong.string,
+ 'section': cells[1].strong.string,
+ 'days': re.sub(r'\s+', '', cells[3].strong.string),
+ 'starttime': time[0].strip(),
+ 'endtime': time[1].strip(),
+ 'bldg': bldg_link.string.strip(),
+ 'roomnum': bldg_link.nextSibling.string.replace(' ', ' ').strip(),
+ 'enroll': enroll, # bwk
+ 'limit': limit #bwk
+ }
+
+def get_course_classes(soup):
+ "Return a list of {classnum, days, starttime, endtime, bldg, roomnum} dicts for classes in this course."
+ class_rows = soup('tr')[1:] # the first row is actually just column headings
+ # This next bit tends to cause problems because the registrar includes precepts and canceled
+ # classes. Having text in both 1st and 4th columns (class number and day of the week)
+ # currently indicates a valid class.
+ return [get_single_class(row) for row in class_rows if row('td')[0].strong and row('td')[3].strong.string]
+
+def scrape_page(page):
+ "Returns a dict containing as much course info as possible from the HTML contained in page."
+ soup = BeautifulSoup(page).find('div', id='timetable') # was contentcontainer
+ course = get_course_details(soup)
+ course['listings'] = get_course_listings(soup)
+ course['profs'] = get_course_profs(soup)
+ course['classes'] = get_course_classes(soup)
+ return course
+
+def scrape_id(id):
+ page = urllib2.urlopen(COURSE_URL.format(term=TERM_CODE, courseid=id))
+ return scrape_page(page)
+
+def scrape_all():
+ """
+ Return an iterator over all courses listed on the registrar's site.
+
+ Which courses are retrieved are governed by the globals at the top of this module,
+ most importantly LIST_URL and TERM_CODE.
+
+ To be robust in case the registrar breaks a small subset of courses, we trap
+ all exceptions and log them to stdout so that the rest of the program can continue.
+ """
+ search_page = urllib2.urlopen(LIST_URL.format(term=TERM_CODE))
+ courseids = get_course_list(search_page)
+
+ n = 0
+ for id in courseids:
+ try:
+ if n > 99999:
+ return
+ n += 1
+ yield scrape_id(id)
+ except Exception:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.write('Error processing course id {0}\n'.format(id))
+
+if __name__ == "__main__":
+ first = True
+ for course in scrape_all():
+ if first:
+ first = False
+ print '['
+ else:
+ print ','
+ json.dump(course, sys.stdout)
+ print ']'
diff --git a/course_selection/models.py b/course_selection/models.py
index c4f476de5..08b6e6fd9 100644
--- a/course_selection/models.py
+++ b/course_selection/models.py
@@ -197,10 +197,10 @@ def make_new_nice_user(sender, instance, created, **kwargs):
if created:
try:
nice_user.save()
- except Exception, e:
+ except Exception as e:
if settings.DEBUG:
raise e
- except Exception, e:
+ except Exception as e:
if settings.DEBUG:
raise e
diff --git a/course_selection/models.py.bak b/course_selection/models.py.bak
new file mode 100644
index 000000000..c4f476de5
--- /dev/null
+++ b/course_selection/models.py.bak
@@ -0,0 +1,228 @@
+from django.db import models
+from django.db.models.signals import post_save
+from django.contrib.auth.models import AbstractBaseUser, User
+import settings.prod as settings
+import uuid
+
+
+class Semester(models.Model):
+ # fields
+ start_date = models.DateField()
+ end_date = models.DateField()
+ """ term_code = 1xxy, where xx is the year in which the school year ends,
+ and y is the semester code. y = 2 for the fall term, y = 4 for the spring
+ Example:
+ 1144 = 1314Spring
+ 1132 = 1213Fall
+ """
+ term_code = models.CharField(
+ max_length=4, default=settings.CURR_TERM, db_index=True, unique=True)
+
+ def __unicode__(self):
+ end_year = int(self.term_code[1:3])
+ start_year = end_year - 1
+ if int(self.term_code[3]) == 2:
+ sem = 'Fall'
+ else:
+ sem = 'Spring'
+ return str(start_year) + "-" + str(end_year) + " " + sem
+
+
+class Professor(models.Model):
+ name = models.CharField(max_length=100)
+
+
+class Color_Palette(models.Model):
+ DEFAULT_ID = 1
+
+ """
+ these are hex values of colors
+ """
+ light = models.CharField(max_length=7, default="#FFFFFF")
+ dark = models.CharField(max_length=7, default="#000000")
+
+ def __unicode__(self):
+ return "light: " + self.light + '\n' + "dark: " + self.dark
+
+
+class Course(models.Model):
+ # relationships
+ semester = models.ForeignKey(Semester)
+ professors = models.ManyToManyField(Professor)
+
+ # fields
+ title = models.TextField()
+ rating = models.FloatField(default=0)
+ description = models.TextField()
+ registrar_id = models.CharField(max_length=20)
+
+ def course_listings(self):
+ # + ' ' + ': ' + self.title
+ return " / ".join([unicode(course_listing) for course_listing in self.course_listing_set.all().order_by('dept')])
+
+ course_listings.admin_order_field = 'course_listings'
+
+ def primary_listing(self):
+ """
+ Returns the best course department and number string.
+ """
+ return unicode(self.course_listing_set.all().get(is_primary=True))
+
+ def __unicode__(self):
+ # + ' ' + ': ' + self.title
+ return " / ".join([unicode(course_listing) for course_listing in self.course_listing_set.all().order_by('dept')])
+
+ class Meta:
+ pass
+ # ordering = ['semester', 'course_listings']
+
+
+class Section(models.Model):
+ # Types
+ TYPE_CLASS = "CLA"
+ TYPE_DRILL = "DRI"
+ TYPE_EAR = "EAR"
+ TYPE_FILM = "FIL"
+ TYPE_LAB = "LAB"
+ TYPE_LECTURE = "LEC"
+ TYPE_PRECEPT = "PRE"
+ TYPE_SEMINAR = "SEM"
+ TYPE_STUDIO = "STU"
+
+ TYPE_CHOICES = (
+ (TYPE_CLASS, "class"),
+ (TYPE_DRILL, "drill"),
+ (TYPE_EAR, "ear training"),
+ (TYPE_FILM, "film"),
+ (TYPE_LAB, "lab"),
+ (TYPE_LECTURE, "lecture"),
+ (TYPE_PRECEPT, "precept"),
+ (TYPE_SEMINAR, "seminar"),
+ (TYPE_STUDIO, "studio")
+ )
+
+ # relationships
+ course = models.ForeignKey(Course, related_name="sections")
+
+ # fields
+ name = models.CharField(max_length=100, default='')
+
+ """ if true, then everyone in the course is automatically enrolled in this section """
+ isDefault = models.BooleanField(default=False)
+ section_type = models.CharField(max_length=3, choices=TYPE_CHOICES)
+ section_enrollment = models.IntegerField(default=0)
+ section_capacity = models.IntegerField(default=999)
+ section_registrar_id = models.CharField(max_length=20, default="")
+
+ def __unicode__(self):
+ return self.course.primary_listing() + ' - ' + self.name
+
+ class Meta:
+ ordering = ['course', 'name']
+
+
+class Meeting(models.Model):
+ section = models.ForeignKey(Section, related_name="meetings")
+ start_time = models.CharField(max_length=20)
+ end_time = models.CharField(max_length=20)
+ days = models.CharField(max_length=10)
+ location = models.CharField(max_length=50)
+
+ def __unicode__(self):
+ return unicode(self.section) + ' - ' + self.location
+
+
+class Course_Listing(models.Model):
+ # TODO: this line causes admin site to fail, commenting out related_name
+ # causes tastypie to fail
+ course = models.ForeignKey(Course, related_name="course_listing_set")
+ # Even though the max_length should be 3~4, there are extreme cases.
+ dept = models.CharField(max_length=10)
+ number = models.CharField(max_length=10)
+ is_primary = models.BooleanField(default=False)
+
+ def __unicode__(self):
+ return self.dept + ' ' + self.number
+
+ class Meta:
+ ordering = ['dept', 'number']
+
+
+class Schedule(models.Model):
+ """
+ NOTE: ical_uuid must be migrated in a careful way to create unique field.
+ See https://github.com/django/django/commit/1f9e44030e9c5300b97ef7b029f482c53a66f13b and https://docs.djangoproject.com/en/1.9/howto/writing-migrations/#migrations-that-add-unique-fields
+ It is done in migrations 23-25. Note that you have to split data from schema migrations. So we do a schema migration, a data migration, and then a schema migration.
+ """
+ # relationships
+ semester = models.ForeignKey(Semester)
+ user = models.ForeignKey('Nice_User')
+
+ # fields
+ available_colors = models.TextField(null=True)
+ enrollments = models.TextField(null=True)
+ title = models.CharField(max_length=100, default="schedule")
+ # uuid.uuid4 generates a random UUID (Universally Unique ID)
+ ical_uuid = models.UUIDField(default=uuid.uuid4, unique=True)
+
+# class Enrollment(models.Model):
+# # each course enrollment has
+# # a course, a color, and a few sections
+# # and belongs to a schedule
+# course = models.ForeignKey(Course, related_name="enrollment")
+# sections = models.ManyToManyField(Section)
+# color = models.ForeignKey(Color_Palette)
+# schedule = models.ForeignKey(Schedule)
+
+
+class Nice_User(AbstractBaseUser):
+ netid = models.CharField(max_length=20, unique=True)
+ friends = models.ManyToManyField(
+ 'self', symmetrical=True, related_name='friends')
+ USERNAME_FIELD = 'netid'
+
+# create user profile as soon as a user is added
+
+
+def make_new_nice_user(sender, instance, created, **kwargs):
+ # see http://stackoverflow.com/a/965883/130164
+ # Use a try because the first user (super user) is created before other tables are created.
+ # That is, this fails during syncdb upon initial database setup, because
+ # it creates a superuser before User_Profile table is added (we add that
+ # by running migrations after).
+ try:
+ if created:
+ nice_user, created = Nice_User.objects.get_or_create(
+ netid=instance.username)
+ if created:
+ try:
+ nice_user.save()
+ except Exception, e:
+ if settings.DEBUG:
+ raise e
+ except Exception, e:
+ if settings.DEBUG:
+ raise e
+
+post_save.connect(make_new_nice_user, sender=User)
+
+
+class Friend_Request(models.Model):
+ from_user = models.ForeignKey(Nice_User, related_name='from_users')
+ to_user = models.ForeignKey(Nice_User, related_name='to_users')
+
+ class Meta:
+ unique_together = ('from_user', 'to_user')
+
+
+class NetID_Name_Table(models.Model):
+ """ table for netid--name lookups """
+ netid = models.CharField(max_length=100, primary_key=True)
+ first_name = models.CharField(max_length=100)
+ last_name = models.CharField(max_length=100)
+
+ def __unicode__(self):
+ if self.first_name and self.last_name:
+ return '%s %s' % (self.first_name, self.last_name)
+ else:
+ return self.netid
diff --git a/course_selection/models_invariants.py b/course_selection/models_invariants.py
index 5098be2bb..c1879a3b4 100644
--- a/course_selection/models_invariants.py
+++ b/course_selection/models_invariants.py
@@ -1,5 +1,6 @@
+from __future__ import absolute_import
def check_schedule_invariants(schedule):
- from models import Course
+ from .models import Course
def has_user(schedule):
return schedule.user is not None
diff --git a/course_selection/models_invariants.py.bak b/course_selection/models_invariants.py.bak
new file mode 100644
index 000000000..5098be2bb
--- /dev/null
+++ b/course_selection/models_invariants.py.bak
@@ -0,0 +1,37 @@
+def check_schedule_invariants(schedule):
+ from models import Course
+
+ def has_user(schedule):
+ return schedule.user is not None
+
+ def has_available_colors(schedule):
+ return schedule.available_colors is not None
+
+ def has_enrollments(schedule):
+ return schedule.enrollments is not None
+
+ def enrolled_courses_exist(schedule):
+ import json
+ try:
+ enrollments = json.parse(schedule.enrollments)
+
+ def course_exists(course):
+ course_id = course["course_id"]
+ try:
+ Course.objects.get(id=course_id)
+ return True
+ except Course.DoesNotExist:
+ return False
+ results = [course_exists(course) for course in enrollments]
+ return reduce(lambda x, y: x and y, results, initializer=True)
+ except:
+ return False
+
+ check_invariants = [
+ has_user,
+ has_available_colors,
+ has_enrollments,
+ enrolled_courses_exist,
+ ]
+ results = [check(schedule) for check in check_invariants]
+ return reduce(lambda x, y: x and y, results, initializer=True)
diff --git a/course_selection/names.py b/course_selection/names.py
index d2014f11d..e4eca579e 100644
--- a/course_selection/names.py
+++ b/course_selection/names.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import os
import course_selection
# TODO remove import *
@@ -7,7 +8,7 @@
def construct_netid_map():
path = os.path.dirname(course_selection.__file__)
source_name = path + '/netids.txt'
- print 'reading from ' + source_name + '...'
+ print('reading from ' + source_name + '...')
mapping_count = 0
user_count = 0
@@ -15,7 +16,7 @@ def construct_netid_map():
with open(source_name) as f:
content = f.read().splitlines()
except:
- print 'getnetids failed: could not open file'
+ print('getnetids failed: could not open file')
return
list_length = len(content)
@@ -39,9 +40,9 @@ def construct_netid_map():
if created:
user_count += 1
- print "number of netids found: " + str(list_length / 4)
- print "new netids added: " + str(mapping_count)
- print "new users added: " + str(user_count)
+ print("number of netids found: " + str(list_length / 4))
+ print("new netids added: " + str(mapping_count))
+ print("new users added: " + str(user_count))
def main(self):
diff --git a/course_selection/names.py.bak b/course_selection/names.py.bak
new file mode 100644
index 000000000..d2014f11d
--- /dev/null
+++ b/course_selection/names.py.bak
@@ -0,0 +1,48 @@
+import os
+import course_selection
+# TODO remove import *
+from course_selection.models import * # NOQA
+
+
+def construct_netid_map():
+ path = os.path.dirname(course_selection.__file__)
+ source_name = path + '/netids.txt'
+ print 'reading from ' + source_name + '...'
+
+ mapping_count = 0
+ user_count = 0
+ try:
+ with open(source_name) as f:
+ content = f.read().splitlines()
+ except:
+ print 'getnetids failed: could not open file'
+ return
+
+ list_length = len(content)
+ for x in xrange(0, list_length, 4):
+ person = content[x:x + 4]
+ first_name = person[0]
+ last_name = person[1]
+ netid = person[2].split('@')[0]
+ new_user, created = NetID_Name_Table.objects.get_or_create(
+ netid=netid,
+ first_name=first_name,
+ last_name=last_name
+ )
+ if created:
+ mapping_count += 1
+
+ nice_user, created = Nice_User.objects.get_or_create(
+ netid=netid
+ )
+
+ if created:
+ user_count += 1
+
+ print "number of netids found: " + str(list_length / 4)
+ print "new netids added: " + str(mapping_count)
+ print "new users added: " + str(user_count)
+
+
+def main(self):
+ construct_netid_map()
diff --git a/course_selection/pdf.py b/course_selection/pdf.py
index d08eacaba..9f2ba2ed7 100644
--- a/course_selection/pdf.py
+++ b/course_selection/pdf.py
@@ -85,7 +85,7 @@ def run_cmd(self, cmd, input_data):
return process.communicate(input=input_data)
else:
return process.communicate()
- except OSError, e:
+ except OSError as e:
return None, e
def set_pdftk_bin(self):
diff --git a/course_selection/pdf.py.bak b/course_selection/pdf.py.bak
new file mode 100644
index 000000000..d08eacaba
--- /dev/null
+++ b/course_selection/pdf.py.bak
@@ -0,0 +1,137 @@
+# retrieved from: https://gist.github.com/zyegfryed/918403, https://gist.github.com/grantmcconnaughey/ce90a689050c07c61c96
+# used for creating pdf files to be served using django
+
+# -*- coding: utf-8 -*-
+import codecs
+import subprocess
+from fdfgen import forge_fdf
+from django.core.exceptions import ImproperlyConfigured
+from django.template import engines
+from django.template.backends.base import BaseEngine
+from django.template.engine import Engine, _dirs_undefined
+
+
+class PdfTemplateError(Exception):
+ pass
+
+
+class PdftkEngine(BaseEngine):
+
+ # Going ahead and defining this, but really PDFs should still be placed
+ # in the templates directory of an app because the loader checks templates
+ app_dirname = 'pdfs'
+
+ def __init__(self, params):
+ params = params.copy()
+ options = params.pop('OPTIONS').copy()
+ super(PdftkEngine, self).__init__(params)
+ self.engine = self._Engine(self.dirs, self.app_dirs, **options)
+
+ def get_template(self, template_name, dirs=_dirs_undefined):
+ return PdfTemplate(self.engine.get_template(template_name, dirs))
+
+ class _Engine(Engine):
+
+ def make_origin(self, display_name, loader, name, dirs):
+ # Always return an Origin object, because PDFTemplate need it to
+ # render the PDF Form file.
+ from django.template.loader import LoaderOrigin
+ return LoaderOrigin(display_name, loader, name, dirs)
+
+
+class PdfTemplate(object):
+ pdftk_bin = None
+
+ def __init__(self, template):
+ self.template = template
+ self.set_pdftk_bin()
+
+ @property
+ def origin(self):
+ return self.template.origin
+
+ def render(self, context=None, request=None):
+ if context is None:
+ context = {}
+
+ context = context.items()
+ output, err = self.fill_form(context, self.origin.name)
+ if err:
+ raise PdfTemplateError(err)
+ return output
+
+ def fill_form(self, fields, src, pdftk_bin=None):
+ fdf_stream = forge_fdf(fdf_data_strings=fields)
+
+ cmd = [self.pdftk_bin, src, 'fill_form', '-', 'output', '-', 'flatten']
+ cmd = ' '.join(cmd)
+
+ return self.run_cmd(cmd, fdf_stream)
+
+ def dump_data_fields(self):
+ cmd = [self.pdftk_bin, self.origin.name, 'dump_data_fields']
+ cmd = ' '.join(cmd)
+
+ output, err = self.run_cmd(cmd, None)
+ if err:
+ raise PdfTemplateError(err)
+ return output
+
+ def run_cmd(self, cmd, input_data):
+ try:
+ process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, shell=True)
+ if input_data:
+ return process.communicate(input=input_data)
+ else:
+ return process.communicate()
+ except OSError, e:
+ return None, e
+
+ def set_pdftk_bin(self):
+ if self.pdftk_bin is None:
+ from django.conf import settings
+ if not hasattr(settings, 'PDFTK_BIN'):
+ msg = "PDF generation requires pdftk " \
+ "(http://www.pdflabs.com/tools/pdftk-the-pdf-toolkit). " \
+ "Edit your PDFTK_BIN settings accordingly."
+ raise ImproperlyConfigured(msg)
+ self.pdftk_bin = settings.PDFTK_BIN
+
+ return self.pdftk_bin
+
+ def version(self):
+ cmd = [self.pdftk_bin, '--version']
+ cmd = ' '.join(cmd)
+
+ output, err = self.run_cmd(cmd, None)
+ if err:
+ raise PdfTemplateError(err)
+ return output
+
+
+def get_template(template_name):
+ """
+ Returns a compiled Template object for the given template name,
+ handling template inheritance recursively.
+ """
+
+ def strict_errors(exception):
+ raise exception
+
+ def fake_strict_errors(exception):
+ return (u'', -1)
+
+ # Loading hacks
+ # Ignore UnicodeError, due to PDF file read
+ codecs.register_error('strict', fake_strict_errors)
+
+ if template_name.endswith('.pdf'):
+ template = engines['pdf'].get_template(template_name)
+ else:
+ template = engines['django'].get_template(template_name)
+
+ # Loading hacks
+ codecs.register_error('strict', strict_errors)
+
+ return template
diff --git a/course_selection/scrape_all.py b/course_selection/scrape_all.py
index ec2401de4..18276031a 100644
--- a/course_selection/scrape_all.py
+++ b/course_selection/scrape_all.py
@@ -1,7 +1,9 @@
+from __future__ import print_function
+from __future__ import absolute_import
from django.conf import settings
-from scrape_parse import scrape_parse_semester
-from scrape_validate import validate_course
-from scrape_import import scrape_import_course, ScrapeCounter
+from .scrape_parse import scrape_parse_semester
+from .scrape_validate import validate_course
+from .scrape_import import scrape_import_course, ScrapeCounter
def get_all_courses():
@@ -9,13 +11,13 @@ def get_all_courses():
term_codes = settings.ACTIVE_TERMS
for term_code in term_codes:
try:
- print "Scraping for semester " + str(term_code)
+ print("Scraping for semester " + str(term_code))
courses = scrape_parse_semester(term_code)
# just a sanity check in case we ever modify scrape_parse
[validate_course(x) for x in courses]
scrapeCounter = ScrapeCounter()
[scrape_import_course(x, scrapeCounter) for x in courses]
- print str(scrapeCounter)
- print "----------------------------------"
+ print(str(scrapeCounter))
+ print("----------------------------------")
except Exception as e:
raise e
diff --git a/course_selection/scrape_all.py.bak b/course_selection/scrape_all.py.bak
new file mode 100644
index 000000000..ec2401de4
--- /dev/null
+++ b/course_selection/scrape_all.py.bak
@@ -0,0 +1,21 @@
+from django.conf import settings
+from scrape_parse import scrape_parse_semester
+from scrape_validate import validate_course
+from scrape_import import scrape_import_course, ScrapeCounter
+
+
+def get_all_courses():
+ # we can generate these given settings.CURR_TERM
+ term_codes = settings.ACTIVE_TERMS
+ for term_code in term_codes:
+ try:
+ print "Scraping for semester " + str(term_code)
+ courses = scrape_parse_semester(term_code)
+ # just a sanity check in case we ever modify scrape_parse
+ [validate_course(x) for x in courses]
+ scrapeCounter = ScrapeCounter()
+ [scrape_import_course(x, scrapeCounter) for x in courses]
+ print str(scrapeCounter)
+ print "----------------------------------"
+ except Exception as e:
+ raise e
diff --git a/course_selection/scrape_import.py b/course_selection/scrape_import.py
index 7b263f1e5..9429439a6 100644
--- a/course_selection/scrape_import.py
+++ b/course_selection/scrape_import.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
class ScrapeCounter:
def __init__(self):
@@ -26,10 +27,10 @@ def __str__(self):
def scrape_import_course(course, counter=ScrapeCounter()):
- from models import Course
+ from .models import Course
def import_section(section, course_object):
- from models import Section, Meeting
+ from .models import Section, Meeting
def import_meeting(meeting, course_object, section_object):
meeting_object, created = Meeting.objects.get_or_create(
@@ -61,7 +62,7 @@ def import_meeting(meeting, course_object, section_object):
return section_object
def import_professor(prof, course_object):
- from models import Professor
+ from .models import Professor
prof_object, created = Professor.objects.get_or_create(
name=prof['full_name']
)
@@ -73,7 +74,7 @@ def import_professor(prof, course_object):
return course_object
def import_listing(listing, course_object):
- from models import Course_Listing
+ from .models import Course_Listing
listing_object, created = Course_Listing.objects.get_or_create(
course=course_object,
dept=listing['dept'],
@@ -86,7 +87,7 @@ def import_listing(listing, course_object):
return listing_object
def import_semester(semester):
- from models import Semester
+ from .models import Semester
semester_object, created = Semester.objects.get_or_create(
start_date=semester['start_date'],
end_date=semester['end_date'],
diff --git a/course_selection/scrape_import.py.bak b/course_selection/scrape_import.py.bak
new file mode 100644
index 000000000..7b263f1e5
--- /dev/null
+++ b/course_selection/scrape_import.py.bak
@@ -0,0 +1,111 @@
+class ScrapeCounter:
+
+ def __init__(self):
+ self.totalCoursesCount = 0
+ self.createdCoursesCount = 0
+ self.totalSectionsCount = 0
+ self.createdSectionsCount = 0
+ self.totalMeetingsCount = 0
+ self.createdMeetingsCount = 0
+ self.totalProfessorsCount = 0
+ self.createdProfessorsCount = 0
+ self.totalListingsCount = 0
+ self.createdListingsCount = 0
+
+ def __str__(self):
+ return str(self.createdCoursesCount) + " new courses\n" + \
+ str(self.totalCoursesCount) + " total courses\n" + \
+ str(self.createdSectionsCount) + " new sections\n" + \
+ str(self.totalSectionsCount) + " total sections\n" + \
+ str(self.createdMeetingsCount) + " new meetings\n" + \
+ str(self.totalMeetingsCount) + " total meetings\n" + \
+ str(self.createdProfessorsCount) + " new professors\n" + \
+ str(self.totalProfessorsCount) + " total professors\n" + \
+ str(self.createdListingsCount) + " new listings\n" + \
+ str(self.totalListingsCount) + " total listings"
+
+
+def scrape_import_course(course, counter=ScrapeCounter()):
+ from models import Course
+
+ def import_section(section, course_object):
+ from models import Section, Meeting
+
+ def import_meeting(meeting, course_object, section_object):
+ meeting_object, created = Meeting.objects.get_or_create(
+ section=section_object,
+ start_time=meeting['start_time'],
+ end_time=meeting['end_time'],
+ days=meeting['days'],
+ location=meeting['location']
+ )
+ if created:
+ counter.createdMeetingsCount += 1
+ counter.totalMeetingsCount += 1
+ return meeting_object
+ section_object, created = Section.objects.get_or_create(
+ course=course_object,
+ name=section['name']
+ )
+ section_object.section_type = section['type']
+ section_object.section_capacity = section['capacity']
+ section_object.section_enrollment = section['enrollment']
+ section_object.section_registrar_id = section['registrar_id']
+ Meeting.objects.filter(section=section_object).delete()
+ [import_meeting(x, course_object, section_object)
+ for x in section['meetings']]
+ section_object.save()
+ if created:
+ counter.createdSectionsCount += 1
+ counter.totalSectionsCount += 1
+ return section_object
+
+ def import_professor(prof, course_object):
+ from models import Professor
+ prof_object, created = Professor.objects.get_or_create(
+ name=prof['full_name']
+ )
+ course_object.professors.add(prof_object)
+ prof_object.save()
+ if created:
+ counter.createdProfessorsCount += 1
+ counter.totalProfessorsCount += 1
+ return course_object
+
+ def import_listing(listing, course_object):
+ from models import Course_Listing
+ listing_object, created = Course_Listing.objects.get_or_create(
+ course=course_object,
+ dept=listing['dept'],
+ number=listing['code'],
+ is_primary=listing['is_primary']
+ )
+ if created:
+ counter.createdListingsCount += 1
+ counter.totalListingsCount += 1
+ return listing_object
+
+ def import_semester(semester):
+ from models import Semester
+ semester_object, created = Semester.objects.get_or_create(
+ start_date=semester['start_date'],
+ end_date=semester['end_date'],
+ term_code=semester['term_code']
+ )
+ return semester_object
+
+ course_object, created = Course.objects.get_or_create(
+ registrar_id=course['guid'],
+ semester=import_semester(course['semester'])
+ )
+ course_object.title = course['title']
+ course_object.description = course['description']
+ [import_section(x, course_object) for x in course['sections']]
+ course_object.professors.clear()
+ [import_professor(x, course_object) for x in course['professors']]
+ [import_listing(x, course_object) for x in course['course_listings']]
+ course_object.save()
+ if created:
+ counter.createdCoursesCount += 1
+ counter.totalCoursesCount += 1
+ return counter
diff --git a/course_selection/views.py b/course_selection/views.py
index 44bff3cea..8e780ffbd 100644
--- a/course_selection/views.py
+++ b/course_selection/views.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden
from django.views.decorators.http import require_GET
@@ -14,7 +15,7 @@
import json
# TODO don't use import *
-from models import * # NOQA
+from .models import * # NOQA
# @ensure_csrf_cookie
@@ -215,7 +216,7 @@ def mobile_logged_in(request):
# course enrollment form generation
#############################################################################
-from pdf import get_template
+from .pdf import get_template
def get_worksheet_pdf(request, schedule_id, template_name='course_enrollment_worksheet.pdf', **kwargs):
diff --git a/course_selection/views.py.bak b/course_selection/views.py.bak
new file mode 100644
index 000000000..44bff3cea
--- /dev/null
+++ b/course_selection/views.py.bak
@@ -0,0 +1,489 @@
+from django.shortcuts import render, redirect
+from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden
+from django.views.decorators.http import require_GET
+from django.contrib.auth.decorators import login_required
+from django.db.models import Q
+from django.core.cache import caches
+from django.views.decorators.cache import cache_page, never_cache
+from django.core.urlresolvers import reverse
+# send regardless of whether Django thinks we should
+
+from view_cache_utils import cache_page_with_prefix
+import hashlib
+
+import json
+
+# TODO don't use import *
+from models import * # NOQA
+
+# @ensure_csrf_cookie
+
+
+def index(request):
+ """
+ Home page. Show landing page or course selection page, depending on user's state.
+ """
+ if not request.user.is_authenticated():
+ return redirect('landing')
+
+ return render(request, 'main/index.html', {
+ 'username': unicode(request.user.username)
+ })
+
+
+@login_required
+def course_evaluations(request, semester_id, course_id):
+ """
+ Course evaluations, an iFrame to registrar's
+ """
+ return render(request, 'main/course_evaluations.html', {
+ 'semester_id': semester_id,
+ 'course_id': course_id
+ })
+
+
+def landing(request):
+ """
+ Displays the landing page.
+ """
+ return render(request, 'landing/index.html', None)
+
+
+def status(request):
+ """
+ Displays the status page.
+ """
+ return render(request, 'status/index.html', None)
+
+
+def about(request):
+ """
+ Displays the about page.
+ """
+ return render(request, 'landing/about.html', None)
+
+
+def continuity_check(request):
+ """
+ This is a continuity check we can trigger that makes sure:
+ - number of schedules > 100
+ - number of users > 100
+ - number of classes > 100
+
+ """
+
+ if Schedule.objects.count() > 100 and Section.objects.count() > 100 and Nice_User.objects.count() > 100:
+ return HttpResponse("OK")
+ return HttpResponse("Alarm")
+
+
+def we_sorry(request):
+ """
+ Displays the sorry announcement page.
+ """
+ return render(request, 'announcements/we_sorry.html', None)
+
+
+def hydrate_meeting_dict(meeting):
+ return {
+ 'days': meeting.days,
+ 'start_time': meeting.start_time,
+ 'end_time': meeting.end_time,
+ 'location': meeting.location,
+ 'id': meeting.id
+ }
+
+
+def hydrate_section_dict(section, course):
+ meetings = [hydrate_meeting_dict(meeting)
+ for meeting in section.meetings.all()]
+ return {
+ 'id': section.id,
+ 'name': section.name,
+ 'section_type': section.section_type,
+ 'section_capacity': section.section_capacity,
+ 'section_enrollment': section.section_enrollment,
+ 'course': "/course_selection/api/v1/course/" + str(course.id) + "/",
+ 'meetings': meetings
+ }
+
+
+def hydrate_course_listing_dict(course_listing):
+ return {
+ 'dept': course_listing.dept,
+ 'number': course_listing.number,
+ 'is_primary': course_listing.is_primary,
+ }
+
+
+def hydrate_semester(semester):
+ return {
+ 'id': semester.id,
+ 'start_date': unicode(semester.start_date),
+ 'end_date': unicode(semester.end_date),
+ 'name': unicode(semester),
+ 'term_code': semester.term_code
+ }
+
+
+def hydrate_course_dict(course):
+ sections = [hydrate_section_dict(section, course)
+ for section in course.sections.all()]
+ course_listings = [hydrate_course_listing_dict(
+ cl) for cl in course.course_listing_set.all()]
+ return {
+ 'course_listings': course_listings,
+ 'description': course.description,
+ 'id': course.id,
+ 'registrar_id': course.registrar_id,
+ 'title': course.title,
+ 'sections': sections,
+ 'semester': hydrate_semester(course.semester),
+ }
+
+
+def get_courses_by_term_code(term_code):
+ filtered = Course.objects.filter(Q(semester__term_code=term_code))
+ return [hydrate_course_dict(c) for c in filtered]
+
+
+def hydrate_user_dict(user):
+ return {
+ 'id': user.id,
+ 'netid': user.netid
+ }
+
+
+@require_GET
+@cache_page(60 * 60 * 24)
+def get_users_json(request):
+ """
+ Returns list of all users
+ Cached for a day
+ """
+ results = [hydrate_user_dict(u) for u in Nice_User.objects.all()]
+ data = json.dumps(results)
+ return HttpResponse(data, 'application/json', status=200)
+
+
+@require_GET
+@cache_page(60 * 30, cache="courseapi") # cache for 30 minutes
+def get_courses_json(request, term_code):
+ """
+ Returns list of courses for a semester
+ Cached for a day by ?semester__term_code
+ """
+ data = caches['courses'].get(term_code)
+ if data is None:
+ results = get_courses_by_term_code(term_code)
+ data = json.dumps(results)
+ # add doesn't try to set if already exists (i.e. races)
+ caches['courses'].add(term_code, data)
+ return HttpResponse(data, 'application/json', status=200)
+
+
+@require_GET
+@cache_page_with_prefix(60 * 60 * 24, lambda request: hashlib.md5(request.GET.get('semester__term_code', '')).hexdigest())
+def get_courses_json_old(request):
+ """
+ Returns list of courses for a semester
+ Cached for a day by ?semester__term_code
+ """
+ term_code = request.GET.get('semester__term_code', '')
+ results = get_courses_by_term_code(term_code)
+ data = json.dumps(results)
+ return HttpResponse(data, 'application/json', status=200)
+
+
+@login_required
+def mobile_logged_in(request):
+ """Custom log in page handler for iOS app.
+
+ If logged in (i.e. on the way back from CAS), returns username.
+ The way to use this is to go to /login?next=mobile_logged_in
+
+ The mobile app detects that we're logged in as soon as the URL changes to /mobile_logged_in.
+ At that point it stops and grabs the username from the displayed page.
+
+ """
+ # TODO: retrieve Nice_User corresponding to request.user
+ nice_user = Nice_User.objects.get(netid=request.user.username)
+ return HttpResponse(request.user.username + " " + unicode(nice_user.id))
+
+
+#############################################################################
+# course enrollment form generation
+#############################################################################
+
+from pdf import get_template
+
+
+def get_worksheet_pdf(request, schedule_id, template_name='course_enrollment_worksheet.pdf', **kwargs):
+ """
+ returns a filled out course enrollment form
+ NOTE: use sp to check a checkbox
+ """
+
+ # get all the required fields:
+ # get user schedule, verify that user has permissions
+ #
+ # get each course, registrar id and times
+
+ #user = NetID_Name_Table.objects.get(Q(netid=request.user.username))
+ try:
+ schedule = Schedule.objects.get(Q(id=schedule_id))
+ assert schedule.user.netid == request.user.username
+ except:
+ return HttpResponseNotFound('Schedule Not Found')
+
+ context = get_form_context(schedule)
+
+ # context = {
+ # 'class': '2016',
+ # 'terms': 'sp', # terms is the spring term
+ # 'first': 'first_name', #unicode(user.first_name),
+ # 'last': 'last_name' #unicode(user.last_name)
+ # }
+
+ response = HttpResponse(content_type='application/pdf')
+ response['Content-Disposition'] = \
+ 'inline; filename=course_enrollment_worksheet.pdf'
+
+ template = get_template(template_name)
+ response.write(template.render(context))
+
+ return response
+
+
+def get_form_context(schedule_obj):
+ import json
+
+ context = {}
+ context = fill_out_term(context, schedule_obj)
+ context = fill_out_acad(context, schedule_obj)
+
+ enrollments = json.loads(schedule_obj.enrollments)
+ for idx, enrollment in enumerate(enrollments):
+ # form indices start from 1, array indices start from 0
+ context = fill_out_course(context, idx + 1, enrollment)
+
+ return context
+
+
+def fill_out_term(context, schedule_obj):
+ if int(schedule_obj.semester.term_code[3]) == 2:
+ context['termf'] = 'sp'
+ else:
+ context['terms'] = 'sp'
+ return context
+
+
+def fill_out_acad(context, schedule_obj):
+ end_year = int(schedule_obj.semester.term_code[1:3])
+ start_year = end_year - 1
+ context['acad'] = unicode(start_year) + '-' + unicode(end_year)
+ return context
+
+
+def get_course_checkbox_val(idx):
+ if idx == 1:
+ return 'add1'
+ elif idx == 2:
+ return 'Yes'
+ else:
+ return 'sp'
+
+
+def fill_out_course(context, idx, enrollment):
+ checkbox_name = 'add' + str(idx)
+ course_name = 'crs' + str(idx)
+ checkbox_val = get_course_checkbox_val(idx)
+
+ course = Course.objects.get(id=enrollment['course_id'])
+ sections = [Section.objects.get(id=section_id)
+ for section_id in enrollment['sections']]
+
+ for section in sections:
+ meetings = section.meetings.all()
+
+ # TODO: test this
+ if section.section_type == Section.TYPE_LECTURE or \
+ section.section_type == Section.TYPE_SEMINAR or \
+ section.section_type == Section.TYPE_CLASS:
+ section_type = 'a'
+ elif section.section_type == Section.TYPE_LAB:
+ section_type = 'c'
+ else:
+ section_type = 'b'
+
+ # if there are no days, we assume the class doesn't have meetings
+ if len(meetings) > 0 and meetings[0].days:
+ daytm_field_name = 'daytm' + str(idx) + section_type
+ clsnbr_field_name = 'clsnbr' + str(idx) + section_type
+
+ context[daytm_field_name] = ' '.join([
+ meeting.days + meeting.start_time[:-3]
+ + " - " + meeting.end_time[:-3] for meeting in meetings
+ ])
+
+ context[clsnbr_field_name] = section.section_registrar_id
+
+ context[checkbox_name] = checkbox_val
+ context[course_name] = course.primary_listing()
+ return context
+
+
+# CALENDAR EXPORT
+from icalendar import Calendar, Event, vText, vRecur
+from datetime import datetime, timedelta
+from dateutil import parser as dt_parser
+import pytz
+import uuid
+
+
+@require_GET
+@never_cache
+def ical_feed(request, cal_id):
+ """
+ iCal feed
+ Kept up-to-date
+ Parameter: cal_id, which is a guid that is 1:1 with schedules in our database
+ """
+ cal = Calendar()
+ cal.add('prodid', '-//Recal Course Planner//recal.io//')
+ cal.add('version', '2.0')
+
+ try:
+ sched = Schedule.objects.get(Q(ical_uuid=uuid.UUID(cal_id)))
+ except Schedule.DoesNotExist:
+ return HttpResponseNotFound("Not Found")
+ semester = sched.semester
+
+ cal.add('X-WR-CALNAME', 'ReCal %s (%s)' %
+ (unicode(semester), sched.user.netid))
+ cal.add('X-WR-CALDESC', sched.title) # 'ReCal Schedule'
+ # https://msdn.microsoft.com/en-us/library/ee178699(v=exchg.80).aspx. 15
+ # minute updates.
+ cal.add('X-PUBLISHED-TTL', 'PT15M')
+
+ tz = pytz.timezone("US/Eastern") # pytz.utc
+ # recurrence
+ ical_days = {
+ 0: 'MO',
+ 1: 'TU',
+ 2: 'WE',
+ 3: 'TH',
+ 4: 'FR'
+ }
+ builtin_days = {
+ 'M': 0,
+ 'T': 1,
+ 'W': 2,
+ 'Th': 3,
+ 'F': 4
+ }
+
+ #data = [hydrate_course_dict(Course.objects.get(Q(id=course['course_id']))) for course in json.loads(sched.enrollments)]
+
+ # 0-6, monday is 0, sunday is 6. we will have values of 0 (Monday) or 2
+ # (Wednesday)
+ day_of_week_semester_start = semester.start_date.weekday()
+
+ for course_obj in json.loads(sched.enrollments):
+ # course = Course.objects.get(Q(id=course_obj['course_id'])) #
+ # course_obj is json object; course is model
+ for section_id in course_obj['sections']:
+ section = Section.objects.get(Q(pk=section_id))
+ for meeting in section.meetings.all():
+ event = Event()
+ event.add('summary', unicode(section)) # name of the event
+ event.add('location', vText(
+ meeting.location + ', Princeton, NJ'))
+
+ # compute first meeting date.
+ # days when the class meets. convert them to day difference
+ # relative to first date of the semester
+ # split by space. format: 0-4. monday is 0, friday is 4.
+ # matches python weekday() format.
+ daysofweek = [builtin_days[i] for i in meeting.days.split()]
+ if len(daysofweek) == 0:
+ # no meetings -- skip
+ continue
+ dayofweek_relative_to_semester_start = []
+ for dow in daysofweek:
+ diff = dow - day_of_week_semester_start
+ if diff < 0:
+ diff += 7 # add a week
+ dayofweek_relative_to_semester_start.append(diff)
+ # all must be positive
+ assert all(
+ [d >= 0 for d in dayofweek_relative_to_semester_start])
+ # a T,Th class will have first meeting on T if semester starts
+ # on M, or on Th if semester starts on Wed.
+ first_meeting_dayofweek = min(
+ dayofweek_relative_to_semester_start)
+
+ # get meeting time
+ # meeting.start_time, meeting.end_time examples: "03:20 PM",
+ # "10:00 AM"
+ start_time = dt_parser.parse(meeting.start_time)
+ end_time = dt_parser.parse(meeting.end_time)
+
+ # add event time.
+ event.add('dtstart', tz.localize(datetime(semester.start_date.year, semester.start_date.month, semester.start_date.day,
+ start_time.hour, start_time.minute, 0) + timedelta(days=first_meeting_dayofweek))) # year,month,day, hour,min,second in ET
+ event.add('dtend', tz.localize(datetime(semester.start_date.year, semester.start_date.month,
+ semester.start_date.day, end_time.hour, end_time.minute, 0) + timedelta(days=first_meeting_dayofweek)))
+ # "property specifies the DATE-TIME that iCalendar object was created". per 3.8.7.2 of RFC 5545, must be in UTC
+ event.add('dtstamp', tz.localize(datetime(semester.start_date.year,
+ semester.start_date.month, semester.start_date.day, 0, 0, 0)))
+
+ # recurring event config
+ # producing e.g.: RRULE:FREQ=WEEKLY;UNTIL=[LAST DAY OF SEMESTER
+ # + 1];WKST=SU;BYDAY=TU,TH
+ selected_days = [ical_days[i]
+ for i in sorted(daysofweek)] # formatted for ical
+ end_date = tz.localize(datetime(semester.end_date.year, semester.end_date.month,
+ semester.end_date.day, 0, 0, 0) + timedelta(days=1)) # [LAST DAY OF SEMESTER + 1]
+ event.add('rrule', vRecur(
+ {'FREQ': 'WEEKLY', 'UNTIL': end_date, 'WKST': 'SU', 'BYDAY': selected_days}))
+ cal.add_component(event)
+
+ ical = cal.to_ical()
+
+ # filter out blank lines
+ #filtered = filter(lambda x: not re.match(r'^\s*$', x), ical)
+ # print filtered
+ return HttpResponse(ical, 'text/calendar', status=200)
+
+
+@login_required
+def get_ical_url_for_schedule(request, schedule_id):
+ return get_ical_url(request, schedule_id, make_new=False)
+
+
+@login_required
+def regenerate_ical_url_for_schedule(request, schedule_id):
+ return get_ical_url(request, schedule_id, make_new=True)
+
+
+def get_ical_url(request, schedule_id, make_new=False):
+ """
+ Returns ical feed url for a particular schedule
+ Parameter: schedule_id
+ We look up the UUID that is 1:1 to this schedule. Each schedule has a UUID always (it is auto-created.)
+ If make_new, then we create a new UUID for the schedule.
+ Then we return the url with it
+ """
+ try:
+ schedule = Schedule.objects.get(Q(pk=schedule_id))
+ except Schedule.DoesNotExist:
+ return HttpResponseNotFound("Not Found")
+ # Confirm ownership
+ if schedule.user.netid != request.user.username:
+ return HttpResponseForbidden("Forbidden")
+
+ if make_new:
+ schedule.ical_uuid = uuid.uuid4()
+ schedule.save()
+ return HttpResponse(request.build_absolute_uri(reverse('ical-feed', args=(str(schedule.ical_uuid),))))
diff --git a/extern/cron_jobs/continuity.py b/extern/cron_jobs/continuity.py
index ca15a5c11..bc9ab31f0 100644
--- a/extern/cron_jobs/continuity.py
+++ b/extern/cron_jobs/continuity.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
# run continuity check
# email setup:
@@ -47,7 +48,7 @@ def sendEmail(base_url, tested_url, test_time_utc):
server.quit()
if VERBOSE:
- print 'testing', base_url, continuity_check_url
+ print('testing', base_url, continuity_check_url)
url = base_url + continuity_check_url
up = urllib.urlopen(url).read() == "OK"
if not up:
@@ -56,4 +57,4 @@ def sendEmail(base_url, tested_url, test_time_utc):
if not up: # if still not up
# message us
sendEmail(base_url, continuity_check_url, str(datetime.utcnow()))
- print 'email sent because failed', base_url, continuity_check_url
+ print('email sent because failed', base_url, continuity_check_url)
diff --git a/extern/cron_jobs/continuity.py.bak b/extern/cron_jobs/continuity.py.bak
new file mode 100644
index 000000000..ca15a5c11
--- /dev/null
+++ b/extern/cron_jobs/continuity.py.bak
@@ -0,0 +1,59 @@
+# run continuity check
+
+# email setup:
+# to set up, must go to https://www.google.com/settings/security/lesssecureapps
+# and then to https://accounts.google.com/DisplayUnlockCaptcha if doesn't work on new server (ssh tunnel in)
+
+base_url = "http://recal.io"
+continuity_check_url = "/checks/continuity"
+
+VERBOSE=False
+
+import urllib
+import time
+from datetime import datetime
+import socket
+import smtplib
+import email
+import os
+from email.MIMEMultipart import MIMEMultipart
+from email.Utils import COMMASPACE
+from email.MIMEBase import MIMEBase
+from email.parser import Parser
+from email.MIMEImage import MIMEImage
+from email.MIMEText import MIMEText
+from email.MIMEAudio import MIMEAudio
+import mimetypes
+
+def sendEmail(base_url, tested_url, test_time_utc):
+ # sends an email to maxim with this info on failure
+ import smtplib
+ fromaddr = 'maximz.mailer@gmail.com'
+ toaddrs = 'maxim@maximz.com, dxue@princeton.edu, naphats@princeton.edu' # comma separated
+ subject = "RECAL NOTIFICATION: data loss"
+ msg = email.MIMEMultipart.MIMEMultipart()
+ msg['From'] = fromaddr
+ msg['To'] = toaddrs
+ msg['Subject'] = subject
+ msg.attach(MIMEText("URL tested: " + base_url + tested_url + ". Time (UTC): " + test_time_utc))
+ msg.attach(MIMEText('\nsent via python from ' + socket.gethostname(), 'plain'))
+ username = 'maximz.mailer@gmail.com'
+ password = 'PASSWORD'
+ server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
+ server.ehlo()
+ #server.starttls()
+ server.login(username,password)
+ server.sendmail(username ,toaddrs,msg.as_string())
+ server.quit()
+
+if VERBOSE:
+ print 'testing', base_url, continuity_check_url
+url = base_url + continuity_check_url
+up = urllib.urlopen(url).read() == "OK"
+if not up:
+ # try again in 5 seconds
+ up = urllib.urlopen(url).read() == "OK"
+ if not up: # if still not up
+ # message us
+ sendEmail(base_url, continuity_check_url, str(datetime.utcnow()))
+ print 'email sent because failed', base_url, continuity_check_url
diff --git a/settings/prod.py b/settings/prod.py
index abb090859..fd104de87 100644
--- a/settings/prod.py
+++ b/settings/prod.py
@@ -1,4 +1,5 @@
-from common import * # NOQA
+from __future__ import absolute_import
+from .common import * # NOQA
from os import environ
# DEBUG CONFIGURATION
diff --git a/settings/prod.py.bak b/settings/prod.py.bak
new file mode 100644
index 000000000..abb090859
--- /dev/null
+++ b/settings/prod.py.bak
@@ -0,0 +1,111 @@
+from common import * # NOQA
+from os import environ
+
+# DEBUG CONFIGURATION
+# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
+DEBUG = environ.get('DJANGO_DEBUG', False)
+
+# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
+TEMPLATE_DEBUG = DEBUG
+# ########## END DEBUG CONFIGURATION
+
+# DATABASE CONFIGURATION
+# Parse database configuration from $DATABASE_URL
+import dj_database_url
+DATABASES['default'] = dj_database_url.config()
+
+# END DATABASE CONFIGURATION
+
+# Honor the 'X-Forwarded-Proto' header for request.is_secure()
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
+# Limit Host and Referrer headers for security purposes
+# See
+# https://docs.djangoproject.com/en/1.6/ref/settings/#std:setting-ALLOWED_HOSTS
+ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(',')
+
+# ALLOWED_HOSTS = [
+# '.recal.io', # Allow domain and subdomains
+# '.recal.io.', # Also allow FQDN and subdomains
+# 'herokuapp.com',
+# 'localhost', # Allow foreman to run
+# ]
+
+SECRET_KEY = environ.get(
+ 'DJANGO_SECRET_KEY', 'asdfasfshjkxhvkzjxhiu1012u4-9r0iojsof')
+
+
+def get_cache():
+ try:
+ environ['MEMCACHE_SERVERS'] = environ[
+ 'MEMCACHIER_SERVERS'].replace(',', ';')
+ environ['MEMCACHE_USERNAME'] = environ['MEMCACHIER_USERNAME']
+ environ['MEMCACHE_PASSWORD'] = environ['MEMCACHIER_PASSWORD']
+ return {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
+ 'LOCATION': 'my_cache_table',
+ 'TIMEOUT': 60 * 60
+ },
+ 'courses': {
+ 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
+ 'LOCATION': 'courses_cache_table',
+ 'TIMEOUT': 60 * 60
+ },
+ 'courseapi': {
+ 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
+ 'LOCATION': 'courseapi_cache_table',
+ 'TIMEOUT': 60 * 60
+ },
+ 'memcache': {
+ 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
+ 'TIMEOUT': 500,
+ 'BINARY': True,
+ 'OPTIONS': {
+ 'tcp_nodelay': True
+ }
+ }
+ }
+ except:
+ # local memory caches
+ return {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+ },
+ 'courses': {
+ 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+ },
+ 'courseapi': {
+ 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
+ }
+ }
+
+CACHES = get_cache()
+
+# END TOOLBAR CONFIGURATION
+
+LOGGING = {
+ 'version': 1,
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ },
+ 'loggers': {
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+ }
+}
diff --git a/urls.py b/urls.py
index 10165a4ad..be53b64f8 100644
--- a/urls.py
+++ b/urls.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
@@ -5,7 +6,7 @@
admin.autodiscover()
# TODO we're not sure if this is needed
-import cas # NOQA
+from . import cas # NOQA
urlpatterns = patterns(
"",
diff --git a/urls.py.bak b/urls.py.bak
new file mode 100644
index 000000000..10165a4ad
--- /dev/null
+++ b/urls.py.bak
@@ -0,0 +1,44 @@
+from django.conf.urls import patterns, include, url
+from django.conf.urls.static import static
+from django.conf import settings
+from django.contrib import admin
+admin.autodiscover()
+
+# TODO we're not sure if this is needed
+import cas # NOQA
+
+urlpatterns = patterns(
+ "",
+ url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
+ url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
+ url(r'^admin/', include(admin.site.urls)),
+ url(r'^course_selection/', include('course_selection.urls')),
+ url(r'^hackerino/login/$', 'django.contrib.auth.views.login',
+ {'template_name': 'admin/login.html'}),
+ url(r'^hackerino/logout/$', 'django.contrib.auth.views.logout',
+ {'template_name': 'admin/logout.html'}),
+ url(r'^', include('course_selection.urls')),
+)
+
+urlpatterns += patterns(
+ 'cas.views',
+ url(r'^login/$', 'login', name='cas_login'),
+ url(r'^logout/$', 'logout', name='cas_logout'),
+)
+
+if settings.DEBUG:
+ urlpatterns += static(settings.STATIC_URL,
+ document_root=settings.STATICFILES_DIRS[0])
+else:
+ urlpatterns += static(settings.STATIC_URL,
+ document_root=settings.STATICFILES_DIRS)
+
+"""
+Debug toolbar url
+
+"""
+# if settings.DEBUG:
+# import debug_toolbar
+# urlpatterns += patterns('',
+# url(r'^__debug__/', include(debug_toolbar.urls)),
+# )