/libs/gluon/dal.py


#!/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Thanks to
    * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
    * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
    * Denes
    * Chris Clark
    * clach05
    * Denes Lengyel
    * and many others who have contributed to current and previous versions

This file contains the DAL support for many relational databases,
including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)

Example of usage:

>>> # from dal import DAL, Field

### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)

### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))

### insert a record
>>> id = person.insert(name='James')

### retrieve it by id
>>> james = person(id)

### retrieve it by name
>>> james = person(name='James')

### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]

### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>

### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1

### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0

### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))

### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James

### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1

### delete one record
>>> james.delete_record()
1

### delete (drop) entire database table
>>> person.drop()

Supported field types:
id string text boolean integer double decimal password upload
blob time date datetime

Supported DAL URI strings:
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental

For more info:
help(DAL)
help(Field)
"""
###################################################################################
# this file only exposes DAL and Field
###################################################################################

__all__ = ['DAL', 'Field']

MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
TIMINGSSIZE = 100
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
import re
import sys
import locale
import os
import types
import cPickle
import datetime
import threading
import time
import cStringIO
import csv
import cgi
import copy
import socket
import logging
import copy_reg
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob
import traceback
import platform

CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name',
     'common_filter','polymodel','table_class','on_define',))

SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable'))

ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
###################################################################################
# following checks allow the use of dal without web2py, as a standalone module
###################################################################################
try:
    from utils import web2py_uuid
except ImportError:
    import uuid
    def web2py_uuid(): return str(uuid.uuid4())

try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False

try:
    import serializers
    have_serializers = True
except ImportError:
    have_serializers = False

try:
    import validators
    have_validators = True
except ImportError:
    have_validators = False
LOGGER = logging.getLogger("web2py.dal")
DEFAULT = lambda:0

GLOBAL_LOCKER = threading.RLock()
THREAD_LOCAL = threading.local()

# internal representation of tables with field
#  <table>.<field>, tables and fields may only be [a-zA-Z0-9_]

REGEX_TYPE = re.compile('^([\w\_\:]+)')
REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
REGEX_W = re.compile('^\w+$')
REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$')
REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
REGEX_QUOTES = re.compile("'[^']*'")
REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile('\://([^:@]*)\:')

# list of drivers will be built on the fly
# and lists only what is available
DRIVERS = []

try:
    from new import classobj
    from google.appengine.ext import db as gae
    from google.appengine.api import namespace_manager, rdbms
    from google.appengine.api.datastore_types import Key  ### for belongs on ID
    from google.appengine.ext.db.polymodel import PolyModel
    DRIVERS.append('google')
except ImportError:
    pass

if not 'google' in DRIVERS:

    try:
        from pysqlite2 import dbapi2 as sqlite2
        DRIVERS.append('SQLite(sqlite2)')
    except ImportError:
        LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')

    try:
        from sqlite3 import dbapi2 as sqlite3
        DRIVERS.append('SQLite(sqlite3)')
    except ImportError:
        LOGGER.debug('no SQLite drivers sqlite3')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import contrib.pymysql as pymysql
            # monkeypatch pymysql because they havent fixed the bug:
            # https://github.com/petehunt/PyMySQL/issues/86
            pymysql.ESCAPE_REGEX = re.compile("'")
            pymysql.ESCAPE_MAP = {"'": "''"}
            # end monkeypatch
        except ImportError:
            import pymysql
        DRIVERS.append('MySQL(pymysql)')
    except ImportError:
        LOGGER.debug('no MySQL driver pymysql')

    try:
        import MySQLdb
        DRIVERS.append('MySQL(MySQLdb)')
    except ImportError:
        LOGGER.debug('no MySQL driver MySQLDB')

    try:
        import psycopg2
        from psycopg2.extensions import adapt as psycopg2_adapt
        DRIVERS.append('PostgreSQL(psycopg2)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver psycopg2')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import contrib.pg8000.dbapi as pg8000
        except ImportError:
            import pg8000.dbapi as pg8000
        DRIVERS.append('PostgreSQL(pg8000)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver pg8000')

    try:
        import cx_Oracle
        DRIVERS.append('Oracle(cx_Oracle)')
    except ImportError:
        LOGGER.debug('no Oracle driver cx_Oracle')

    try:
        import pyodbc
        DRIVERS.append('MSSQL(pyodbc)')
        DRIVERS.append('DB2(pyodbc)')
        DRIVERS.append('Teradata(pyodbc)')
    except ImportError:
        LOGGER.debug('no MSSQL/DB2/Teradata driver pyodbc')

    try:
        import Sybase
        DRIVERS.append('Sybase(Sybase)')
    except ImportError:
        LOGGER.debug('no Sybase driver')

    try:
        import kinterbasdb
        DRIVERS.append('Interbase(kinterbasdb)')
        DRIVERS.append('Firebird(kinterbasdb)')
    except ImportError:
        LOGGER.debug('no Firebird/Interbase driver kinterbasdb')

    try:
        import fdb
        DRIVERS.append('Firebird(fdb)')
    except ImportError:
        LOGGER.debug('no Firebird driver fdb')
    #####
    try:
        import firebirdsql
        DRIVERS.append('Firebird(firebirdsql)')
    except ImportError:
        LOGGER.debug('no Firebird driver firebirdsql')

    try:
        import informixdb
        DRIVERS.append('Informix(informixdb)')
        LOGGER.warning('Informix support is experimental')
    except ImportError:
        LOGGER.debug('no Informix driver informixdb')

    try:
        import sapdb
        DRIVERS.append('SQL(sapdb)')
        LOGGER.warning('SAPDB support is experimental')
    except ImportError:
        LOGGER.debug('no SAP driver sapdb')

    try:
        import cubriddb
        DRIVERS.append('Cubrid(cubriddb)')
        LOGGER.warning('Cubrid support is experimental')
    except ImportError:
        LOGGER.debug('no Cubrid driver cubriddb')

    try:
        from com.ziclix.python.sql import zxJDBC
        import java.sql
        # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
        from org.sqlite import JDBC # required by java.sql; ensure we have it
        zxJDBC_sqlite = java.sql.DriverManager
        DRIVERS.append('PostgreSQL(zxJDBC)')
        DRIVERS.append('SQLite(zxJDBC)')
        LOGGER.warning('zxJDBC support is experimental')
        is_jdbc = True
    except ImportError:
        LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
        is_jdbc = False

    try:
        import ingresdbi
        DRIVERS.append('Ingres(ingresdbi)')
    except ImportError:
        LOGGER.debug('no Ingres driver ingresdbi')
    # NOTE could try JDBC.......

    try:
        import couchdb
        DRIVERS.append('CouchDB(couchdb)')
    except ImportError:
        LOGGER.debug('no Couchdb driver couchdb')

    try:
        import pymongo
        DRIVERS.append('MongoDB(pymongo)')
    except:
        LOGGER.debug('no MongoDB driver pymongo')

    try:
        import imaplib
        DRIVERS.append('IMAP(imaplib)')
    except:
        LOGGER.debug('no IMAP driver imaplib')
PLURALIZE_RULES = [
    (re.compile('child$'), re.compile('child$'), 'children'),
    (re.compile('oot$'), re.compile('oot$'), 'eet'),
    (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
    (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
    (re.compile('sis$'), re.compile('sis$'), 'ses'),
    (re.compile('man$'), re.compile('man$'), 'men'),
    (re.compile('ife$'), re.compile('ife$'), 'ives'),
    (re.compile('eau$'), re.compile('eau$'), 'eaux'),
    (re.compile('lf$'), re.compile('lf$'), 'lves'),
    (re.compile('[sxz]$'), re.compile('$'), 'es'),
    (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
    (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
    (re.compile('$'), re.compile('$'), 's'),
    ]
def pluralize(singular, rules=PLURALIZE_RULES):
    for line in rules:
        re_search, re_sub, replace = line
        plural = re_search.search(singular) and re_sub.sub(replace, singular)
        if plural: return plural
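
# A quick doctest-style sketch of the rules above (my reading of
# PLURALIZE_RULES; the function itself is pure string manipulation):
#
#     >>> pluralize('child')
#     'children'
#     >>> pluralize('leaf')
#     'leaves'
#     >>> pluralize('query')
#     'queries'
#     >>> pluralize('person')  # falls through to the catch-all rule: add 's'
#     'persons'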
def hide_password(uri):
    return REGEX_PASSWORD.sub('://******:',uri)

def OR(a,b):
    return a|b

def AND(a,b):
    return a&b

def IDENTITY(x): return x

def varquote_aux(name,quotestr='%s'):
    return name if REGEX_W.match(name) else quotestr % name
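
# varquote_aux only quotes names that are not plain identifiers; a sketch,
# assuming a double-quote template as used by SQL-standard dialects:
#
#     >>> varquote_aux('person', '"%s"')
#     'person'
#     >>> varquote_aux('my table', '"%s"')
#     '"my table"'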
if 'google' in DRIVERS:

    is_jdbc = False

    class GAEDecimalProperty(gae.Property):
        """
        GAE decimal implementation
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            # gae.Property.__init__ must not receive self again as a
            # positional argument (it would be taken as verbose_name)
            super(GAEDecimalProperty, self).__init__(**kwargs)
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def get_value_for_datastore(self, model_instance):
            value = super(GAEDecimalProperty, self)\
                .get_value_for_datastore(model_instance)
            if value is None or value == '':
                return None
            else:
                return str(value)

        def make_value_from_datastore(self, value):
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def validate(self, value):
            value = super(GAEDecimalProperty, self).validate(value)
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise gae.BadValueError("Property %s must be a Decimal or string."\
                                        % self.name)
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):

    POOLS = {}
    check_active_connection = True

    @staticmethod
    def set_folder(folder):
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    def close(self,action='commit',really=True):
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')
        # Creating the folder if it does not exist
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
    def after_connection(self):
        """ this is supposed to be overloaded by adapters """
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try to pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection()
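
    # How pooling is used in practice (a sketch; DAL is defined later in this
    # file): constructing a DAL with pool_size>0 makes close() park the
    # connection in ConnectionPool.POOLS keyed by URI instead of really
    # closing it, and reconnect() pops a parked connection and probes it with
    # 'SELECT 1;' before reuse, e.g.
    #
    #     db = DAL('postgres://user:password@localhost/test', pool_size=10)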
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):

    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    maxcharlength = MAXCHARLENGTH
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True

    TRUE = 'T'
    FALSE = 'F'
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float': 'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
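
    # The entries above are %-interpolation templates; create_table fills them
    # in from each Field's attributes. A sketch of what this yields for this
    # base adapter (dialect subclasses override the dict):
    #
    #     types['string'] % dict(length=512)  -> 'CHAR(512)'
    #     types['decimal'] % dict(precision=10, scale=2)
    #         -> 'DOUBLE' (the base template ignores precision/scale)
    #     types['reference'] % dict(foreign_key='person (id)',
    #                               on_delete_action='CASCADE')
    #         -> 'INTEGER REFERENCES person (id) ON DELETE CASCADE'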
    def id_query(self, table):
        return table._id != None

    def adapt(self, obj):
        return "'%s'" % obj.replace("'", "''")

    def smart_adapt(self, obj):
        if isinstance(obj,(int,float)):
            return str(obj)
        return self.adapt(str(obj))

    def integrity_error(self):
        return self.driver.IntegrityError

    def operational_error(self):
        return self.driver.OperationalError

    def file_exists(self, filename):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        return exists(filename)

    def file_open(self, filename, mode='rb', lock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        if have_portalocker and lock:
            fileobj = portalocker.LockedFile(filename,mode)
        else:
            fileobj = open(filename,mode)
        return fileobj

    def file_close(self, fileobj):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        if fileobj:
            fileobj.close()

    def file_delete(self, filename):
        os.unlink(filename)

    def find_driver(self,adapter_args,uri=None):
        if getattr(self,'driver',None) != None:
            return
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError, "driver %s not available" % request_driver
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError, "no driver available %s" % str(self.drivers)
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True):
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()

    def sequence_name(self,tablename):
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_sequence' % tablename

    def varquote(self,name):
        return name
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if (hasattr(rtable,'_primarykey') and
                                rfieldname in rtable._primarykey) or rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                    constraint_name = constraint_name, # should be quoted
                                    foreign_key = '%s (%s)' % (rtablename,
                                                               rfieldname),
                                    on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError, \
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name)
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field_type, field_name)
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if isinstance(field.default,(str,int,float)):
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                        field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                table_name = tablename,
                field_name=', '.join(fkeys),
                foreign_table = rtablename,
                foreign_key = ', '.join(pkeys),
                on_delete_action = field.ondelete)

        if hasattr(table,'_primarykey'):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)), other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if table._dbt:
            table._loggername = pjoin(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
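
    # A sketch of the DDL create_table produces with the base types above,
    # reusing the module docstring's example table (dialect adapters vary):
    #
    #     db.define_table('person', Field('name','string'))
    #
    # builds roughly:
    #
    #     CREATE TABLE person(
    #         id INTEGER PRIMARY KEY AUTOINCREMENT,
    #         name CHAR(512)
    #     );
    #
    # and, when migrate is enabled, pickles the field metadata next to the
    # database in a <uri_hash>_person.table file used by migrate_table below.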
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        def fix(item):
            k,v = item
            if not isinstance(v,dict):
                v = dict(type='unknown',sql=v)
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # migrations because of case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                        sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                                 (tablename, key,
                                  sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) \
                        and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;'
                             % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                    and not (key in table.fields and
                             isinstance(table[key].type, SQLCustomType)) \
                    and not sql_fields[key]['type'].startswith('reference')\
                    and not sql_fields[key]['type'].startswith('double')\
                    and not sql_fields[key]['type'].startswith('id'):
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
                not (query and self.dbengine in ('mysql','oracle','firebird')):
            db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
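
    # Migration sketch: if Field('age','integer') is later added to 'person',
    # comparing sql_fields against the pickled sql_fields_old makes this
    # method emit (on most engines):
    #
    #     ALTER TABLE person ADD age INTEGER;
    #
    # while a changed column definition is rebuilt through the add/copy/drop
    # sequence on a temporary %s__tmp column shown above.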
    def LOWER(self, first):
        return 'LOWER(%s)' % self.expand(first)

    def UPPER(self, first):
        return 'UPPER(%s)' % self.expand(first)

    def COUNT(self, first, distinct=None):
        return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \
            % self.expand(first)

    def EXTRACT(self, first, what):
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))

    def EPOCH(self, first):
        return self.EXTRACT(first, 'epoch')

    def AGGREGATE(self, first, what):
        return "%s(%s)" % (what, self.expand(first))

    def JOIN(self):
        return 'JOIN'

    def LEFT_JOIN(self):
        return 'LEFT JOIN'

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)

    def COALESCE(self, first, second):
        expressions = [self.expand(first)]+[self.expand(e) for e in second]
        return 'COALESCE(%s)' % ','.join(expressions)

    def COALESCE_ZERO(self, first):
        return 'COALESCE(%s,0)' % self.expand(first)

    def RAW(self, first):
        return first

    def ALLOW_NULL(self):
        return ''

    def SUBSTRING(self, field, parameters):
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self, key):
        return 'PRIMARY KEY(%s)' % key
    def _drop(self, table, mode):
        return ['DROP TABLE %s;' % table]

    def drop(self, table, mode=''):
        db = table._db
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        db.commit()
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            self.file_delete(table._dbt)
            logfile.write('success!\n')
    def _insert(self, table, fields):
        keys = ','.join(f.name for f,v in fields)
        values = ','.join(self.expand(v,f.type) for f,v in fields)
        return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
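
    # e.g. the docstring's person.insert(name='James') arrives here as
    # [(person.name, 'James')] and _insert renders (a sketch):
    #
    #     INSERT INTO person(name) VALUES ('James');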
    def insert(self, table, fields):
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception, e:
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid

    def bulk_insert(self, table, items):
        return [self.insert(table,item) for item in items]
    def NOT(self, first):
        return '(NOT %s)' % self.expand(first)

    def AND(self, first, second):
        return '(%s AND %s)' % (self.expand(first), self.expand(second))

    def OR(self, first, second):
        return '(%s OR %s)' % (self.expand(first), self.expand(second))

    def BELONGS(self, first, second):
        if isinstance(second, str):
            return '(%s IN (%s))' % (self.expand(first), second[:-1])
        elif second==[] or second==():
            return '(1=0)'
        items = ','.join(self.expand(item, first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first), items)
    def REGEXP(self, first, second):
        "regular expression operator"
        raise NotImplementedError

    def LIKE(self, first, second):
        "case sensitive like operator"
        raise NotImplementedError

    def ILIKE(self, first, second):
        "case insensitive like operator"
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second, 'string'))
    def STARTSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second+'%', 'string'))

    def ENDSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand('%'+second, 'string'))

    def CONTAINS(self, first, second):
        if first.type in ('string', 'text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
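
    # A sketch of the SQL fragments these operators build for the docstring's
    # person table (rendered via expand below):
    #
    #     person.name.startswith('J')        -> (person.name LIKE 'J%')
    #     person.name.contains('am')         -> (person.name LIKE '%am%')
    #     person.name.belongs(['Jim','Tim']) -> (person.name IN ('Jim','Tim'))
    #
    # for list:* fields CONTAINS matches the bar-encoded form instead,
    # e.g. LIKE '%|am|%'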
    def EQ(self, first, second=None):
        if second is None:
            return '(%s IS NULL)' % self.expand(first)
        return '(%s = %s)' % (self.expand(first),
                              self.expand(second, first.type))

    def NE(self, first, second=None):
        if second is None:
            return '(%s IS NOT NULL)' % self.expand(first)
        return '(%s <> %s)' % (self.expand(first),
                               self.expand(second, first.type))

    def LT(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s < None" % first
        return '(%s < %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def LE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s <= None" % first
        return '(%s <= %s)' % (self.expand(first),
                               self.expand(second,first.type))

    def GT(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s > None" % first
        return '(%s > %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def GE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s >= None" % first
        return '(%s >= %s)' % (self.expand(first),
                               self.expand(second,first.type))

    def ADD(self, first, second):
        return '(%s + %s)' % (self.expand(first),
                              self.expand(second, first.type))

    def SUB(self, first, second):
        return '(%s - %s)' % (self.expand(first),
                              self.expand(second, first.type))

    def MUL(self, first, second):
        return '(%s * %s)' % (self.expand(first),
                              self.expand(second, first.type))

    def DIV(self, first, second):
        return '(%s / %s)' % (self.expand(first),
                              self.expand(second, first.type))

    def MOD(self, first, second):
        return '(%s %% %s)' % (self.expand(first),
                               self.expand(second, first.type))

    def AS(self, first, second):
        return '%s AS %s' % (self.expand(first), second)

    def ON(self, first, second):
        return '%s ON %s' % (self.expand(first), self.expand(second))

    def INVERT(self, first):
        return '%s DESC' % self.expand(first)

    def COMMA(self, first, second):
        return '%s, %s' % (self.expand(first), self.expand(second))
    def expand(self, expression, field_type=None):
        if isinstance(expression, Field):
            return '%s.%s' % (expression.tablename, expression.name)
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            if not second is None:
                return op(first, second)
            elif not first is None:
                return op(first)
            elif isinstance(op, str):
                if op.endswith(';'):
                    op = op[:-1]
                return '(%s)' % op
            else:
                return op()
        elif field_type:
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
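
    # expand is the recursive renderer for a whole Query/Expression tree;
    # a sketch with the docstring's table:
    #
    #     expand((person.name=='James') & (person.id > 3))
    #     -> "((person.name = 'James') AND (person.id > 3))"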
    def alias(self, table, alias):
        """
        Given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        other['_ot'] = other._tablename
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        table._db[alias] = other
        return other
    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]

    def truncate(self, table, mode=' '):
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()

        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()
    def _update(self, tablename, query, fields):
        if query:
            if use_common_filters(query):
                query = self.common_filter(query, [tablename])
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_v = ','.join(['%s=%s' % (field.name,
                                     self.expand(value, field.type)) \
                              for (field, value) in fields])
        return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)

    def update(self, tablename, query, fields):
        sql = self._update(tablename, query, fields)
        self.execute(sql)
        try:
            return self.cursor.rowcount
        except:
            return None

    def _delete(self, tablename, query):
        if query:
            if use_common_filters(query):
                query = self.common_filter(query, [tablename])
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        return 'DELETE FROM %s%s;' % (tablename, sql_w)
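
    # e.g. (a sketch) db(person.id==3).update(name='Jim') executes
    #     UPDATE person SET name='Jim' WHERE (person.id = 3);
    # and db(person.id==3).delete() executes
    #     DELETE FROM person WHERE (person.id = 3);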
    def delete(self, tablename, query):
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter

    def get_table(self, query):
        tablenames = self.tables(query)
        if len(tablenames)==1:
            return tablenames[0]
        elif len(tablenames)<1:
            raise RuntimeError, "No table selected"
        else:
            raise RuntimeError, "Too many tables selected"
    def expand_all(self, fields, tablenames):
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    append(Expression(db,lambda:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
    def _select(self, query, fields, attributes):
        tables = self.tables
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError, 'invalid select attribute: %s' % key
        args_get = attributes.get
        tablenames = tables(query)
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if use_common_filters(query):
            query = self.common_filter(query,tablenames)

        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join(map(self.expand, fields))
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError, 'invalid select attribute: for_update'
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        def alias(t):
            return str(self.db[t])
        if inner_join and not left:
            sql_t = ', '.join([alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            if not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
            # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)
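
    # Putting _select and select_limitby together (a sketch):
    #
    #     db(person.id>0)._select(person.ALL, limitby=(10,30),
    #                             orderby=person.name)
    #
    # renders roughly:
    #
    #     SELECT person.id, person.name FROM person
    #     WHERE (person.id > 0) ORDER BY person.name
    #     LIMIT 20 OFFSET 10;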
    def _fetchall(self):
        return self.cursor.fetchall()

    def _select_aux(self,sql,fields,attributes):
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            if len(key)>200: key = hashlib.md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib.md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
    def _count(self, query, distinct=None):
        tablenames = self.tables(query)
        if query:
            if use_common_filters(query):
                query = self.common_filter(query, tablenames)
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_t = ','.join(tablenames)
        if distinct:
            if isinstance(distinct,(list, tuple)):
                distinct = xorify(distinct)
            sql_d = self.expand(distinct)
            return 'SELECT count(DISTINCT %s) FROM %s%s;' % \
                (sql_d, sql_t, sql_w)
        return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)

    def count(self, query, distinct=None):
        self.execute(self._count(query, distinct))
        return self.cursor.fetchone()[0]
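
    # e.g. (a sketch) db(person.id>0).count() runs
    #     SELECT count(*) FROM person WHERE (person.id > 0);
    # and returns the first column of the single result row.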
  1452. def tables(self, *queries):
  1453. tables = set()
  1454. for query in queries:
  1455. if isinstance(query, Field):
  1456. tables.add(query.tablename)
  1457. elif isinstance(query, (Expression, Query)):
  1458. if not query.first is None:
  1459. tables = tables.union(self.tables(query.first))
  1460. if not query.second is None:
  1461. tables = tables.union(self.tables(query.second))
  1462. return list(tables)
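# For example, tables(person.name == 'James') recurses through the query's
# first/second operands and yields ['person']; the set collapses duplicates
# when several conditions touch the same table.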
  1463. def commit(self):
  1464. if self.connection: return self.connection.commit()
  1465. def rollback(self):
  1466. if self.connection: return self.connection.rollback()
  1467. def close_connection(self):
  1468. if self.connection: return self.connection.close()
  1469. def distributed_transaction_begin(self, key):
  1470. return
  1471. def prepare(self, key):
  1472. if self.connection: self.connection.prepare()
  1473. def commit_prepared(self, key):
  1474. if self.connection: self.connection.commit()
  1475. def rollback_prepared(self, key):
  1476. if self.connection: self.connection.rollback()
  1477. def concat_add(self, table):
  1478. return ', ADD '
  1479. def constraint_name(self, table, fieldname):
  1480. return '%s_%s__constraint' % (table,fieldname)
  1481. def create_sequence_and_triggers(self, query, table, **args):
  1482. self.execute(query)
  1483. def log_execute(self, *a, **b):
  1484. if not self.connection: return None
  1485. command = a[0]
  1486. if self.db._debug:
  1487. LOGGER.debug('SQL: %s' % command)
  1488. self.db._lastsql = command
  1489. t0 = time.time()
  1490. ret = self.cursor.execute(*a, **b)
  1491. self.db._timings.append((command,time.time()-t0))
  1492. del self.db._timings[:-TIMINGSSIZE]
  1493. return ret
  1494. def execute(self, *a, **b):
  1495. return self.log_execute(*a, **b)
  1496. def represent(self, obj, fieldtype):
  1497. field_is_type = fieldtype.startswith
  1498. if isinstance(obj, CALLABLETYPES):
  1499. obj = obj()
  1500. if isinstance(fieldtype, SQLCustomType):
  1501. value = fieldtype.encoder(obj)
  1502. if fieldtype.type in ('string','text'):
  1503. return self.adapt(value)
  1504. return value
  1505. if isinstance(obj, (Expression, Field)):
  1506. return str(obj)
  1507. if field_is_type('list:'):
  1508. if not obj:
  1509. obj = []
  1510. elif not isinstance(obj, (list, tuple)):
  1511. obj = [obj]
  1512. if field_is_type('list:string'):
  1513. obj = map(str,obj)
  1514. else:
  1515. obj = map(int,obj)
  1516. if isinstance(obj, (list, tuple)):
  1517. obj = bar_encode(obj)
  1518. if obj is None:
  1519. return 'NULL'
  1520. if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
  1521. return 'NULL'
  1522. r = self.represent_exceptions(obj, fieldtype)
  1523. if not r is None:
  1524. return r
  1525. if fieldtype == 'boolean':
  1526. if obj and not str(obj)[:1].upper() in '0F':
  1527. return self.smart_adapt(self.TRUE)
  1528. else:
  1529. return self.smart_adapt(self.FALSE)
  1530. if fieldtype == 'id' or fieldtype == 'integer':
  1531. return str(int(obj))
  1532. if field_is_type('decimal'):
  1533. return str(obj)
  1534. elif field_is_type('reference'): # reference
  1535. if fieldtype.find('.')>0:
  1536. return repr(obj)
  1537. elif isinstance(obj, (Row, Reference)):
  1538. return str(obj['id'])
  1539. return str(int(obj))
  1540. elif fieldtype == 'double':
  1541. return repr(float(obj))
  1542. if isinstance(obj, unicode):
  1543. obj = obj.encode(self.db_codec)
  1544. if fieldtype == 'blob':
  1545. obj = base64.b64encode(str(obj))
  1546. elif fieldtype == 'date':
  1547. if isinstance(obj, (datetime.date, datetime.datetime)):
  1548. obj = obj.isoformat()[:10]
  1549. else:
  1550. obj = str(obj)
  1551. elif fieldtype == 'datetime':
  1552. if isinstance(obj, datetime.datetime):
  1553. obj = obj.isoformat()[:19].replace('T',' ')
  1554. elif isinstance(obj, datetime.date):
  1555. obj = obj.isoformat()[:10]+' 00:00:00'
  1556. else:
  1557. obj = str(obj)
  1558. elif fieldtype == 'time':
  1559. if isinstance(obj, datetime.time):
1560. obj = obj.isoformat()[:8] # HH:MM:SS; drop any microseconds
  1561. else:
  1562. obj = str(obj)
  1563. if not isinstance(obj,str):
  1564. obj = str(obj)
  1565. try:
  1566. obj.decode(self.db_codec)
  1567. except:
  1568. obj = obj.decode('latin1').encode(self.db_codec)
  1569. return self.adapt(obj)
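# A few illustrative mappings (a sketch; exact quoting depends on adapt()):
#   represent(None, 'string')      -> 'NULL'
#   represent(5, 'integer')        -> '5'
#   represent(True, 'boolean')     -> "'T'" with the default TRUE/FALSE pair
#   represent('x', 'list:string')  -> the bar-encoded literal "'|x|'"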
  1570. def represent_exceptions(self, obj, fieldtype):
  1571. return None
  1572. def lastrowid(self, table):
  1573. return None
  1574. def integrity_error_class(self):
  1575. return type(None)
  1576. def rowslice(self, rows, minimum=0, maximum=None):
  1577. """
  1578. By default this function does nothing;
1579. override this method in adapters whose backend does not slice rows itself.
  1580. """
  1581. return rows
  1582. def parse_value(self, value, field_type, blob_decode=True):
  1583. if field_type != 'blob' and isinstance(value, str):
  1584. try:
  1585. value = value.decode(self.db._db_codec)
  1586. except Exception:
  1587. pass
  1588. if isinstance(value, unicode):
  1589. value = value.encode('utf-8')
  1590. if isinstance(field_type, SQLCustomType):
  1591. value = field_type.decoder(value)
  1592. if not isinstance(field_type, str) or value is None:
  1593. return value
  1594. elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
  1595. return value
  1596. elif field_type.startswith('geo'):
  1597. return value
  1598. elif field_type == 'blob' and not blob_decode:
  1599. return value
  1600. else:
  1601. key = REGEX_TYPE.match(field_type).group(0)
  1602. return self.parsemap[key](value,field_type)
  1603. def parse_reference(self, value, field_type):
  1604. referee = field_type[10:].strip()
  1605. if not '.' in referee:
  1606. value = Reference(value)
  1607. value._table, value._record = self.db[referee], None
  1608. return value
  1609. def parse_boolean(self, value, field_type):
  1610. return value == True or str(value)[:1].lower() == 't'
  1611. def parse_date(self, value, field_type):
  1612. if not isinstance(value, (datetime.date,datetime.datetime)):
  1613. (y, m, d) = map(int, str(value)[:10].strip().split('-'))
  1614. value = datetime.date(y, m, d)
  1615. return value
  1616. def parse_time(self, value, field_type):
  1617. if not isinstance(value, datetime.time):
  1618. time_items = map(int,str(value)[:8].strip().split(':')[:3])
  1619. if len(time_items) == 3:
  1620. (h, mi, s) = time_items
  1621. else:
  1622. (h, mi, s) = time_items + [0]
  1623. value = datetime.time(h, mi, s)
  1624. return value
  1625. def parse_datetime(self, value, field_type):
  1626. if not isinstance(value, datetime.datetime):
  1627. value = str(value)
  1628. date_part,time_part,timezone = value[:10],value[11:19],value[19:]
  1629. if '+' in timezone:
  1630. ms,tz = timezone.split('+')
  1631. h,m = tz.split(':')
  1632. dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
  1633. elif '-' in timezone:
  1634. ms,tz = timezone.split('-')
  1635. h,m = tz.split(':')
  1636. dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
  1637. else:
  1638. dt = None
  1639. (y, m, d) = map(int,date_part.split('-'))
  1640. time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
  1641. while len(time_parts)<3: time_parts.append(0)
  1642. time_items = map(int,time_parts)
  1643. (h, mi, s) = time_items
  1644. value = datetime.datetime(y, m, d, h, mi, s)
  1645. if dt:
  1646. value = value + dt
  1647. return value
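# Sketch of the offset handling above: '2012-03-01 10:30:00+02:00' yields
# datetime.datetime(2012, 3, 1, 12, 30, 0), i.e. the '+hh:mm' suffix is
# added to (and '-hh:mm' subtracted from) the naive timestamp.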
  1648. def parse_blob(self, value, field_type):
  1649. return base64.b64decode(str(value))
  1650. def parse_decimal(self, value, field_type):
  1651. decimals = int(field_type[8:-1].split(',')[-1])
  1652. if self.dbengine in ('sqlite', 'spatialite'):
  1653. value = ('%.' + str(decimals) + 'f') % value
  1654. if not isinstance(value, decimal.Decimal):
  1655. value = decimal.Decimal(str(value))
  1656. return value
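# For example, with field_type 'decimal(10,2)' on SQLite a stored float
# 1.237 is first formatted as '1.24' and returned as decimal.Decimal('1.24').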
  1657. def parse_list_integers(self, value, field_type):
  1658. if not self.dbengine=='google:datastore':
  1659. value = bar_decode_integer(value)
  1660. return value
  1661. def parse_list_references(self, value, field_type):
  1662. if not self.dbengine=='google:datastore':
  1663. value = bar_decode_integer(value)
  1664. return [self.parse_reference(r, field_type[5:]) for r in value]
  1665. def parse_list_strings(self, value, field_type):
  1666. if not self.dbengine=='google:datastore':
  1667. value = bar_decode_string(value)
  1668. return value
  1669. def parse_id(self, value, field_type):
  1670. return int(value)
  1671. def parse_integer(self, value, field_type):
  1672. return int(value)
  1673. def parse_double(self, value, field_type):
  1674. return float(value)
  1675. def build_parsemap(self):
  1676. self.parsemap = {
  1677. 'id':self.parse_id,
  1678. 'integer':self.parse_integer,
  1679. 'bigint':self.parse_integer,
  1680. 'float':self.parse_double,
  1681. 'double':self.parse_double,
  1682. 'reference':self.parse_reference,
  1683. 'boolean':self.parse_boolean,
  1684. 'date':self.parse_date,
  1685. 'time':self.parse_time,
  1686. 'datetime':self.parse_datetime,
  1687. 'blob':self.parse_blob,
  1688. 'decimal':self.parse_decimal,
  1689. 'list:integer':self.parse_list_integers,
  1690. 'list:reference':self.parse_list_references,
  1691. 'list:string':self.parse_list_strings,
  1692. }
  1693. def parse(self, rows, fields, colnames, blob_decode=True,
  1694. cacheable = False):
  1695. self.build_parsemap()
  1696. db = self.db
  1697. virtualtables = []
  1698. new_rows = []
  1699. tmps = []
  1700. for colname in colnames:
  1701. if not REGEX_TABLE_DOT_FIELD.match(colname):
  1702. tmps.append(None)
  1703. else:
  1704. (tablename, fieldname) = colname.split('.')
  1705. table = db[tablename]
  1706. field = table[fieldname]
  1707. ft = field.type
  1708. tmps.append((tablename,fieldname,table,field,ft))
  1709. for (i,row) in enumerate(rows):
  1710. new_row = Row()
  1711. for (j,colname) in enumerate(colnames):
  1712. value = row[j]
  1713. tmp = tmps[j]
  1714. if tmp:
  1715. (tablename,fieldname,table,field,ft) = tmp
  1716. if tablename in new_row:
  1717. colset = new_row[tablename]
  1718. else:
  1719. colset = new_row[tablename] = Row()
  1720. if tablename not in virtualtables:
  1721. virtualtables.append(tablename)
  1722. value = self.parse_value(value,ft,blob_decode)
  1723. if field.filter_out:
  1724. value = field.filter_out(value)
  1725. colset[fieldname] = value
  1726. # for backward compatibility
  1727. if ft=='id' and fieldname!='id' and \
  1728. not 'id' in table.fields:
  1729. colset['id'] = value
  1730. if ft == 'id' and not cacheable:
  1731. # temporary hack to deal with
  1732. # GoogleDatastoreAdapter
  1733. # references
  1734. if isinstance(self, GoogleDatastoreAdapter):
  1735. id = value.key().id_or_name()
  1736. colset[fieldname] = id
  1737. colset.gae_item = value
  1738. else:
  1739. id = value
  1740. colset.update_record = RecordUpdater(colset,table,id)
  1741. colset.delete_record = RecordDeleter(table,id)
  1742. for rfield in table._referenced_by:
  1743. referee_link = db._referee_name and \
  1744. db._referee_name % dict(
  1745. table=rfield.tablename,field=rfield.name)
  1746. if referee_link and not referee_link in colset:
  1747. colset[referee_link] = LazySet(rfield,id)
  1748. else:
  1749. if not '_extra' in new_row:
  1750. new_row['_extra'] = Row()
  1751. new_row['_extra'][colname] = \
  1752. self.parse_value(value,
  1753. fields[j].type,blob_decode)
  1754. new_column_name = \
  1755. REGEX_SELECT_AS_PARSER.search(colname)
  1756. if not new_column_name is None:
  1757. column_name = new_column_name.groups(0)
  1758. setattr(new_row,column_name[0],value)
  1759. new_rows.append(new_row)
  1760. rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
  1761. for tablename in virtualtables:
  1762. ### new style virtual fields
  1763. table = db[tablename]
  1764. fields_virtual = [(f,v) for (f,v) in table.iteritems()
  1765. if isinstance(v,FieldVirtual)]
  1766. fields_lazy = [(f,v) for (f,v) in table.iteritems()
  1767. if isinstance(v,FieldLazy)]
  1768. if fields_virtual or fields_lazy:
  1769. for row in rowsobj.records:
  1770. box = row[tablename]
  1771. for f,v in fields_virtual:
  1772. box[f] = v.f(row)
  1773. for f,v in fields_lazy:
  1774. box[f] = (v.handler or VirtualCommand)(v.f,row)
  1775. ### old style virtual fields
  1776. for item in table.virtualfields:
  1777. try:
  1778. rowsobj = rowsobj.setvirtualfields(**{tablename:item})
  1779. except (KeyError, AttributeError):
1780. # avoid breaking virtual fields on partial selects
  1781. pass
  1782. return rowsobj
  1783. def common_filter(self, query, tablenames):
  1784. tenant_fieldname = self.db._request_tenant
  1785. for tablename in tablenames:
  1786. table = self.db[tablename]
  1787. # deal with user provided filters
  1788. if table._common_filter != None:
  1789. query = query & table._common_filter(query)
  1790. # deal with multi_tenant filters
  1791. if tenant_fieldname in table:
  1792. default = table[tenant_fieldname].default
  1793. if not default is None:
  1794. newquery = table[tenant_fieldname] == default
  1795. if query is None:
  1796. query = newquery
  1797. else:
  1798. query = query & newquery
  1799. return query
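# Illustrative multi-tenant setup (a sketch; the field name is whatever
# db._request_tenant holds, 'request_tenant' by default in web2py):
#   Field('request_tenant', default=request.env.http_host, writable=False)
# Queries on tables carrying that field are then AND-ed with
# table.request_tenant == <default> by the loop above.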
  1800. def CASE(self,query,t,f):
  1801. def represent(x):
  1802. types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
  1803. if x is None: return 'NULL'
  1804. elif isinstance(x,Expression): return str(x)
  1805. else: return self.represent(x,types.get(type(x),'string'))
  1806. return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
  1807. (self.expand(query),represent(t),represent(f)))
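# The expression expands to SQL of the form (illustrative values):
#   CASE WHEN person.name='James' THEN 1 ELSE 0 END
# where t and f pass through represent() unless they are Expressions.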
  1808. ###################################################################################
  1809. # List of all the available adapters; they all extend BaseAdapter.
  1810. ###################################################################################
  1811. class SQLiteAdapter(BaseAdapter):
  1812. drivers = ('sqlite2','sqlite3')
1813. can_select_for_update = None # emulated below in select() via BEGIN IMMEDIATE TRANSACTION
  1814. def EXTRACT(self,field,what):
  1815. return "web2py_extract('%s',%s)" % (what, self.expand(field))
  1816. @staticmethod
  1817. def web2py_extract(lookup, s):
  1818. table = {
  1819. 'year': (0, 4),
  1820. 'month': (5, 7),
  1821. 'day': (8, 10),
  1822. 'hour': (11, 13),
  1823. 'minute': (14, 16),
  1824. 'second': (17, 19),
  1825. }
  1826. try:
  1827. if lookup != 'epoch':
  1828. (i, j) = table[lookup]
  1829. return int(s[i:j])
  1830. else:
  1831. return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
  1832. except:
  1833. return None
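# For example: web2py_extract('month', '2011-12-31 10:20:30') -> 12, and
# web2py_extract('epoch', ...) -> the corresponding Unix timestamp; any
# malformed value returns None via the bare except.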
  1834. @staticmethod
  1835. def web2py_regexp(expression, item):
  1836. return re.compile(expression).search(item) is not None
  1837. def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
  1838. credential_decoder=IDENTITY, driver_args={},
  1839. adapter_args={}, do_connect=True):
  1840. self.db = db
  1841. self.dbengine = "sqlite"
  1842. self.uri = uri
  1843. if do_connect: self.find_driver(adapter_args)
  1844. self.pool_size = 0
  1845. self.folder = folder
  1846. self.db_codec = db_codec
  1847. self.find_or_make_work_folder()
  1848. path_encoding = sys.getfilesystemencoding() \
  1849. or locale.getdefaultlocale()[1] or 'utf8'
  1850. if uri.startswith('sqlite:memory'):
  1851. dbpath = ':memory:'
  1852. else:
  1853. dbpath = uri.split('://',1)[1]
  1854. if dbpath[0] != '/':
  1855. dbpath = pjoin(
  1856. self.folder.decode(path_encoding).encode('utf8'), dbpath)
  1857. if not 'check_same_thread' in driver_args:
  1858. driver_args['check_same_thread'] = False
  1859. if not 'detect_types' in driver_args:
  1860. driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
  1861. def connector(dbpath=dbpath, driver_args=driver_args):
  1862. return self.driver.Connection(dbpath, **driver_args)
  1863. self.connector = connector
  1864. if do_connect: self.reconnect()
  1865. def after_connection(self):
  1866. self.connection.create_function('web2py_extract', 2,
  1867. SQLiteAdapter.web2py_extract)
  1868. self.connection.create_function("REGEXP", 2,
  1869. SQLiteAdapter.web2py_regexp)
  1870. def _truncate(self, table, mode=''):
  1871. tablename = table._tablename
  1872. return ['DELETE FROM %s;' % tablename,
  1873. "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
  1874. def lastrowid(self, table):
  1875. return self.cursor.lastrowid
  1876. def REGEXP(self,first,second):
  1877. return '(%s REGEXP %s)' % (self.expand(first),
  1878. self.expand(second,'string'))
  1879. def select(self, query, fields, attributes):
  1880. """
  1881. Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
  1882. Note that the entire database, rather than one record, is locked
  1883. (it will be locked eventually anyway by the following UPDATE).
  1884. """
  1885. if attributes.get('for_update', False) and not 'cache' in attributes:
  1886. self.execute('BEGIN IMMEDIATE TRANSACTION;')
  1887. return super(SQLiteAdapter, self).select(query, fields, attributes)
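# Illustrative usage, assuming the 'person' table from the module docstring:
#   >>> rows = db(db.person.id > 0).select(for_update=True)
# issues BEGIN IMMEDIATE TRANSACTION first, taking SQLite's database-level
# write lock before the read.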
  1888. class SpatiaLiteAdapter(SQLiteAdapter):
  1889. drivers = ('sqlite3','sqlite2')
  1890. types = copy.copy(BaseAdapter.types)
  1891. types.update(geometry='GEOMETRY')
  1892. def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
  1893. credential_decoder=IDENTITY, driver_args={},
  1894. adapter_args={}, do_connect=True, srid=4326):
  1895. self.db = db
  1896. self.dbengine = "spatialite"
  1897. self.uri = uri
  1898. if do_connect: self.find_driver(adapter_args)
  1899. self.pool_size = 0
  1900. self.folder = folder
  1901. self.db_codec = db_codec
  1902. self.find_or_make_work_folder()
  1903. self.srid = srid
  1904. path_encoding = sys.getfilesystemencoding() \
  1905. or locale.getdefaultlocale()[1] or 'utf8'
  1906. if uri.startswith('spatialite:memory'):
  1907. dbpath = ':memory:'
  1908. else:
  1909. dbpath = uri.split('://',1)[1]
  1910. if dbpath[0] != '/':
  1911. dbpath = pjoin(
  1912. self.folder.decode(path_encoding).encode('utf8'), dbpath)
  1913. if not 'check_same_thread' in driver_args:
  1914. driver_args['check_same_thread'] = False
  1915. if not 'detect_types' in driver_args:
  1916. driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
  1917. def connector(dbpath=dbpath, driver_args=driver_args):
  1918. return self.driver.Connection(dbpath, **driver_args)
  1919. self.connector = connector
  1920. if do_connect: self.reconnect()
  1921. def after_connection(self):
  1922. self.connection.enable_load_extension(True)
  1923. # for Windows, rename libspatialite-2.dll to libspatialite.dll
  1924. # Linux uses libspatialite.so
  1925. # Mac OS X uses libspatialite.dylib
  1926. libspatialite = SPATIALLIBS[platform.system()]
1927. self.execute(r'SELECT load_extension("%s");' % libspatialite)
  1928. self.connection.create_function('web2py_extract', 2,
  1929. SQLiteAdapter.web2py_extract)
  1930. self.connection.create_function("REGEXP", 2,
  1931. SQLiteAdapter.web2py_regexp)
  1932. # GIS functions
  1933. def ST_ASGEOJSON(self, first, second):
  1934. return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
  1935. second['precision'], second['options'])
  1936. def ST_ASTEXT(self, first):
  1937. return 'AsText(%s)' %(self.expand(first))
  1938. def ST_CONTAINS(self, first, second):
  1939. return 'Contains(%s,%s)' %(self.expand(first),
  1940. self.expand(second, first.type))
  1941. def ST_DISTANCE(self, first, second):
  1942. return 'Distance(%s,%s)' %(self.expand(first),
  1943. self.expand(second, first.type))
  1944. def ST_EQUALS(self, first, second):
  1945. return 'Equals(%s,%s)' %(self.expand(first),
  1946. self.expand(second, first.type))
  1947. def ST_INTERSECTS(self, first, second):
  1948. return 'Intersects(%s,%s)' %(self.expand(first),
  1949. self.expand(second, first.type))
  1950. def ST_OVERLAPS(self, first, second):
  1951. return 'Overlaps(%s,%s)' %(self.expand(first),
  1952. self.expand(second, first.type))
  1953. def ST_SIMPLIFY(self, first, second):
  1954. return 'Simplify(%s,%s)' %(self.expand(first),
  1955. self.expand(second, 'double'))
  1956. def ST_TOUCHES(self, first, second):
  1957. return 'Touches(%s,%s)' %(self.expand(first),
  1958. self.expand(second, first.type))
  1959. def ST_WITHIN(self, first, second):
  1960. return 'Within(%s,%s)' %(self.expand(first),
  1961. self.expand(second, first.type))
  1962. def represent(self, obj, fieldtype):
  1963. field_is_type = fieldtype.startswith
  1964. if field_is_type('geo'):
  1965. srid = 4326 # Spatialite default srid for geometry
  1966. geotype, parms = fieldtype[:-1].split('(')
  1967. parms = parms.split(',')
  1968. if len(parms) >= 2:
  1969. schema, srid = parms[:2]
  1970. # if field_is_type('geometry'):
  1971. value = "ST_GeomFromText('%s',%s)" %(obj, srid)
  1972. # elif field_is_type('geography'):
  1973. # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
  1974. # else:
  1975. # raise SyntaxError, 'Invalid field type %s' %fieldtype
  1976. return value
  1977. return BaseAdapter.represent(self, obj, fieldtype)
  1978. class JDBCSQLiteAdapter(SQLiteAdapter):
  1979. drivers = ('zxJDBC_sqlite',)
  1980. def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
  1981. credential_decoder=IDENTITY, driver_args={},
  1982. adapter_args={}, do_connect=True):
  1983. self.db = db
  1984. self.dbengine = "sqlite"
  1985. self.uri = uri
  1986. if do_connect: self.find_driver(adapter_args)
  1987. self.pool_size = pool_size
  1988. self.folder = folder
  1989. self.db_codec = db_codec
  1990. self.find_or_make_work_folder()
  1991. path_encoding = sys.getfilesystemencoding() \
  1992. or locale.getdefaultlocale()[1] or 'utf8'
  1993. if uri.startswith('sqlite:memory'):
  1994. dbpath = ':memory:'
  1995. else:
  1996. dbpath = uri.split('://',1)[1]
  1997. if dbpath[0] != '/':
  1998. dbpath = pjoin(
  1999. self.folder.decode(path_encoding).encode('utf8'), dbpath)
  2000. def connector(dbpath=dbpath,driver_args=driver_args):
  2001. return self.driver.connect(
  2002. self.driver.getConnection('jdbc:sqlite:'+dbpath),
  2003. **driver_args)
  2004. self.connector = connector
  2005. if do_connect: self.reconnect()
  2006. def after_connection(self):
  2007. # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
  2008. self.connection.create_function('web2py_extract', 2,
  2009. SQLiteAdapter.web2py_extract)
  2010. def execute(self, a):
  2011. return self.log_execute(a)
  2012. class MySQLAdapter(BaseAdapter):
  2013. drivers = ('MySQLdb','pymysql')
  2014. maxcharlength = 255
  2015. commit_on_alter_table = True
  2016. support_distributed_transaction = True
  2017. types = {
  2018. 'boolean': 'CHAR(1)',
  2019. 'string': 'VARCHAR(%(length)s)',
  2020. 'text': 'LONGTEXT',
  2021. 'password': 'VARCHAR(%(length)s)',
  2022. 'blob': 'LONGBLOB',
  2023. 'upload': 'VARCHAR(%(length)s)',
  2024. 'integer': 'INT',
  2025. 'bigint': 'BIGINT',
  2026. 'float': 'FLOAT',
  2027. 'double': 'DOUBLE',
  2028. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2029. 'date': 'DATE',
  2030. 'time': 'TIME',
  2031. 'datetime': 'DATETIME',
  2032. 'id': 'INT AUTO_INCREMENT NOT NULL',
  2033. 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2034. 'list:integer': 'LONGTEXT',
  2035. 'list:string': 'LONGTEXT',
  2036. 'list:reference': 'LONGTEXT',
  2037. 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
  2038. 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2039. }
  2040. def varquote(self,name):
  2041. return varquote_aux(name,'`%s`')
  2042. def RANDOM(self):
  2043. return 'RAND()'
  2044. def SUBSTRING(self,field,parameters):
  2045. return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
  2046. parameters[0], parameters[1])
  2047. def EPOCH(self, first):
  2048. return "UNIX_TIMESTAMP(%s)" % self.expand(first)
  2049. def _drop(self,table,mode):
2050. # breaks referential integrity, but without it MySQL refuses to drop the table
  2051. return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
  2052. 'SET FOREIGN_KEY_CHECKS=1;']
  2053. def distributed_transaction_begin(self,key):
  2054. self.execute('XA START;')
  2055. def prepare(self,key):
  2056. self.execute("XA END;")
  2057. self.execute("XA PREPARE;")
2058. def commit_prepared(self,key):
  2059. self.execute("XA COMMIT;")
  2060. def rollback_prepared(self,key):
  2061. self.execute("XA ROLLBACK;")
  2062. def concat_add(self,table):
  2063. return '; ALTER TABLE %s ADD ' % table
  2064. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
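# The regex accepts URIs such as (illustrative example):
#   mysql://username:password@localhost:3306/testdb?set_encoding=utf8
# yielding user/password/host/port/db/charset; port and charset default
# to 3306 and 'utf8' in __init__ below.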
  2065. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2066. credential_decoder=IDENTITY, driver_args={},
  2067. adapter_args={}, do_connect=True):
  2068. self.db = db
  2069. self.dbengine = "mysql"
  2070. self.uri = uri
  2071. if do_connect: self.find_driver(adapter_args,uri)
  2072. self.pool_size = pool_size
  2073. self.folder = folder
  2074. self.db_codec = db_codec
  2075. self.find_or_make_work_folder()
  2076. ruri = uri.split('://',1)[1]
  2077. m = self.REGEX_URI.match(ruri)
  2078. if not m:
  2079. raise SyntaxError, \
  2080. "Invalid URI string in DAL: %s" % self.uri
  2081. user = credential_decoder(m.group('user'))
  2082. if not user:
  2083. raise SyntaxError, 'User required'
  2084. password = credential_decoder(m.group('password'))
  2085. if not password:
  2086. password = ''
  2087. host = m.group('host')
  2088. if not host:
  2089. raise SyntaxError, 'Host name required'
  2090. db = m.group('db')
  2091. if not db:
  2092. raise SyntaxError, 'Database name required'
  2093. port = int(m.group('port') or '3306')
  2094. charset = m.group('charset') or 'utf8'
  2095. driver_args.update(db=db,
  2096. user=credential_decoder(user),
  2097. passwd=credential_decoder(password),
  2098. host=host,
  2099. port=port,
  2100. charset=charset)
  2101. def connector(driver_args=driver_args):
  2102. return self.driver.connect(**driver_args)
  2103. self.connector = connector
  2104. if do_connect: self.reconnect()
  2105. def after_connection(self):
  2106. self.execute('SET FOREIGN_KEY_CHECKS=1;')
  2107. self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
  2108. def lastrowid(self,table):
  2109. self.execute('select last_insert_id();')
  2110. return int(self.cursor.fetchone()[0])
  2111. class PostgreSQLAdapter(BaseAdapter):
  2112. drivers = ('psycopg2','pg8000')
  2113. support_distributed_transaction = True
  2114. types = {
  2115. 'boolean': 'CHAR(1)',
  2116. 'string': 'VARCHAR(%(length)s)',
  2117. 'text': 'TEXT',
  2118. 'password': 'VARCHAR(%(length)s)',
  2119. 'blob': 'BYTEA',
  2120. 'upload': 'VARCHAR(%(length)s)',
  2121. 'integer': 'INTEGER',
  2122. 'bigint': 'BIGINT',
  2123. 'float': 'FLOAT',
  2124. 'double': 'FLOAT8',
  2125. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2126. 'date': 'DATE',
  2127. 'time': 'TIME',
  2128. 'datetime': 'TIMESTAMP',
  2129. 'id': 'SERIAL PRIMARY KEY',
  2130. 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2131. 'list:integer': 'TEXT',
  2132. 'list:string': 'TEXT',
  2133. 'list:reference': 'TEXT',
  2134. 'geometry': 'GEOMETRY',
  2135. 'geography': 'GEOGRAPHY',
  2136. 'big-id': 'BIGSERIAL PRIMARY KEY',
  2137. 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2138. }
  2139. def varquote(self,name):
  2140. return varquote_aux(name,'"%s"')
  2141. def adapt(self,obj):
  2142. if self.driver_name == 'psycopg2':
  2143. return psycopg2_adapt(obj).getquoted()
  2144. elif self.driver_name == 'pg8000':
  2145. return "'%s'" % str(obj).replace("%","%%").replace("'","''")
  2146. else:
  2147. return "'%s'" % str(obj).replace("'","''")
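# For example, adapt("O'Hara") -> "'O''Hara'" under the pg8000/fallback
# quoting above, while psycopg2 delegates to its own adapter machinery.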
  2148. def sequence_name(self,table):
  2149. return '%s_id_Seq' % table
  2150. def RANDOM(self):
  2151. return 'RANDOM()'
  2152. def ADD(self, first, second):
  2153. t = first.type
  2154. if t in ('text','string','password','upload','blob'):
  2155. return '(%s || %s)' % (self.expand(first), self.expand(second, t))
  2156. else:
  2157. return '(%s + %s)' % (self.expand(first), self.expand(second, t))
  2158. def distributed_transaction_begin(self,key):
  2159. return
  2160. def prepare(self,key):
  2161. self.execute("PREPARE TRANSACTION '%s';" % key)
  2162. def commit_prepared(self,key):
  2163. self.execute("COMMIT PREPARED '%s';" % key)
  2164. def rollback_prepared(self,key):
  2165. self.execute("ROLLBACK PREPARED '%s';" % key)
  2166. def create_sequence_and_triggers(self, query, table, **args):
  2167. # following lines should only be executed if table._sequence_name does not exist
  2168. # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
  2169. # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
  2170. # % (table._tablename, table._fieldname, table._sequence_name))
  2171. self.execute(query)
  2172. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
  2173. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2174. credential_decoder=IDENTITY, driver_args={},
  2175. adapter_args={}, do_connect=True, srid=4326):
  2176. self.db = db
  2177. self.dbengine = "postgres"
  2178. self.uri = uri
  2179. if do_connect: self.find_driver(adapter_args,uri)
  2180. self.pool_size = pool_size
  2181. self.folder = folder
  2182. self.db_codec = db_codec
  2183. self.srid = srid
  2184. self.find_or_make_work_folder()
  2185. ruri = uri.split('://',1)[1]
  2186. m = self.REGEX_URI.match(ruri)
  2187. if not m:
  2188. raise SyntaxError, "Invalid URI string in DAL"
  2189. user = credential_decoder(m.group('user'))
  2190. if not user:
  2191. raise SyntaxError, 'User required'
  2192. password = credential_decoder(m.group('password'))
  2193. if not password:
  2194. password = ''
  2195. host = m.group('host')
  2196. if not host:
  2197. raise SyntaxError, 'Host name required'
  2198. db = m.group('db')
  2199. if not db:
  2200. raise SyntaxError, 'Database name required'
  2201. port = m.group('port') or '5432'
  2202. sslmode = m.group('sslmode')
  2203. if sslmode:
  2204. msg = ("dbname='%s' user='%s' host='%s' "
  2205. "port=%s password='%s' sslmode='%s'") \
  2206. % (db, user, host, port, password, sslmode)
  2207. else:
  2208. msg = ("dbname='%s' user='%s' host='%s' "
  2209. "port=%s password='%s'") \
  2210. % (db, user, host, port, password)
2211. # choose driver according to uri
  2212. self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__)
  2213. def connector(msg=msg,driver_args=driver_args):
  2214. return self.driver.connect(msg,**driver_args)
  2215. self.connector = connector
  2216. if do_connect: self.reconnect()
  2217. def after_connection(self):
  2218. self.connection.set_client_encoding('UTF8')
  2219. self.execute("SET standard_conforming_strings=on;")
  2220. def lastrowid(self,table):
  2221. self.execute("select currval('%s')" % table._sequence_name)
  2222. return int(self.cursor.fetchone()[0])
  2223. def LIKE(self,first,second):
  2224. return '(%s LIKE %s)' % (self.expand(first),
  2225. self.expand(second,'string'))
  2226. def ILIKE(self,first,second):
  2227. return '(%s ILIKE %s)' % (self.expand(first),
  2228. self.expand(second,'string'))
  2229. def REGEXP(self,first,second):
  2230. return '(%s ~ %s)' % (self.expand(first),
  2231. self.expand(second,'string'))
  2232. def STARTSWITH(self,first,second):
  2233. return '(%s ILIKE %s)' % (self.expand(first),
  2234. self.expand(second+'%','string'))
  2235. def ENDSWITH(self,first,second):
  2236. return '(%s ILIKE %s)' % (self.expand(first),
  2237. self.expand('%'+second,'string'))
  2238. def CONTAINS(self,first,second):
  2239. if first.type in ('string','text'):
  2240. key = '%'+str(second).replace('%','%%')+'%'
  2241. elif first.type.startswith('list:'):
  2242. key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
  2243. return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
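# For instance, on a 'list:string' field a search for 'a' builds the key
# '%|a|%', so the bar-encoded column is matched with SQL of the form
# (illustrative names): (person.tags ILIKE '%|a|%')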
  2244. # GIS functions
  2245. def ST_ASGEOJSON(self, first, second):
  2246. """
  2247. http://postgis.org/docs/ST_AsGeoJSON.html
  2248. """
  2249. return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
  2250. self.expand(first), second['precision'], second['options'])
  2251. def ST_ASTEXT(self, first):
  2252. """
  2253. http://postgis.org/docs/ST_AsText.html
  2254. """
  2255. return 'ST_AsText(%s)' %(self.expand(first))
  2256. # def ST_CONTAINED(self, first, second):
  2257. # """
  2258. # non-standard function based on ST_Contains with parameters reversed
  2259. # http://postgis.org/docs/ST_Contains.html
  2260. # """
  2261. # return 'ST_Contains(%s,%s)' % (self.expand(second, first.type), self.expand(first))
  2262. def ST_CONTAINS(self, first, second):
  2263. """
  2264. http://postgis.org/docs/ST_Contains.html
  2265. """
  2266. return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2267. def ST_DISTANCE(self, first, second):
  2268. """
  2269. http://postgis.org/docs/ST_Distance.html
  2270. """
  2271. return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2272. def ST_EQUALS(self, first, second):
  2273. """
  2274. http://postgis.org/docs/ST_Equals.html
  2275. """
  2276. return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2277. def ST_INTERSECTS(self, first, second):
  2278. """
  2279. http://postgis.org/docs/ST_Intersects.html
  2280. """
  2281. return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2282. def ST_OVERLAPS(self, first, second):
  2283. """
  2284. http://postgis.org/docs/ST_Overlaps.html
  2285. """
  2286. return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2287. def ST_SIMPLIFY(self, first, second):
  2288. """
  2289. http://postgis.org/docs/ST_Simplify.html
  2290. """
  2291. return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
  2292. def ST_TOUCHES(self, first, second):
  2293. """
  2294. http://postgis.org/docs/ST_Touches.html
  2295. """
  2296. return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2297. def ST_WITHIN(self, first, second):
  2298. """
  2299. http://postgis.org/docs/ST_Within.html
  2300. """
  2301. return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
  2302. def represent(self, obj, fieldtype):
  2303. field_is_type = fieldtype.startswith
  2304. if field_is_type('geo'):
  2305. srid = 4326 # postGIS default srid for geometry
  2306. geotype, parms = fieldtype[:-1].split('(')
  2307. parms = parms.split(',')
  2308. if len(parms) >= 2:
  2309. schema, srid = parms[:2]
  2310. if field_is_type('geometry'):
  2311. value = "ST_GeomFromText('%s',%s)" %(obj, srid)
  2312. elif field_is_type('geography'):
  2313. value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
  2314. # else:
  2315. # raise SyntaxError, 'Invalid field type %s' %fieldtype
  2316. return value
  2317. return BaseAdapter.represent(self, obj, fieldtype)
  2318. class NewPostgreSQLAdapter(PostgreSQLAdapter):
  2319. drivers = ('psycopg2','pg8000')
  2320. types = {
  2321. 'boolean': 'CHAR(1)',
  2322. 'string': 'VARCHAR(%(length)s)',
  2323. 'text': 'TEXT',
  2324. 'password': 'VARCHAR(%(length)s)',
  2325. 'blob': 'BYTEA',
  2326. 'upload': 'VARCHAR(%(length)s)',
  2327. 'integer': 'INTEGER',
  2328. 'bigint': 'BIGINT',
  2329. 'float': 'FLOAT',
  2330. 'double': 'FLOAT8',
  2331. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2332. 'date': 'DATE',
  2333. 'time': 'TIME',
  2334. 'datetime': 'TIMESTAMP',
  2335. 'id': 'SERIAL PRIMARY KEY',
  2336. 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2337. 'list:integer': 'BIGINT[]',
  2338. 'list:string': 'TEXT[]',
  2339. 'list:reference': 'BIGINT[]',
  2340. 'geometry': 'GEOMETRY',
  2341. 'geography': 'GEOGRAPHY',
  2342. 'big-id': 'BIGSERIAL PRIMARY KEY',
  2343. 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2344. }
  2345. def parse_list_integers(self, value, field_type):
  2346. return value
  2347. def parse_list_references(self, value, field_type):
  2348. return [self.parse_reference(r, field_type[5:]) for r in value]
  2349. def parse_list_strings(self, value, field_type):
  2350. return value
  2351. def represent(self, obj, fieldtype):
  2352. field_is_type = fieldtype.startswith
  2353. if field_is_type('list:'):
  2354. if not obj:
  2355. obj = []
  2356. elif not isinstance(obj, (list, tuple)):
  2357. obj = [obj]
  2358. if field_is_type('list:string'):
  2359. obj = map(str,obj)
  2360. else:
  2361. obj = map(int,obj)
  2362. return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
  2363. return BaseAdapter.represent(self, obj, fieldtype)
  2364. class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
  2365. drivers = ('zxJDBC',)
  2366. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
  2367. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2368. credential_decoder=IDENTITY, driver_args={},
  2369. adapter_args={}, do_connect=True):
  2370. self.db = db
  2371. self.dbengine = "postgres"
  2372. self.uri = uri
  2373. if do_connect: self.find_driver(adapter_args,uri)
  2374. self.pool_size = pool_size
  2375. self.folder = folder
  2376. self.db_codec = db_codec
  2377. self.find_or_make_work_folder()
  2378. ruri = uri.split('://',1)[1]
  2379. m = self.REGEX_URI.match(ruri)
  2380. if not m:
  2381. raise SyntaxError, "Invalid URI string in DAL"
  2382. user = credential_decoder(m.group('user'))
  2383. if not user:
  2384. raise SyntaxError, 'User required'
  2385. password = credential_decoder(m.group('password'))
  2386. if not password:
  2387. password = ''
  2388. host = m.group('host')
  2389. if not host:
  2390. raise SyntaxError, 'Host name required'
  2391. db = m.group('db')
  2392. if not db:
  2393. raise SyntaxError, 'Database name required'
  2394. port = m.group('port') or '5432'
  2395. msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
  2396. def connector(msg=msg,driver_args=driver_args):
  2397. return self.driver.connect(*msg,**driver_args)
  2398. self.connector = connector
  2399. if do_connect: self.reconnect()
  2400. def after_connection(self):
  2401. self.connection.set_client_encoding('UTF8')
  2402. self.execute('BEGIN;')
  2403. self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
  2404. class OracleAdapter(BaseAdapter):
  2405. drivers = ('cx_Oracle',)
  2406. commit_on_alter_table = False
  2407. types = {
  2408. 'boolean': 'CHAR(1)',
  2409. 'string': 'VARCHAR2(%(length)s)',
  2410. 'text': 'CLOB',
  2411. 'password': 'VARCHAR2(%(length)s)',
  2412. 'blob': 'CLOB',
  2413. 'upload': 'VARCHAR2(%(length)s)',
  2414. 'integer': 'INT',
  2415. 'bigint': 'NUMBER',
  2416. 'float': 'FLOAT',
  2417. 'double': 'BINARY_DOUBLE',
  2418. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2419. 'date': 'DATE',
  2420. 'time': 'CHAR(8)',
  2421. 'datetime': 'DATE',
  2422. 'id': 'NUMBER PRIMARY KEY',
  2423. 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2424. 'list:integer': 'CLOB',
  2425. 'list:string': 'CLOB',
  2426. 'list:reference': 'CLOB',
  2427. 'big-id': 'NUMBER PRIMARY KEY',
  2428. 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2429. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2430. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
  2431. }
  2432. def sequence_name(self,tablename):
  2433. return '%s_sequence' % tablename
  2434. def trigger_name(self,tablename):
  2435. return '%s_trigger' % tablename
  2436. def LEFT_JOIN(self):
  2437. return 'LEFT OUTER JOIN'
  2438. def RANDOM(self):
  2439. return 'dbms_random.value'
  2440. def NOT_NULL(self,default,field_type):
  2441. return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
  2442. def _drop(self,table,mode):
  2443. sequence_name = table._sequence_name
  2444. return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
  2445. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  2446. if limitby:
  2447. (lmin, lmax) = limitby
  2448. if len(sql_w) > 1:
  2449. sql_w_row = sql_w + ' AND w_row > %i' % lmin
  2450. else:
  2451. sql_w_row = 'WHERE w_row > %i' % lmin
  2452. return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
  2453. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
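# Sketch of the pagination query emitted for limitby=(10, 30):
#   SELECT <fields> FROM (
#     SELECT w_tmp.*, ROWNUM w_row FROM (<inner select>) w_tmp
#     WHERE ROWNUM <= 30) ... WHERE w_row > 10 ...;
# the double nesting is needed because ROWNUM is assigned before ORDER BY.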
  2454. def constraint_name(self, tablename, fieldname):
  2455. constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
  2456. if len(constraint_name)>30:
  2457. constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
  2458. return constraint_name
  2459. def represent_exceptions(self, obj, fieldtype):
  2460. if fieldtype == 'blob':
  2461. obj = base64.b64encode(str(obj))
  2462. return ":CLOB('%s')" % obj
  2463. elif fieldtype == 'date':
  2464. if isinstance(obj, (datetime.date, datetime.datetime)):
  2465. obj = obj.isoformat()[:10]
  2466. else:
  2467. obj = str(obj)
  2468. return "to_date('%s','yyyy-mm-dd')" % obj
  2469. elif fieldtype == 'datetime':
  2470. if isinstance(obj, datetime.datetime):
  2471. obj = obj.isoformat()[:19].replace('T',' ')
  2472. elif isinstance(obj, datetime.date):
  2473. obj = obj.isoformat()[:10]+' 00:00:00'
  2474. else:
  2475. obj = str(obj)
  2476. return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
  2477. return None
  2478. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2479. credential_decoder=IDENTITY, driver_args={},
  2480. adapter_args={}, do_connect=True):
  2481. self.db = db
  2482. self.dbengine = "oracle"
  2483. self.uri = uri
  2484. if do_connect: self.find_driver(adapter_args,uri)
  2485. self.pool_size = pool_size
  2486. self.folder = folder
  2487. self.db_codec = db_codec
  2488. self.find_or_make_work_folder()
  2489. ruri = uri.split('://',1)[1]
  2490. if not 'threaded' in driver_args:
  2491. driver_args['threaded']=True
  2492. def connector(uri=ruri,driver_args=driver_args):
  2493. return self.driver.connect(uri,**driver_args)
  2494. self.connector = connector
  2495. if do_connect: self.reconnect()
  2496. def after_connection(self):
  2497. self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
  2498. self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
  2499. oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
  2500. def execute(self, command, args=None):
  2501. args = args or []
  2502. i = 1
  2503. while True:
  2504. m = self.oracle_fix.match(command)
  2505. if not m:
  2506. break
  2507. command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
  2508. args.append(m.group('clob')[6:-2].replace("''", "'"))
  2509. i += 1
  2510. if command[-1:]==';':
  2511. command = command[:-1]
  2512. return self.log_execute(command, args)
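# Illustrative rewrite performed above: a command containing
#   ... VALUES (:CLOB('some text')) ...
# becomes ... VALUES (:1) ... with 'some text' appended to args, so large
# CLOB literals travel as bind variables rather than inline SQL.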
  2513. def create_sequence_and_triggers(self, query, table, **args):
  2514. tablename = table._tablename
  2515. sequence_name = table._sequence_name
  2516. trigger_name = table._trigger_name
  2517. self.execute(query)
  2518. self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
  2519. self.execute("""
  2520. CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
  2521. DECLARE
  2522. curr_val NUMBER;
  2523. diff_val NUMBER;
  2524. PRAGMA autonomous_transaction;
  2525. BEGIN
  2526. IF :NEW.id IS NOT NULL THEN
  2527. EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
  2528. diff_val := :NEW.id - curr_val - 1;
  2529. IF diff_val != 0 THEN
  2530. EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
  2531. EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
  2532. EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
  2533. END IF;
  2534. END IF;
  2535. SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL;
  2536. END;
  2537. """ % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name))
  2538. def lastrowid(self,table):
  2539. sequence_name = table._sequence_name
  2540. self.execute('SELECT %s.currval FROM dual;' % sequence_name)
  2541. return int(self.cursor.fetchone()[0])
  2542. #def parse_value(self, value, field_type, blob_decode=True):
  2543. # if blob_decode and isinstance(value, cx_Oracle.LOB):
  2544. # try:
  2545. # value = value.read()
  2546. # except self.driver.ProgrammingError:
  2547. # # After a subsequent fetch the LOB value is not valid anymore
  2548. # pass
  2549. # return BaseAdapter.parse_value(self, value, field_type, blob_decode)
  2550. def _fetchall(self):
  2551. if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
  2552. return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
  2553. for c in r]) for r in self.cursor]
  2554. else:
  2555. return self.cursor.fetchall()
  2556. class MSSQLAdapter(BaseAdapter):
  2557. drivers = ('pyodbc',)
  2558. types = {
  2559. 'boolean': 'BIT',
  2560. 'string': 'VARCHAR(%(length)s)',
  2561. 'text': 'TEXT',
  2562. 'password': 'VARCHAR(%(length)s)',
  2563. 'blob': 'IMAGE',
  2564. 'upload': 'VARCHAR(%(length)s)',
  2565. 'integer': 'INT',
  2566. 'bigint': 'BIGINT',
  2567. 'float': 'FLOAT',
  2568. 'double': 'FLOAT',
  2569. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2570. 'date': 'DATETIME',
  2571. 'time': 'CHAR(8)',
  2572. 'datetime': 'DATETIME',
  2573. 'id': 'INT IDENTITY PRIMARY KEY',
  2574. 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2575. 'list:integer': 'TEXT',
  2576. 'list:string': 'TEXT',
  2577. 'list:reference': 'TEXT',
  2578. 'geometry': 'geometry',
  2579. 'geography': 'geography',
  2580. 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
  2581. 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2582. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2583. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
  2584. }
  2585. def varquote(self,name):
  2586. return varquote_aux(name,'[%s]')
  2587. def EXTRACT(self,field,what):
  2588. return "DATEPART(%s,%s)" % (what, self.expand(field))
  2589. def LEFT_JOIN(self):
  2590. return 'LEFT OUTER JOIN'
  2591. def RANDOM(self):
  2592. return 'NEWID()'
  2593. def ALLOW_NULL(self):
  2594. return ' NULL'
  2595. def SUBSTRING(self,field,parameters):
  2596. return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
  2597. def PRIMARY_KEY(self,key):
  2598. return 'PRIMARY KEY CLUSTERED (%s)' % key
  2599. def AGGREGATE(self, first, what):
  2600. if what == 'LENGTH':
  2601. what = 'LEN'
  2602. return "%s(%s)" % (what, self.expand(first))
  2603. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  2604. if limitby:
  2605. (lmin, lmax) = limitby
  2606. sql_s += ' TOP %i' % lmax
  2607. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
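# This T-SQL dialect lacks OFFSET here, so TOP over-fetches: limitby=(10, 30)
# emits SELECT TOP 30 ... and rowslice() below discards rows 0..9 on the
# client after the fetch.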
  2608. TRUE = 1
  2609. FALSE = 0
  2610. REGEX_DSN = re.compile('^(?P<dsn>.+)$')
  2611. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
  2612. REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
  2613. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2614. credential_decoder=IDENTITY, driver_args={},
  2615. adapter_args={}, do_connect=True, srid=4326):
  2616. self.db = db
  2617. self.dbengine = "mssql"
  2618. self.uri = uri
  2619. if do_connect: self.find_driver(adapter_args,uri)
  2620. self.pool_size = pool_size
  2621. self.folder = folder
  2622. self.db_codec = db_codec
  2623. self.srid = srid
  2624. self.find_or_make_work_folder()
  2625. # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
  2626. ruri = uri.split('://',1)[1]
  2627. if '@' not in ruri:
  2628. try:
  2629. m = self.REGEX_DSN.match(ruri)
  2630. if not m:
  2631. raise SyntaxError, \
  2632. 'Parsing uri string(%s) has no result' % self.uri
  2633. dsn = m.group('dsn')
  2634. if not dsn:
  2635. raise SyntaxError, 'DSN required'
  2636. except SyntaxError, e:
  2637. LOGGER.error('NdGpatch error')
  2638. raise e
  2639. # was cnxn = 'DSN=%s' % dsn
  2640. cnxn = dsn
  2641. else:
  2642. m = self.REGEX_URI.match(ruri)
  2643. if not m:
  2644. raise SyntaxError, \
  2645. "Invalid URI string in DAL: %s" % self.uri
  2646. user = credential_decoder(m.group('user'))
  2647. if not user:
  2648. raise SyntaxError, 'User required'
  2649. password = credential_decoder(m.group('password'))
  2650. if not password:
  2651. password = ''
  2652. host = m.group('host')
  2653. if not host:
  2654. raise SyntaxError, 'Host name required'
  2655. db = m.group('db')
  2656. if not db:
  2657. raise SyntaxError, 'Database name required'
  2658. port = m.group('port') or '1433'
  2659. # Parse the optional url name-value arg pairs after the '?'
  2660. # (in the form of arg1=value1&arg2=value2&...)
  2661. # Default values (drivers like FreeTDS insist on uppercase parameter keys)
  2662. argsdict = { 'DRIVER':'{SQL Server}' }
  2663. urlargs = m.group('urlargs') or ''
  2664. for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
  2665. argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
  2666. urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
  2667. cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
  2668. % (host, port, db, user, password, urlargs)
  2669. def connector(cnxn=cnxn,driver_args=driver_args):
  2670. return self.driver.connect(cnxn,**driver_args)
  2671. self.connector = connector
  2672. if do_connect: self.reconnect()
  2673. def lastrowid(self,table):
  2674. #self.execute('SELECT @@IDENTITY;')
  2675. self.execute('SELECT SCOPE_IDENTITY();')
  2676. return int(self.cursor.fetchone()[0])
  2677. def integrity_error_class(self):
  2678. return pyodbc.IntegrityError
  2679. def rowslice(self,rows,minimum=0,maximum=None):
  2680. if maximum is None:
  2681. return rows[minimum:]
  2682. return rows[minimum:maximum]
  2683. def EPOCH(self, first):
  2684. return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
  2685. # GIS functions
  2686. # No STAsGeoJSON in MSSQL
  2687. def ST_ASTEXT(self, first):
  2688. return '%s.STAsText()' %(self.expand(first))
  2689. def ST_CONTAINS(self, first, second):
  2690. return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2691. def ST_DISTANCE(self, first, second):
  2692. return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
  2693. def ST_EQUALS(self, first, second):
  2694. return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2695. def ST_INTERSECTS(self, first, second):
  2696. return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2697. def ST_OVERLAPS(self, first, second):
  2698. return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2699. # no STSimplify in MSSQL
  2700. def ST_TOUCHES(self, first, second):
  2701. return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2702. def ST_WITHIN(self, first, second):
  2703. return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
  2704. def represent(self, obj, fieldtype):
  2705. field_is_type = fieldtype.startswith
  2706. if field_is_type('geometry'):
  2707. srid = 0 # MS SQL default srid for geometry
  2708. geotype, parms = fieldtype[:-1].split('(')
  2709. if parms:
  2710. srid = parms
  2711. return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
  2712. elif fieldtype == 'geography':
  2713. srid = 4326 # MS SQL default srid for geography
  2714. geotype, parms = fieldtype[:-1].split('(')
  2715. if parms:
  2716. srid = parms
  2717. return "geography::STGeomFromText('%s',%s)" %(obj, srid)
  2718. # else:
  2719. # raise SyntaxError, 'Invalid field type %s' %fieldtype
2720. # return "geometry::STGeomFromText('%s',%s)" %(obj, srid) # unreachable
  2721. return BaseAdapter.represent(self, obj, fieldtype)
  2722. class MSSQL2Adapter(MSSQLAdapter):
  2723. drivers = ('pyodbc',)
  2724. types = {
  2725. 'boolean': 'CHAR(1)',
  2726. 'string': 'NVARCHAR(%(length)s)',
  2727. 'text': 'NTEXT',
  2728. 'password': 'NVARCHAR(%(length)s)',
  2729. 'blob': 'IMAGE',
  2730. 'upload': 'NVARCHAR(%(length)s)',
  2731. 'integer': 'INT',
  2732. 'bigint': 'BIGINT',
  2733. 'float': 'FLOAT',
  2734. 'double': 'FLOAT',
  2735. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2736. 'date': 'DATETIME',
  2737. 'time': 'CHAR(8)',
  2738. 'datetime': 'DATETIME',
  2739. 'id': 'INT IDENTITY PRIMARY KEY',
  2740. 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2741. 'list:integer': 'NTEXT',
  2742. 'list:string': 'NTEXT',
  2743. 'list:reference': 'NTEXT',
  2744. 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
  2745. 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2746. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2747. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
  2748. }
  2749. def represent(self, obj, fieldtype):
  2750. value = BaseAdapter.represent(self, obj, fieldtype)
  2751. if fieldtype in ('string','text') and value[:1]=="'":
  2752. value = 'N'+value
  2753. return value
  2754. def execute(self,a):
  2755. return self.log_execute(a.decode('utf8'))
  2756. class SybaseAdapter(MSSQLAdapter):
  2757. drivers = ('Sybase',)
  2758. types = {
  2759. 'boolean': 'BIT',
  2760. 'string': 'CHAR VARYING(%(length)s)',
  2761. 'text': 'TEXT',
  2762. 'password': 'CHAR VARYING(%(length)s)',
  2763. 'blob': 'IMAGE',
  2764. 'upload': 'CHAR VARYING(%(length)s)',
  2765. 'integer': 'INT',
  2766. 'bigint': 'BIGINT',
  2767. 'float': 'FLOAT',
  2768. 'double': 'FLOAT',
  2769. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  2770. 'date': 'DATETIME',
  2771. 'time': 'CHAR(8)',
  2772. 'datetime': 'DATETIME',
  2773. 'id': 'INT IDENTITY PRIMARY KEY',
  2774. 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2775. 'list:integer': 'TEXT',
  2776. 'list:string': 'TEXT',
  2777. 'list:reference': 'TEXT',
  2778. 'geometry': 'geometry',
  2779. 'geography': 'geography',
  2780. 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
  2781. 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2782. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2783. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
  2784. }
  2785. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2786. credential_decoder=IDENTITY, driver_args={},
  2787. adapter_args={}, do_connect=True, srid=4326):
  2788. self.db = db
  2789. self.dbengine = "sybase"
  2790. self.uri = uri
  2791. if do_connect: self.find_driver(adapter_args,uri)
  2792. self.pool_size = pool_size
  2793. self.folder = folder
  2794. self.db_codec = db_codec
  2795. self.srid = srid
  2796. self.find_or_make_work_folder()
  2797. # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
  2798. ruri = uri.split('://',1)[1]
  2799. if '@' not in ruri:
  2800. try:
  2801. m = self.REGEX_DSN.match(ruri)
  2802. if not m:
2803. raise SyntaxError, \
2804. 'Could not parse URI string (%s)' % self.uri
  2805. dsn = m.group('dsn')
  2806. if not dsn:
  2807. raise SyntaxError, 'DSN required'
  2808. except SyntaxError, e:
  2809. LOGGER.error('NdGpatch error')
  2810. raise e
  2811. else:
  2812. m = self.REGEX_URI.match(uri)
  2813. if not m:
  2814. raise SyntaxError, \
  2815. "Invalid URI string in DAL: %s" % self.uri
  2816. user = credential_decoder(m.group('user'))
  2817. if not user:
  2818. raise SyntaxError, 'User required'
  2819. password = credential_decoder(m.group('password'))
  2820. if not password:
  2821. password = ''
  2822. host = m.group('host')
  2823. if not host:
  2824. raise SyntaxError, 'Host name required'
  2825. db = m.group('db')
  2826. if not db:
  2827. raise SyntaxError, 'Database name required'
  2828. port = m.group('port') or '1433'
  2829. dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)
2830. driver_args.update(user = user,
2831. password = password)
  2832. def connector(dsn=dsn,driver_args=driver_args):
  2833. return self.driver.connect(dsn,**driver_args)
  2834. self.connector = connector
  2835. if do_connect: self.reconnect()
  2836. def integrity_error_class(self):
  2837. return RuntimeError # FIX THIS
  2838. class FireBirdAdapter(BaseAdapter):
  2839. drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
  2840. commit_on_alter_table = False
  2841. support_distributed_transaction = True
  2842. types = {
  2843. 'boolean': 'CHAR(1)',
  2844. 'string': 'VARCHAR(%(length)s)',
  2845. 'text': 'BLOB SUB_TYPE 1',
  2846. 'password': 'VARCHAR(%(length)s)',
  2847. 'blob': 'BLOB SUB_TYPE 0',
  2848. 'upload': 'VARCHAR(%(length)s)',
  2849. 'integer': 'INTEGER',
  2850. 'bigint': 'BIGINT',
  2851. 'float': 'FLOAT',
  2852. 'double': 'DOUBLE PRECISION',
  2853. 'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
  2854. 'date': 'DATE',
  2855. 'time': 'TIME',
  2856. 'datetime': 'TIMESTAMP',
  2857. 'id': 'INTEGER PRIMARY KEY',
  2858. 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2859. 'list:integer': 'BLOB SUB_TYPE 1',
  2860. 'list:string': 'BLOB SUB_TYPE 1',
  2861. 'list:reference': 'BLOB SUB_TYPE 1',
  2862. 'big-id': 'BIGINT PRIMARY KEY',
  2863. 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  2864. }
  2865. def sequence_name(self,tablename):
  2866. return 'genid_%s' % tablename
  2867. def trigger_name(self,tablename):
  2868. return 'trg_id_%s' % tablename
  2869. def RANDOM(self):
  2870. return 'RAND()'
  2871. def EPOCH(self, first):
  2872. return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
  2873. def NOT_NULL(self,default,field_type):
  2874. return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
  2875. def SUBSTRING(self,field,parameters):
  2876. return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
  2877. def CONTAINS(self, first, second):
  2878. if first.type in ('string','text'):
  2879. key = str(second).replace('%','%%')
  2880. elif first.type.startswith('list:'):
  2881. key = '|'+str(second).replace('|','||').replace('%','%%')+'|'
  2882. return '(%s CONTAINING %s)' % (self.expand(first),
  2883. self.expand(key,'string'))
  2884. def _drop(self,table,mode):
  2885. sequence_name = table._sequence_name
  2886. return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
  2887. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  2888. if limitby:
  2889. (lmin, lmax) = limitby
  2890. sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
  2891. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
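# For example, limitby=(10, 30) renders as "SELECT FIRST 20 SKIP 10 ...":
# Firebird skips the first 10 rows and returns the next 20.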
  2892. def _truncate(self,table,mode = ''):
  2893. return ['DELETE FROM %s;' % table._tablename,
  2894. 'SET GENERATOR %s TO 0;' % table._sequence_name]
  2895. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')
  2896. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2897. credential_decoder=IDENTITY, driver_args={},
  2898. adapter_args={}, do_connect=True):
  2899. self.db = db
  2900. self.dbengine = "firebird"
  2901. self.uri = uri
  2902. if do_connect: self.find_driver(adapter_args,uri)
  2903. self.pool_size = pool_size
  2904. self.folder = folder
  2905. self.db_codec = db_codec
  2906. self.find_or_make_work_folder()
  2907. ruri = uri.split('://',1)[1]
  2908. m = self.REGEX_URI.match(ruri)
  2909. if not m:
  2910. raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri
  2911. user = credential_decoder(m.group('user'))
  2912. if not user:
  2913. raise SyntaxError, 'User required'
  2914. password = credential_decoder(m.group('password'))
  2915. if not password:
  2916. password = ''
  2917. host = m.group('host')
  2918. if not host:
  2919. raise SyntaxError, 'Host name required'
  2920. port = int(m.group('port') or 3050)
  2921. db = m.group('db')
  2922. if not db:
  2923. raise SyntaxError, 'Database name required'
  2924. charset = m.group('charset') or 'UTF8'
  2925. driver_args.update(dsn='%s/%s:%s' % (host,port,db),
2926. user = user,
2927. password = password,
  2928. charset = charset)
  2929. def connector(driver_args=driver_args):
  2930. return self.driver.connect(**driver_args)
  2931. self.connector = connector
  2932. if do_connect: self.reconnect()
  2933. def create_sequence_and_triggers(self, query, table, **args):
  2934. tablename = table._tablename
  2935. sequence_name = table._sequence_name
  2936. trigger_name = table._trigger_name
  2937. self.execute(query)
  2938. self.execute('create generator %s;' % sequence_name)
  2939. self.execute('set generator %s to 0;' % sequence_name)
  2940. self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
  2941. def lastrowid(self,table):
  2942. sequence_name = table._sequence_name
  2943. self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
  2944. return int(self.cursor.fetchone()[0])
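# Usage sketch (hypothetical credentials and host):
# >>> db = DAL('firebird://user:password@server:3050/mydb?set_encoding=UTF8')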
  2945. class FireBirdEmbeddedAdapter(FireBirdAdapter):
  2946. drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
  2947. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')
  2948. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  2949. credential_decoder=IDENTITY, driver_args={},
  2950. adapter_args={}, do_connect=True):
  2951. self.db = db
  2952. self.dbengine = "firebird"
  2953. self.uri = uri
  2954. if do_connect: self.find_driver(adapter_args,uri)
  2955. self.pool_size = pool_size
  2956. self.folder = folder
  2957. self.db_codec = db_codec
  2958. self.find_or_make_work_folder()
  2959. ruri = uri.split('://',1)[1]
  2960. m = self.REGEX_URI.match(ruri)
  2961. if not m:
  2962. raise SyntaxError, \
  2963. "Invalid URI string in DAL: %s" % self.uri
  2964. user = credential_decoder(m.group('user'))
  2965. if not user:
  2966. raise SyntaxError, 'User required'
  2967. password = credential_decoder(m.group('password'))
  2968. if not password:
  2969. password = ''
  2970. pathdb = m.group('path')
  2971. if not pathdb:
  2972. raise SyntaxError, 'Path required'
  2973. charset = m.group('charset')
  2974. if not charset:
  2975. charset = 'UTF8'
  2976. host = ''
  2977. driver_args.update(host=host,
  2978. database=pathdb,
2979. user=user,
2980. password=password,
  2981. charset=charset)
  2982. def connector(driver_args=driver_args):
  2983. return self.driver.connect(**driver_args)
  2984. self.connector = connector
  2985. if do_connect: self.reconnect()
  2986. class InformixAdapter(BaseAdapter):
  2987. drivers = ('informixdb',)
  2988. types = {
  2989. 'boolean': 'CHAR(1)',
  2990. 'string': 'VARCHAR(%(length)s)',
  2991. 'text': 'BLOB SUB_TYPE 1',
  2992. 'password': 'VARCHAR(%(length)s)',
  2993. 'blob': 'BLOB SUB_TYPE 0',
  2994. 'upload': 'VARCHAR(%(length)s)',
  2995. 'integer': 'INTEGER',
  2996. 'bigint': 'BIGINT',
  2997. 'float': 'FLOAT',
  2998. 'double': 'DOUBLE PRECISION',
  2999. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  3000. 'date': 'DATE',
  3001. 'time': 'CHAR(8)',
  3002. 'datetime': 'DATETIME',
  3003. 'id': 'SERIAL',
  3004. 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3005. 'list:integer': 'BLOB SUB_TYPE 1',
  3006. 'list:string': 'BLOB SUB_TYPE 1',
  3007. 'list:reference': 'BLOB SUB_TYPE 1',
  3008. 'big-id': 'BIGSERIAL',
  3009. 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3010. 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
  3011. 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
  3012. }
  3013. def RANDOM(self):
  3014. return 'Random()'
  3015. def NOT_NULL(self,default,field_type):
  3016. return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
  3017. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  3018. if limitby:
  3019. (lmin, lmax) = limitby
  3020. fetch_amt = lmax - lmin
  3021. dbms_version = int(self.connection.dbms_version.split('.')[0])
  3022. if lmin and (dbms_version >= 10):
  3023. # Requires Informix 10.0+
  3024. sql_s += ' SKIP %d' % (lmin, )
  3025. if fetch_amt and (dbms_version >= 9):
  3026. # Requires Informix 9.0+
  3027. sql_s += ' FIRST %d' % (fetch_amt, )
  3028. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
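# For example, limitby=(10, 30) on Informix 10+ renders as
# "SELECT SKIP 10 FIRST 20 ..."; on 9.x only the FIRST clause is emitted.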
  3029. def represent_exceptions(self, obj, fieldtype):
  3030. if fieldtype == 'date':
  3031. if isinstance(obj, (datetime.date, datetime.datetime)):
  3032. obj = obj.isoformat()[:10]
  3033. else:
  3034. obj = str(obj)
  3035. return "to_date('%s','%%Y-%%m-%%d')" % obj
  3036. elif fieldtype == 'datetime':
  3037. if isinstance(obj, datetime.datetime):
  3038. obj = obj.isoformat()[:19].replace('T',' ')
  3039. elif isinstance(obj, datetime.date):
  3040. obj = obj.isoformat()[:10]+' 00:00:00'
  3041. else:
  3042. obj = str(obj)
  3043. return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
  3044. return None
  3045. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
  3046. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3047. credential_decoder=IDENTITY, driver_args={},
  3048. adapter_args={}, do_connect=True):
  3049. self.db = db
  3050. self.dbengine = "informix"
  3051. self.uri = uri
  3052. if do_connect: self.find_driver(adapter_args,uri)
  3053. self.pool_size = pool_size
  3054. self.folder = folder
  3055. self.db_codec = db_codec
  3056. self.find_or_make_work_folder()
  3057. ruri = uri.split('://',1)[1]
  3058. m = self.REGEX_URI.match(ruri)
  3059. if not m:
  3060. raise SyntaxError, \
  3061. "Invalid URI string in DAL: %s" % self.uri
  3062. user = credential_decoder(m.group('user'))
  3063. if not user:
  3064. raise SyntaxError, 'User required'
  3065. password = credential_decoder(m.group('password'))
  3066. if not password:
  3067. password = ''
  3068. host = m.group('host')
  3069. if not host:
  3070. raise SyntaxError, 'Host name required'
  3071. db = m.group('db')
  3072. if not db:
  3073. raise SyntaxError, 'Database name required'
  3076. dsn = '%s@%s' % (db,host)
  3077. driver_args.update(user=user,password=password,autocommit=True)
  3078. def connector(dsn=dsn,driver_args=driver_args):
  3079. return self.driver.connect(dsn,**driver_args)
  3080. self.connector = connector
  3081. if do_connect: self.reconnect()
  3082. def execute(self,command):
  3083. if command[-1:]==';':
  3084. command = command[:-1]
  3085. return self.log_execute(command)
  3086. def lastrowid(self,table):
  3087. return self.cursor.sqlerrd[1]
  3088. def integrity_error_class(self):
  3089. return informixdb.IntegrityError
  3090. class DB2Adapter(BaseAdapter):
  3091. drivers = ('pyodbc',)
  3092. types = {
  3093. 'boolean': 'CHAR(1)',
  3094. 'string': 'VARCHAR(%(length)s)',
  3095. 'text': 'CLOB',
  3096. 'password': 'VARCHAR(%(length)s)',
  3097. 'blob': 'BLOB',
  3098. 'upload': 'VARCHAR(%(length)s)',
  3099. 'integer': 'INT',
  3100. 'bigint': 'BIGINT',
  3101. 'float': 'REAL',
  3102. 'double': 'DOUBLE',
  3103. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  3104. 'date': 'DATE',
  3105. 'time': 'TIME',
  3106. 'datetime': 'TIMESTAMP',
  3107. 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
  3108. 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3109. 'list:integer': 'CLOB',
  3110. 'list:string': 'CLOB',
  3111. 'list:reference': 'CLOB',
  3112. 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
  3113. 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3114. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3115. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
  3116. }
  3117. def LEFT_JOIN(self):
  3118. return 'LEFT OUTER JOIN'
  3119. def RANDOM(self):
  3120. return 'RAND()'
  3121. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  3122. if limitby:
  3123. (lmin, lmax) = limitby
  3124. sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
  3125. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
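# Note that only the upper bound reaches the SQL: limitby=(10, 30) renders as
# "... FETCH FIRST 30 ROWS ONLY" and the lower bound is applied client-side
# by rowslice() below, e.g. rowslice(rows, 10, 30).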
  3126. def represent_exceptions(self, obj, fieldtype):
  3127. if fieldtype == 'blob':
  3128. obj = base64.b64encode(str(obj))
  3129. return "BLOB('%s')" % obj
  3130. elif fieldtype == 'datetime':
  3131. if isinstance(obj, datetime.datetime):
  3132. obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
  3133. elif isinstance(obj, datetime.date):
  3134. obj = obj.isoformat()[:10]+'-00.00.00'
  3135. return "'%s'" % obj
  3136. return None
  3137. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3138. credential_decoder=IDENTITY, driver_args={},
  3139. adapter_args={}, do_connect=True):
  3140. self.db = db
  3141. self.dbengine = "db2"
  3142. self.uri = uri
  3143. if do_connect: self.find_driver(adapter_args,uri)
  3144. self.pool_size = pool_size
  3145. self.folder = folder
  3146. self.db_codec = db_codec
  3147. self.find_or_make_work_folder()
  3148. ruri = uri.split('://', 1)[1]
  3149. def connector(cnxn=ruri,driver_args=driver_args):
  3150. return self.driver.connect(cnxn,**driver_args)
  3151. self.connector = connector
  3152. if do_connect: self.reconnect()
  3153. def execute(self,command):
  3154. if command[-1:]==';':
  3155. command = command[:-1]
  3156. return self.log_execute(command)
  3157. def lastrowid(self,table):
  3158. self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
  3159. return int(self.cursor.fetchone()[0])
  3160. def rowslice(self,rows,minimum=0,maximum=None):
  3161. if maximum is None:
  3162. return rows[minimum:]
  3163. return rows[minimum:maximum]
  3164. class TeradataAdapter(BaseAdapter):
  3165. drivers = ('pyodbc',)
  3166. types = {
  3167. 'boolean': 'CHAR(1)',
  3168. 'string': 'VARCHAR(%(length)s)',
  3169. 'text': 'CLOB',
  3170. 'password': 'VARCHAR(%(length)s)',
  3171. 'blob': 'BLOB',
  3172. 'upload': 'VARCHAR(%(length)s)',
  3173. 'integer': 'INT',
  3174. 'bigint': 'BIGINT',
  3175. 'float': 'REAL',
  3176. 'double': 'DOUBLE',
  3177. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  3178. 'date': 'DATE',
  3179. 'time': 'TIME',
  3180. 'datetime': 'TIMESTAMP',
  3181. # Modified Constraint syntax for Teradata.
  3182. # Teradata does not support ON DELETE.
  3183. 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
  3184. 'reference': 'INT',
  3185. 'list:integer': 'CLOB',
  3186. 'list:string': 'CLOB',
  3187. 'list:reference': 'CLOB',
  3188. 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
  3189. 'big-reference': 'BIGINT',
  3190. 'reference FK': ' REFERENCES %(foreign_key)s',
  3191. 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
  3192. }
  3193. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3194. credential_decoder=IDENTITY, driver_args={},
  3195. adapter_args={}, do_connect=True):
  3196. self.db = db
  3197. self.dbengine = "teradata"
  3198. self.uri = uri
  3199. if do_connect: self.find_driver(adapter_args,uri)
  3200. self.pool_size = pool_size
  3201. self.folder = folder
  3202. self.db_codec = db_codec
  3203. self.find_or_make_work_folder()
  3204. ruri = uri.split('://', 1)[1]
  3205. def connector(cnxn=ruri,driver_args=driver_args):
  3206. return self.driver.connect(cnxn,**driver_args)
  3207. self.connector = connector
  3208. if do_connect: self.reconnect()
  3209. def LEFT_JOIN(self):
  3210. return 'LEFT OUTER JOIN'
3211. # Similar to MSSQL, Teradata cannot specify a row range for paging; only TOP is available.
  3212. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  3213. if limitby:
  3214. (lmin, lmax) = limitby
  3215. sql_s += ' TOP %i' % lmax
  3216. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
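# For example, limitby=(10, 30) renders as "SELECT TOP 30 ..."; as with MSSQL,
# the lower bound is not expressed in SQL.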
  3217. def _truncate(self, table, mode=''):
  3218. tablename = table._tablename
  3219. return ['DELETE FROM %s ALL;' % (tablename)]
  3220. INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
  3221. # (ANSI-SQL wants this form of name
  3222. # to be a delimited identifier)
  3223. class IngresAdapter(BaseAdapter):
  3224. drivers = ('ingresdbi',)
  3225. types = {
  3226. 'boolean': 'CHAR(1)',
  3227. 'string': 'VARCHAR(%(length)s)',
  3228. 'text': 'CLOB',
  3229. 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
  3230. 'blob': 'BLOB',
  3231. 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
  3232. 'integer': 'INTEGER4', # or int8...
  3233. 'bigint': 'BIGINT',
  3234. 'float': 'FLOAT',
  3235. 'double': 'FLOAT8',
  3236. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  3237. 'date': 'ANSIDATE',
  3238. 'time': 'TIME WITHOUT TIME ZONE',
  3239. 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
  3240. 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
  3241. 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3242. 'list:integer': 'CLOB',
  3243. 'list:string': 'CLOB',
  3244. 'list:reference': 'CLOB',
  3245. 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
  3246. 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3247. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3248. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
  3249. }
  3250. def LEFT_JOIN(self):
  3251. return 'LEFT OUTER JOIN'
  3252. def RANDOM(self):
  3253. return 'RANDOM()'
  3254. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  3255. if limitby:
  3256. (lmin, lmax) = limitby
  3257. fetch_amt = lmax - lmin
  3258. if fetch_amt:
  3259. sql_s += ' FIRST %d ' % (fetch_amt, )
  3260. if lmin:
  3261. # Requires Ingres 9.2+
  3262. sql_o += ' OFFSET %d' % (lmin, )
  3263. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
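# For example, limitby=(10, 30) renders as "SELECT FIRST 20 ... OFFSET 10"
# (the OFFSET clause requires Ingres 9.2+, as noted above).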
  3264. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3265. credential_decoder=IDENTITY, driver_args={},
  3266. adapter_args={}, do_connect=True):
  3267. self.db = db
  3268. self.dbengine = "ingres"
  3269. self.uri = uri
  3270. if do_connect: self.find_driver(adapter_args,uri)
  3271. self.pool_size = pool_size
  3272. self.folder = folder
  3273. self.db_codec = db_codec
  3274. self.find_or_make_work_folder()
3275. connstr = self.uri.split(':', 1)[1]
  3276. # Simple URI processing
  3277. connstr = connstr.lstrip()
  3278. while connstr.startswith('/'):
  3279. connstr = connstr[1:]
  3280. database_name=connstr # Assume only (local) dbname is passed in
  3281. vnode = '(local)'
  3282. servertype = 'ingres'
  3283. trace = (0, None) # No tracing
  3284. driver_args.update(database=database_name,
  3285. vnode=vnode,
  3286. servertype=servertype,
  3287. trace=trace)
  3288. def connector(driver_args=driver_args):
  3289. return self.driver.connect(**driver_args)
  3290. self.connector = connector
  3291. if do_connect: self.reconnect()
  3292. def create_sequence_and_triggers(self, query, table, **args):
3293. # post-create-table auto-increment setup (if needed);
3294. # modify table to btree for performance
3295. # Older Ingres releases could use a rule/trigger, as Oracle does above.
  3296. if hasattr(table,'_primarykey'):
  3297. modify_tbl_sql = 'modify %s to btree unique on %s' % \
  3298. (table._tablename,
3299. ', '.join(["'%s'" % x for x in table._primarykey]))
  3300. self.execute(modify_tbl_sql)
  3301. else:
  3302. tmp_seqname='%s_iisq' % table._tablename
  3303. query=query.replace(INGRES_SEQNAME, tmp_seqname)
  3304. self.execute('create sequence %s' % tmp_seqname)
  3305. self.execute(query)
  3306. self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
  3307. def lastrowid(self,table):
  3308. tmp_seqname='%s_iisq' % table
  3309. self.execute('select current value for %s' % tmp_seqname)
  3310. return int(self.cursor.fetchone()[0]) # don't really need int type cast here...
  3311. def integrity_error_class(self):
  3312. return ingresdbi.IntegrityError
  3313. class IngresUnicodeAdapter(IngresAdapter):
  3314. drivers = ('ingresdbi',)
  3315. types = {
  3316. 'boolean': 'CHAR(1)',
  3317. 'string': 'NVARCHAR(%(length)s)',
  3318. 'text': 'NCLOB',
  3319. 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
  3320. 'blob': 'BLOB',
  3321. 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
  3322. 'integer': 'INTEGER4', # or int8...
  3323. 'bigint': 'BIGINT',
  3324. 'float': 'FLOAT',
  3325. 'double': 'FLOAT8',
  3326. 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
  3327. 'date': 'ANSIDATE',
  3328. 'time': 'TIME WITHOUT TIME ZONE',
  3329. 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
  3330. 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
  3331. 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3332. 'list:integer': 'NCLOB',
  3333. 'list:string': 'NCLOB',
  3334. 'list:reference': 'NCLOB',
  3335. 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
  3336. 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3337. 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3338. 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
  3339. }
  3340. class SAPDBAdapter(BaseAdapter):
  3341. drivers = ('sapdb',)
  3342. support_distributed_transaction = False
  3343. types = {
  3344. 'boolean': 'CHAR(1)',
  3345. 'string': 'VARCHAR(%(length)s)',
  3346. 'text': 'LONG',
  3347. 'password': 'VARCHAR(%(length)s)',
  3348. 'blob': 'LONG',
  3349. 'upload': 'VARCHAR(%(length)s)',
  3350. 'integer': 'INT',
  3351. 'bigint': 'BIGINT',
  3352. 'float': 'FLOAT',
  3353. 'double': 'DOUBLE PRECISION',
  3354. 'decimal': 'FIXED(%(precision)s,%(scale)s)',
  3355. 'date': 'DATE',
  3356. 'time': 'TIME',
  3357. 'datetime': 'TIMESTAMP',
  3358. 'id': 'INT PRIMARY KEY',
  3359. 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3360. 'list:integer': 'LONG',
  3361. 'list:string': 'LONG',
  3362. 'list:reference': 'LONG',
  3363. 'big-id': 'BIGINT PRIMARY KEY',
  3364. 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  3365. }
  3366. def sequence_name(self,table):
  3367. return '%s_id_Seq' % table
  3368. def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
  3369. if limitby:
  3370. (lmin, lmax) = limitby
  3371. if len(sql_w) > 1:
  3372. sql_w_row = sql_w + ' AND w_row > %i' % lmin
  3373. else:
  3374. sql_w_row = 'WHERE w_row > %i' % lmin
  3375. return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
  3376. return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
  3377. def create_sequence_and_triggers(self, query, table, **args):
  3378. # following lines should only be executed if table._sequence_name does not exist
  3379. self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
  3380. self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
  3381. % (table._tablename, table._id.name, table._sequence_name))
  3382. self.execute(query)
  3383. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
  3384. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3385. credential_decoder=IDENTITY, driver_args={},
  3386. adapter_args={}, do_connect=True):
  3387. self.db = db
  3388. self.dbengine = "sapdb"
  3389. self.uri = uri
  3390. if do_connect: self.find_driver(adapter_args,uri)
  3391. self.pool_size = pool_size
  3392. self.folder = folder
  3393. self.db_codec = db_codec
  3394. self.find_or_make_work_folder()
  3395. ruri = uri.split('://',1)[1]
  3396. m = self.REGEX_URI.match(ruri)
  3397. if not m:
3398. raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri
  3399. user = credential_decoder(m.group('user'))
  3400. if not user:
  3401. raise SyntaxError, 'User required'
  3402. password = credential_decoder(m.group('password'))
  3403. if not password:
  3404. password = ''
  3405. host = m.group('host')
  3406. if not host:
  3407. raise SyntaxError, 'Host name required'
  3408. db = m.group('db')
  3409. if not db:
  3410. raise SyntaxError, 'Database name required'
  3411. def connector(user=user, password=password, database=db,
  3412. host=host, driver_args=driver_args):
  3413. return self.driver.Connection(user, password, database,
  3414. host, **driver_args)
  3415. self.connector = connector
  3416. if do_connect: self.reconnect()
  3417. def lastrowid(self,table):
  3418. self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
  3419. return int(self.cursor.fetchone()[0])
  3420. class CubridAdapter(MySQLAdapter):
  3421. drivers = ('cubriddb',)
  3422. REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
  3423. def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
  3424. credential_decoder=IDENTITY, driver_args={},
  3425. adapter_args={}, do_connect=True):
  3426. self.db = db
  3427. self.dbengine = "cubrid"
  3428. self.uri = uri
  3429. if do_connect: self.find_driver(adapter_args,uri)
  3430. self.pool_size = pool_size
  3431. self.folder = folder
  3432. self.db_codec = db_codec
  3433. self.find_or_make_work_folder()
  3434. ruri = uri.split('://',1)[1]
  3435. m = self.REGEX_URI.match(ruri)
  3436. if not m:
  3437. raise SyntaxError, \
  3438. "Invalid URI string in DAL: %s" % self.uri
  3439. user = credential_decoder(m.group('user'))
  3440. if not user:
  3441. raise SyntaxError, 'User required'
  3442. password = credential_decoder(m.group('password'))
  3443. if not password:
  3444. password = ''
  3445. host = m.group('host')
  3446. if not host:
  3447. raise SyntaxError, 'Host name required'
  3448. db = m.group('db')
  3449. if not db:
  3450. raise SyntaxError, 'Database name required'
  3451. port = int(m.group('port') or '30000')
  3452. charset = m.group('charset') or 'utf8'
  3455. def connector(host=host,port=port,db=db,
  3456. user=user,passwd=password,driver_args=driver_args):
  3457. return self.driver.connect(host,port,db,user,passwd,**driver_args)
  3458. self.connector = connector
  3459. if do_connect: self.reconnect()
  3460. def after_connection(self):
  3461. self.execute('SET FOREIGN_KEY_CHECKS=1;')
  3462. self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
  3463. ######## GAE MySQL ##########
  3464. class DatabaseStoredFile:
  3465. web2py_filesystem = False
  3466. def escape(self,obj):
  3467. return self.db._adapter.escape(obj)
  3468. def __init__(self,db,filename,mode):
  3469. if db._adapter.dbengine != 'mysql':
  3470. raise RuntimeError, "only MySQL can store metadata .table files in database for now"
  3471. self.db = db
  3472. self.filename = filename
  3473. self.mode = mode
  3474. if not self.web2py_filesystem:
  3475. self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
  3476. DatabaseStoredFile.web2py_filesystem = True
  3477. self.p=0
  3478. self.data = ''
  3479. if mode in ('r','rw','a'):
  3480. query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
  3481. % filename
  3482. rows = self.db.executesql(query)
  3483. if rows:
  3484. self.data = rows[0][0]
  3485. elif exists(filename):
  3486. datafile = open(filename, 'r')
  3487. try:
  3488. self.data = datafile.read()
  3489. finally:
  3490. datafile.close()
  3491. elif mode in ('r','rw'):
  3492. raise RuntimeError, "File %s does not exist" % filename
  3493. def read(self, bytes):
  3494. data = self.data[self.p:self.p+bytes]
  3495. self.p += len(data)
  3496. return data
  3497. def readline(self):
  3498. i = self.data.find('\n',self.p)+1
  3499. if i>0:
  3500. data, self.p = self.data[self.p:i], i
  3501. else:
  3502. data, self.p = self.data[self.p:], len(self.data)
  3503. return data
  3504. def write(self,data):
  3505. self.data += data
  3506. def close_connection(self):
  3507. self.db.executesql(
  3508. "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
  3509. query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
  3510. % (self.filename, self.data.replace("'","''"))
  3511. self.db.executesql(query)
  3512. self.db.commit()
  3513. @staticmethod
  3514. def exists(db, filename):
  3515. if exists(filename):
  3516. return True
  3517. query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
  3518. if db.executesql(query):
  3519. return True
  3520. return False
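# A minimal sketch (assumes a MySQL-backed DAL instance `db`; the filename is
# hypothetical):
# >>> f = DatabaseStoredFile(db, 'sample.table', 'w')
# >>> f.write('...metadata...')
# >>> f.close_connection()   # persists the content into web2py_filesystem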
  3521. class UseDatabaseStoredFile:
  3522. def file_exists(self, filename):
  3523. return DatabaseStoredFile.exists(self.db,filename)
  3524. def file_open(self, filename, mode='rb', lock=True):
  3525. return DatabaseStoredFile(self.db,filename,mode)
  3526. def file_close(self, fileobj):
  3527. fileobj.close_connection()
  3528. def file_delete(self,filename):
  3529. query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
  3530. self.db.executesql(query)
  3531. self.db.commit()
  3532. class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
  3533. uploads_in_blob = True
  3534. REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')
  3535. def __init__(self, db, uri='google:sql://realm:domain/database',
  3536. pool_size=0, folder=None, db_codec='UTF-8',
  3537. credential_decoder=IDENTITY, driver_args={},
  3538. adapter_args={}, do_connect=True):
  3539. self.db = db
  3540. self.dbengine = "mysql"
  3541. self.uri = uri
  3542. self.pool_size = pool_size
  3543. self.db_codec = db_codec
  3544. self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
  3545. os.sep+'applications'+os.sep,1)[1])
  3546. ruri = uri.split("://")[1]
  3547. m = self.REGEX_URI.match(ruri)
  3548. if not m:
3549. raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri
  3550. instance = credential_decoder(m.group('instance'))
  3551. self.dbstring = db = credential_decoder(m.group('db'))
  3552. driver_args['instance'] = instance
  3553. self.createdb = createdb = adapter_args.get('createdb',True)
  3554. if not createdb:
  3555. driver_args['database'] = db
  3556. def connector(driver_args=driver_args):
  3557. return rdbms.connect(**driver_args)
  3558. self.connector = connector
  3559. if do_connect: self.reconnect()
  3560. def after_connection(self):
  3561. if self.createdb:
  3562. # self.execute('DROP DATABASE %s' % self.dbstring)
  3563. self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
  3564. self.execute('USE %s' % self.dbstring)
  3565. self.execute("SET FOREIGN_KEY_CHECKS=1;")
  3566. self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
  3567. def execute(self, command, *a, **b):
  3568. return self.log_execute(command.decode('utf8'), *a, **b)
  3569. class NoSQLAdapter(BaseAdapter):
  3570. can_select_for_update = False
  3571. @staticmethod
  3572. def to_unicode(obj):
  3573. if isinstance(obj, str):
  3574. return obj.decode('utf8')
  3575. elif not isinstance(obj, unicode):
  3576. return unicode(obj)
  3577. return obj
  3578. def id_query(self, table):
  3579. return table._id > 0
  3580. def represent(self, obj, fieldtype):
  3581. field_is_type = fieldtype.startswith
  3582. if isinstance(obj, CALLABLETYPES):
  3583. obj = obj()
  3584. if isinstance(fieldtype, SQLCustomType):
  3585. return fieldtype.encoder(obj)
  3586. if isinstance(obj, (Expression, Field)):
3587. raise SyntaxError, "not supported on GAE"
  3588. if self.dbengine == 'google:datastore':
  3589. if isinstance(fieldtype, gae.Property):
  3590. return obj
  3591. is_string = isinstance(fieldtype,str)
  3592. is_list = is_string and field_is_type('list:')
  3593. if is_list:
  3594. if not obj:
  3595. obj = []
  3596. if not isinstance(obj, (list, tuple)):
  3597. obj = [obj]
  3598. if obj == '' and not \
  3599. (is_string and fieldtype[:2] in ['st','te','pa','up']):
  3600. return None
  3601. if not obj is None:
  3602. if isinstance(obj, list) and not is_list:
  3603. obj = [self.represent(o, fieldtype) for o in obj]
  3604. elif fieldtype in ('integer','bigint','id'):
  3605. obj = long(obj)
  3606. elif fieldtype == 'double':
  3607. obj = float(obj)
  3608. elif is_string and field_is_type('reference'):
  3609. if isinstance(obj, (Row, Reference)):
  3610. obj = obj['id']
  3611. obj = long(obj)
  3612. elif fieldtype == 'boolean':
  3613. if obj and not str(obj)[0].upper() in '0F':
  3614. obj = True
  3615. else:
  3616. obj = False
  3617. elif fieldtype == 'date':
  3618. if not isinstance(obj, datetime.date):
  3619. (y, m, d) = map(int,str(obj).strip().split('-'))
  3620. obj = datetime.date(y, m, d)
  3621. elif isinstance(obj,datetime.datetime):
  3622. (y, m, d) = (obj.year, obj.month, obj.day)
  3623. obj = datetime.date(y, m, d)
  3624. elif fieldtype == 'time':
  3625. if not isinstance(obj, datetime.time):
  3626. time_items = map(int,str(obj).strip().split(':')[:3])
  3627. if len(time_items) == 3:
  3628. (h, mi, s) = time_items
  3629. else:
  3630. (h, mi, s) = time_items + [0]
  3631. obj = datetime.time(h, mi, s)
  3632. elif fieldtype == 'datetime':
  3633. if not isinstance(obj, datetime.datetime):
  3634. (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
  3635. time_items = map(int,str(obj)[11:].strip().split(':')[:3])
  3636. while len(time_items)<3:
  3637. time_items.append(0)
  3638. (h, mi, s) = time_items
  3639. obj = datetime.datetime(y, m, d, h, mi, s)
  3640. elif fieldtype == 'blob':
  3641. pass
  3642. elif is_string and field_is_type('list:string'):
  3643. return map(self.to_unicode,obj)
  3644. elif is_list:
  3645. return map(int,obj)
  3646. else:
  3647. obj = self.to_unicode(obj)
  3648. return obj
  3649. def _insert(self,table,fields):
  3650. return 'insert %s in %s' % (fields, table)
  3651. def _count(self,query,distinct=None):
  3652. return 'count %s' % repr(query)
  3653. def _select(self,query,fields,attributes):
  3654. return 'select %s where %s' % (repr(fields), repr(query))
  3655. def _delete(self,tablename, query):
  3656. return 'delete %s where %s' % (repr(tablename),repr(query))
  3657. def _update(self,tablename,query,fields):
  3658. return 'update %s (%s) where %s' % (repr(tablename),
  3659. repr(fields),repr(query))
  3660. def commit(self):
  3661. """
  3662. remember: no transactions on many NoSQL
  3663. """
  3664. pass
  3665. def rollback(self):
  3666. """
  3667. remember: no transactions on many NoSQL
  3668. """
  3669. pass
  3670. def close_connection(self):
  3671. """
  3672. remember: no transactions on many NoSQL
  3673. """
  3674. pass
  3675. # these functions should never be called!
  3676. def OR(self,first,second): raise SyntaxError, "Not supported"
  3677. def AND(self,first,second): raise SyntaxError, "Not supported"
  3678. def AS(self,first,second): raise SyntaxError, "Not supported"
  3679. def ON(self,first,second): raise SyntaxError, "Not supported"
  3680. def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
  3681. def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
  3682. def ADD(self,first,second): raise SyntaxError, "Not supported"
  3683. def SUB(self,first,second): raise SyntaxError, "Not supported"
  3684. def MUL(self,first,second): raise SyntaxError, "Not supported"
  3685. def DIV(self,first,second): raise SyntaxError, "Not supported"
  3686. def LOWER(self,first): raise SyntaxError, "Not supported"
  3687. def UPPER(self,first): raise SyntaxError, "Not supported"
  3688. def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
  3689. def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
  3690. def LEFT_JOIN(self): raise SyntaxError, "Not supported"
  3691. def RANDOM(self): raise SyntaxError, "Not supported"
  3692. def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
  3693. def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
  3694. def ILIKE(self,first,second): raise SyntaxError, "Not supported"
  3695. def drop(self,table,mode): raise SyntaxError, "Not supported"
  3696. def alias(self,table,alias): raise SyntaxError, "Not supported"
  3697. def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
  3698. def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
  3699. def prepare(self,key): raise SyntaxError, "Not supported"
  3700. def commit_prepared(self,key): raise SyntaxError, "Not supported"
  3701. def rollback_prepared(self,key): raise SyntaxError, "Not supported"
  3702. def concat_add(self,table): raise SyntaxError, "Not supported"
  3703. def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
  3704. def create_sequence_and_triggers(self, query, table, **args): pass
  3705. def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
  3706. def execute(self,*a,**b): raise SyntaxError, "Not supported"
  3707. def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
  3708. def lastrowid(self,table): raise SyntaxError, "Not supported"
  3709. def integrity_error_class(self): raise SyntaxError, "Not supported"
  3710. def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"
  3711. class GAEF(object):
  3712. def __init__(self,name,op,value,apply):
  3713. self.name=name=='id' and '__key__' or name
  3714. self.op=op
  3715. self.value=value
  3716. self.apply=apply
  3717. def __repr__(self):
  3718. return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
  3719. class GoogleDatastoreAdapter(NoSQLAdapter):
  3720. uploads_in_blob = True
  3721. types = {}
  3722. def file_exists(self, filename): pass
  3723. def file_open(self, filename, mode='rb', lock=True): pass
  3724. def file_close(self, fileobj): pass
  3725. REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
  3726. def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
  3727. credential_decoder=IDENTITY, driver_args={},
  3728. adapter_args={}, do_connect=True):
  3729. self.types.update({
  3730. 'boolean': gae.BooleanProperty,
  3731. 'string': (lambda: gae.StringProperty(multiline=True)),
  3732. 'text': gae.TextProperty,
  3733. 'password': gae.StringProperty,
  3734. 'blob': gae.BlobProperty,
  3735. 'upload': gae.StringProperty,
  3736. 'integer': gae.IntegerProperty,
  3737. 'bigint': gae.IntegerProperty,
  3738. 'float': gae.FloatProperty,
  3739. 'double': gae.FloatProperty,
  3740. 'decimal': GAEDecimalProperty,
  3741. 'date': gae.DateProperty,
  3742. 'time': gae.TimeProperty,
  3743. 'datetime': gae.DateTimeProperty,
  3744. 'id': None,
  3745. 'reference': gae.IntegerProperty,
  3746. 'list:string': (lambda: gae.StringListProperty(default=None)),
  3747. 'list:integer': (lambda: gae.ListProperty(int,default=None)),
  3748. 'list:reference': (lambda: gae.ListProperty(int,default=None)),
  3749. })
  3750. self.db = db
  3751. self.uri = uri
  3752. self.dbengine = 'google:datastore'
  3753. self.folder = folder
  3754. db['_lastsql'] = ''
  3755. self.db_codec = 'UTF-8'
  3756. self.pool_size = 0
  3757. match = self.REGEX_NAMESPACE.match(uri)
  3758. if match:
  3759. namespace_manager.set_namespace(match.group('namespace'))
  3760. def parse_id(self, value, field_type):
  3761. return value
  3762. def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
  3763. myfields = {}
  3764. for field in table:
  3765. if isinstance(polymodel,Table) and field.name in polymodel.fields():
  3766. continue
  3767. attr = {}
  3768. field_type = field.type
  3769. if isinstance(field_type, SQLCustomType):
  3770. ftype = self.types[field_type.native or field_type.type](**attr)
  3771. elif isinstance(field_type, gae.Property):
  3772. ftype = field_type
  3773. elif field_type.startswith('id'):
  3774. continue
  3775. elif field_type.startswith('decimal'):
  3776. precision, scale = field_type[7:].strip('()').split(',')
  3777. precision = int(precision)
  3778. scale = int(scale)
  3779. ftype = GAEDecimalProperty(precision, scale, **attr)
  3780. elif field_type.startswith('reference'):
  3781. if field.notnull:
  3782. attr = dict(required=True)
  3783. referenced = field_type[10:].strip()
  3784. ftype = self.types[field_type[:9]](referenced)
  3785. elif field_type.startswith('list:reference'):
  3786. if field.notnull:
  3787. attr = dict(required=True)
  3788. referenced = field_type[15:].strip()
  3789. ftype = self.types[field_type[:14]](**attr)
  3790. elif field_type.startswith('list:'):
  3791. ftype = self.types[field_type](**attr)
  3792. elif not field_type in self.types\
  3793. or not self.types[field_type]:
  3794. raise SyntaxError, 'Field: unknown field type: %s' % field_type
  3795. else:
  3796. ftype = self.types[field_type](**attr)
  3797. myfields[field.name] = ftype
  3798. if not polymodel:
  3799. table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
  3800. elif polymodel==True:
  3801. table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
  3802. elif isinstance(polymodel,Table):
  3803. table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
  3804. else:
  3805. raise SyntaxError, "polymodel must be None, True, a table or a tablename"
  3806. return None
  3807. def expand(self,expression,field_type=None):
  3808. if isinstance(expression,Field):
  3809. if expression.type in ('text','blob'):
  3810. raise SyntaxError, 'AppEngine does not index by: %s' % expression.type
  3811. return expression.name
  3812. elif isinstance(expression, (Expression, Query)):
  3813. if not expression.second is None:
  3814. return expression.op(expression.first, expression.second)
  3815. elif not expression.first is None:
  3816. return expression.op(expression.first)
  3817. else:
  3818. return expression.op()
  3819. elif field_type:
  3820. return self.represent(expression,field_type)
  3821. elif isinstance(expression,(list,tuple)):
  3822. return ','.join([self.represent(item,field_type) for item in expression])
  3823. else:
  3824. return str(expression)
  3825. ### TODO from gql.py Expression
  3826. def AND(self,first,second):
  3827. a = self.expand(first)
  3828. b = self.expand(second)
  3829. if b[0].name=='__key__' and a[0].name!='__key__':
  3830. return b+a
  3831. return a+b
  3832. def EQ(self,first,second=None):
  3833. if isinstance(second, Key):
  3834. return [GAEF(first.name,'=',second,lambda a,b:a==b)]
  3835. return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
  3836. def NE(self,first,second=None):
  3837. if first.type != 'id':
  3838. return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
  3839. else:
  3840. if not second is None:
  3841. second = Key.from_path(first._tablename, long(second))
  3842. return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
  3843. def LT(self,first,second=None):
  3844. if first.type != 'id':
  3845. return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
  3846. else:
  3847. second = Key.from_path(first._tablename, long(second))
  3848. return [GAEF(first.name,'<',second,lambda a,b:a<b)]
  3849. def LE(self,first,second=None):
  3850. if first.type != 'id':
  3851. return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
  3852. else:
  3853. second = Key.from_path(first._tablename, long(second))
  3854. return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
  3855. def GT(self,first,second=None):
  3856. if first.type != 'id' or second==0 or second == '0':
  3857. return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
  3858. else:
  3859. second = Key.from_path(first._tablename, long(second))
  3860. return [GAEF(first.name,'>',second,lambda a,b:a>b)]
  3861. def GE(self,first,second=None):
  3862. if first.type != 'id':
  3863. return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
  3864. else:
  3865. second = Key.from_path(first._tablename, long(second))
  3866. return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
  3867. def INVERT(self,first):
  3868. return '-%s' % first.name
  3869. def COMMA(self,first,second):
  3870. return '%s, %s' % (self.expand(first),self.expand(second))
  3871. def BELONGS(self,first,second=None):
  3872. if not isinstance(second,(list, tuple)):
  3873. raise SyntaxError, "Not supported"
  3874. if first.type != 'id':
  3875. return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
  3876. else:
  3877. second = [Key.from_path(first._tablename, int(i)) for i in second]
  3878. return [GAEF(first.name,'in',second,lambda a,b:a in b)]
  3879. def CONTAINS(self,first,second):
  3880. if not first.type.startswith('list:'):
  3881. raise SyntaxError, "Not supported"
  3882. return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
  3883. def NOT(self,first):
  3884. nops = { self.EQ: self.NE,
  3885. self.NE: self.EQ,
  3886. self.LT: self.GE,
  3887. self.GT: self.LE,
  3888. self.LE: self.GT,
  3889. self.GE: self.LT}
  3890. if not isinstance(first,Query):
3891. raise SyntaxError, "Not supported"
  3892. nop = nops.get(first.op,None)
  3893. if not nop:
3894. raise SyntaxError, "Not supported %s" % first.op.__name__
  3895. first.op = nop
  3896. return self.expand(first)
  3897. def truncate(self,table,mode):
  3898. self.db(table._id).delete()
  3899. def select_raw(self,query,fields=None,attributes=None):
  3900. db = self.db
  3901. fields = fields or []
  3902. attributes = attributes or {}
  3903. args_get = attributes.get
  3904. new_fields = []
  3905. for item in fields:
  3906. if isinstance(item,SQLALL):
  3907. new_fields += item._table
  3908. else:
  3909. new_fields.append(item)
  3910. fields = new_fields
  3911. if query:
  3912. tablename = self.get_table(query)
  3913. elif fields:
  3914. tablename = fields[0].tablename
  3915. query = db._adapter.id_query(fields[0].table)
  3916. else:
  3917. raise SyntaxError, "Unable to determine a tablename"
  3918. if query:
  3919. if use_common_filters(query):
  3920. query = self.common_filter(query,[tablename])
  3921. #tableobj is a GAE Model class (or subclass)
  3922. tableobj = db[tablename]._tableobj
  3923. filters = self.expand(query)
  3924. projection = None
  3925. if len(db[tablename].fields) == len(fields):
  3926. #getting all fields, not a projection query
  3927. projection = None
  3928. elif args_get('projection') == True:
  3929. projection = []
  3930. for f in fields:
  3931. if f.type in ['text', 'blob']:
  3932. raise SyntaxError, \
  3933. "text and blob field types not allowed in projection queries"
  3934. else:
  3935. projection.append(f.name)
3936. # projections can't include 'id';
  3937. # it will be added to the result later
  3938. query_projection = [
  3939. p for p in projection if \
  3940. p != db[tablename]._id.name] if projection \
  3941. else None
  3942. cursor = None
  3943. if isinstance(args_get('reusecursor'), str):
  3944. cursor = args_get('reusecursor')
  3945. items = gae.Query(tableobj, projection=query_projection,
  3946. cursor=cursor)
  3947. for filter in filters:
  3948. if args_get('projection') == True and \
  3949. filter.name in query_projection and \
  3950. filter.op in ['=', '<=', '>=']:
  3951. raise SyntaxError, \
  3952. "projection fields cannot have equality filters"
  3953. if filter.name=='__key__' and filter.op=='>' and filter.value==0:
  3954. continue
  3955. elif filter.name=='__key__' and filter.op=='=':
  3956. if filter.value==0:
  3957. items = []
  3958. elif isinstance(filter.value, Key):
3959. # key queries return a class instance,
  3960. # can't use projection
  3961. # extra values will be ignored in post-processing later
  3962. item = tableobj.get(filter.value)
  3963. items = (item and [item]) or []
  3964. else:
3965. # key queries return a class instance,
  3966. # can't use projection
  3967. # extra values will be ignored in post-processing later
  3968. item = tableobj.get_by_id(filter.value)
  3969. items = (item and [item]) or []
  3970. elif isinstance(items,list): # i.e. there is a single record!
  3971. items = [i for i in items if filter.apply(
3972. getattr(i,filter.name),filter.value)]
  3973. else:
  3974. if filter.name=='__key__' and filter.op != 'in':
  3975. items.order('__key__')
  3976. items = items.filter('%s %s' % (filter.name,filter.op),
  3977. filter.value)
  3978. if not isinstance(items,list):
  3979. if args_get('left', None):
  3980. raise SyntaxError, 'Set: no left join in appengine'
  3981. if args_get('groupby', None):
  3982. raise SyntaxError, 'Set: no groupby in appengine'
  3983. orderby = args_get('orderby', False)
  3984. if orderby:
  3985. ### THIS REALLY NEEDS IMPROVEMENT !!!
  3986. if isinstance(orderby, (list, tuple)):
  3987. orderby = xorify(orderby)
  3988. if isinstance(orderby,Expression):
  3989. orderby = self.expand(orderby)
  3990. orders = orderby.split(', ')
  3991. for order in orders:
  3992. order={'-id':'-__key__','id':'__key__'}.get(order,order)
  3993. items = items.order(order)
  3994. if args_get('limitby', None):
  3995. (lmin, lmax) = attributes['limitby']
  3996. (limit, offset) = (lmax - lmin, lmin)
  3997. rows = items.fetch(limit,offset=offset)
  3998. #cursor is only useful if there was a limit and we didn't return
  3999. # all results
  4000. if args_get('reusecursor'):
  4001. db['_lastcursor'] = items.cursor()
  4002. items = rows
  4003. return (items, tablename, projection or db[tablename].fields)
  4004. def select(self,query,fields,attributes):
  4005. """
4006. This is the GAE version of select. Some notes to consider:
4007. - db['_lastsql'] is not set because there is no SQL statement string
4008. for a GAE query
4009. - 'nativeRef' is a magical fieldname used for self references on GAE
4010. - optional attribute 'projection' when set to True will trigger
4011. use of the GAE projection queries. Note the rules imposed by
4012. GAE: each field must be indexed,
  4013. projection queries cannot contain blob or text fields, and you
  4014. cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
  4015. - optional attribute 'reusecursor' allows use of cursor with queries
  4016. that have the limitby attribute. Set the attribute to True for the
  4017. first query, set it to the value of db['_lastcursor'] to continue
  4018. a previous query. The user must save the cursor value between
  4019. requests, and the filters must be identical. It is up to the user
  4020. to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
  4021. """
  4022. (items, tablename, fields) = self.select_raw(query,fields,attributes)
  4023. # self.db['_lastsql'] = self._select(query,fields,attributes)
  4024. rows = [[(t==self.db[tablename]._id.name and item) or \
  4025. (t=='nativeRef' and item) or getattr(item, t) \
  4026. for t in fields] for item in items]
  4027. colnames = ['%s.%s' % (tablename, t) for t in fields]
  4028. processor = attributes.get('processor',self.parse)
  4029. return processor(rows,fields,colnames,False)
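# A usage sketch of the attributes documented above (db and db.person are
# assumed to exist):
# >>> rows = db(db.person.name > '').select(db.person.name, projection=True)
# >>> rows = db(db.person).select(limitby=(0, 20), reusecursor=True)
# >>> more = db(db.person).select(limitby=(0, 20), reusecursor=db['_lastcursor'])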
  4030. def count(self,query,distinct=None):
  4031. if distinct:
  4032. raise RuntimeError, "COUNT DISTINCT not supported"
  4033. (items, tablename, fields) = self.select_raw(query)
  4034. # self.db['_lastsql'] = self._count(query)
  4035. try:
  4036. return len(items)
  4037. except TypeError:
  4038. return items.count(limit=None)
  4039. def delete(self,tablename, query):
  4040. """
  4041. This function was changed on 2010-05-04 because according to
  4042. http://code.google.com/p/googleappengine/issues/detail?id=3119
  4043. GAE no longer supports deleting more than 1000 records.
  4044. """
  4045. # self.db['_lastsql'] = self._delete(tablename,query)
  4046. (items, tablename, fields) = self.select_raw(query)
  4047. # items can be one item or a query
  4048. if not isinstance(items,list):
  4049. counter = items.count(limit=None)
  4050. leftitems = items.fetch(1000)
  4051. while len(leftitems):
  4052. gae.delete(leftitems)
  4053. leftitems = items.fetch(1000)
  4054. else:
  4055. counter = len(items)
  4056. gae.delete(items)
  4057. return counter
  4058. def update(self,tablename,query,update_fields):
  4059. # self.db['_lastsql'] = self._update(tablename,query,update_fields)
  4060. (items, tablename, fields) = self.select_raw(query)
  4061. counter = 0
  4062. for item in items:
  4063. for field, value in update_fields:
  4064. setattr(item, field.name, self.represent(value,field.type))
  4065. item.put()
  4066. counter += 1
  4067. LOGGER.info(str(counter))
  4068. return counter
  4069. def insert(self,table,fields):
  4070. dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
  4071. # table._db['_lastsql'] = self._insert(table,fields)
  4072. tmp = table._tableobj(**dfields)
  4073. tmp.put()
  4074. rid = Reference(tmp.key().id())
  4075. (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
  4076. return rid
  4077. def bulk_insert(self,table,items):
  4078. parsed_items = []
  4079. for item in items:
  4080. dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
  4081. parsed_items.append(table._tableobj(**dfields))
  4082. gae.put(parsed_items)
  4083. return True
  4084. def uuid2int(uuidv):
  4085. return uuid.UUID(uuidv).int
  4086. def int2uuid(n):
  4087. return str(uuid.UUID(int=n))
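# Round-trip sketch: these helpers map a UUID string to its 128-bit integer
# form (usable as a numeric id) and back, e.g.:
#   n = uuid2int('12345678-1234-5678-1234-567812345678')
#   int2uuid(n)   # -> '12345678-1234-5678-1234-567812345678'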
  4088. class CouchDBAdapter(NoSQLAdapter):
  4089. drivers = ('couchdb',)
  4090. uploads_in_blob = True
  4091. types = {
  4092. 'boolean': bool,
  4093. 'string': str,
  4094. 'text': str,
  4095. 'password': str,
  4096. 'blob': str,
  4097. 'upload': str,
  4098. 'integer': long,
  4099. 'bigint': long,
  4100. 'float': float,
  4101. 'double': float,
  4102. 'date': datetime.date,
  4103. 'time': datetime.time,
  4104. 'datetime': datetime.datetime,
  4105. 'id': long,
  4106. 'reference': long,
  4107. 'list:string': list,
  4108. 'list:integer': list,
  4109. 'list:reference': list,
  4110. }
  4111. def file_exists(self, filename): pass
  4112. def file_open(self, filename, mode='rb', lock=True): pass
  4113. def file_close(self, fileobj): pass
  4114. def expand(self,expression,field_type=None):
  4115. if isinstance(expression,Field):
  4116. if expression.type=='id':
  4117. return "%s._id" % expression.tablename
  4118. return BaseAdapter.expand(self,expression,field_type)
  4119. def AND(self,first,second):
  4120. return '(%s && %s)' % (self.expand(first),self.expand(second))
  4121. def OR(self,first,second):
  4122. return '(%s || %s)' % (self.expand(first),self.expand(second))
  4123. def EQ(self,first,second):
  4124. if second is None:
  4125. return '(%s == null)' % self.expand(first)
  4126. return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
  4127. def NE(self,first,second):
  4128. if second is None:
  4129. return '(%s != null)' % self.expand(first)
  4130. return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
  4131. def COMMA(self,first,second):
  4132. return '%s + %s' % (self.expand(first),self.expand(second))
  4133. def represent(self, obj, fieldtype):
  4134. value = NoSQLAdapter.represent(self, obj, fieldtype)
  4135. if fieldtype=='id':
  4136. return repr(str(int(value)))
  4137. elif fieldtype in ('date','time','datetime','boolean'):
  4138. return serializers.json(value)
  4139. return repr(not isinstance(value,unicode) and value \
  4140. or value and value.encode('utf8'))
  4141. def __init__(self,db,uri='couchdb://127.0.0.1:5984',
  4142. pool_size=0,folder=None,db_codec ='UTF-8',
  4143. credential_decoder=IDENTITY, driver_args={},
  4144. adapter_args={}, do_connect=True):
  4145. self.db = db
  4146. self.uri = uri
  4147. if do_connect: self.find_driver(adapter_args)
  4148. self.dbengine = 'couchdb'
  4149. self.folder = folder
  4150. db['_lastsql'] = ''
  4151. self.db_codec = 'UTF-8'
  4152. self.pool_size = pool_size
  4153. url='http://'+uri[10:]
  4154. def connector(url=url,driver_args=driver_args):
  4155. return self.driver.Server(url,**driver_args)
  4156. self.reconnect(connector,cursor=False)
  4157. def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
  4158. if migrate:
  4159. try:
  4160. self.connection.create(table._tablename)
  4161. except:
  4162. pass
  4163. def insert(self,table,fields):
  4164. id = uuid2int(web2py_uuid())
  4165. ctable = self.connection[table._tablename]
  4166. values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
  4167. values['_id'] = str(id)
  4168. ctable.save(values)
  4169. return id
  4170. def _select(self,query,fields,attributes):
  4171. if not isinstance(query,Query):
  4172. raise SyntaxError, "Not Supported"
  4173. for key in set(attributes.keys())-SELECT_ARGS:
  4174. raise SyntaxError, 'invalid select attribute: %s' % key
  4175. new_fields=[]
  4176. for item in fields:
  4177. if isinstance(item,SQLALL):
  4178. new_fields += item._table
  4179. else:
  4180. new_fields.append(item)
  4181. def uid(fd):
  4182. return fd=='id' and '_id' or fd
  4183. def get(row,fd):
  4184. return fd=='id' and int(row['_id']) or row.get(fd,None)
  4185. fields = new_fields
  4186. tablename = self.get_table(query)
  4187. fieldnames = [f.name for f in (fields or self.db[tablename])]
  4188. colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
  4189. fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
  4190. fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
  4191. dict(t=tablename,
  4192. query=self.expand(query),
  4193. order='%s._id' % tablename,
  4194. fields=fields)
  4195. return fn, colnames
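# For example (roughly; 'person' is a hypothetical table),
# db(db.person.name=='James').select(db.person.id, db.person.name)
# builds the temporary CouchDB map function:
#   (function(person){if((person.name == 'James'))
#       emit(person._id,[person._id,person.name]);})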
  4196. def select(self,query,fields,attributes):
  4197. if not isinstance(query,Query):
  4198. raise SyntaxError, "Not Supported"
  4199. fn, colnames = self._select(query,fields,attributes)
  4200. tablename = colnames[0].split('.')[0]
  4201. ctable = self.connection[tablename]
  4202. rows = [cols['value'] for cols in ctable.query(fn)]
  4203. processor = attributes.get('processor',self.parse)
  4204. return processor(rows,fields,colnames,False)
  4205. def delete(self,tablename,query):
  4206. if not isinstance(query,Query):
  4207. raise SyntaxError, "Not Supported"
  4208. if query.first.type=='id' and query.op==self.EQ:
  4209. id = query.second
  4210. tablename = query.first.tablename
  4211. assert(tablename == query.first.tablename)
  4212. ctable = self.connection[tablename]
  4213. try:
  4214. del ctable[str(id)]
  4215. return 1
  4216. except couchdb.http.ResourceNotFound:
  4217. return 0
  4218. else:
  4219. tablename = self.get_table(query)
  4220. rows = self.select(query,[self.db[tablename]._id],{})
  4221. ctable = self.connection[tablename]
  4222. for row in rows:
  4223. del ctable[str(row.id)]
  4224. return len(rows)
  4225. def update(self,tablename,query,fields):
  4226. if not isinstance(query,Query):
  4227. raise SyntaxError, "Not Supported"
  4228. if query.first.type=='id' and query.op==self.EQ:
  4229. id = query.second
  4230. tablename = query.first.tablename
  4231. ctable = self.connection[tablename]
  4232. try:
  4233. doc = ctable[str(id)]
  4234. for key,value in fields:
  4235. doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
  4236. ctable.save(doc)
  4237. return 1
  4238. except couchdb.http.ResourceNotFound:
  4239. return 0
  4240. else:
  4241. tablename = self.get_table(query)
  4242. rows = self.select(query,[self.db[tablename]._id],{})
  4243. ctable = self.connection[tablename]
  4244. table = self.db[tablename]
  4245. for row in rows:
  4246. doc = ctable[str(row.id)]
  4247. for key,value in fields:
  4248. doc[key.name] = self.represent(value,table[key.name].type)
  4249. ctable.save(doc)
  4250. return len(rows)
  4251. def count(self,query,distinct=None):
  4252. if distinct:
  4253. raise RuntimeError, "COUNT DISTINCT not supported"
  4254. if not isinstance(query,Query):
  4255. raise SyntaxError, "Not Supported"
  4256. tablename = self.get_table(query)
  4257. rows = self.select(query,[self.db[tablename]._id],{})
  4258. return len(rows)
  4259. def cleanup(text):
  4260. """
  4261. validates that the given text is clean: only contains [0-9a-zA-Z_]
  4262. """
  4263. if not REGEX_ALPHANUMERIC.match(text):
  4264. raise SyntaxError, 'invalid table or field name: %s' % text
  4265. return text
  4266. class MongoDBAdapter(NoSQLAdapter):
  4267. drivers = ('pymongo',)
  4268. uploads_in_blob = True
  4269. types = {
  4270. 'boolean': bool,
  4271. 'string': str,
  4272. 'text': str,
  4273. 'password': str,
  4274. 'blob': str,
  4275. 'upload': str,
  4276. 'integer': long,
  4277. 'bigint': long,
  4278. 'float': float,
  4279. 'double': float,
  4280. 'date': datetime.date,
  4281. 'time': datetime.time,
  4282. 'datetime': datetime.datetime,
  4283. 'id': long,
  4284. 'reference': long,
  4285. 'list:string': list,
  4286. 'list:integer': list,
  4287. 'list:reference': list,
  4288. }
4289. def __init__(self,db,uri='mongodb://127.0.0.1:27017/db',
  4290. pool_size=0,folder=None,db_codec ='UTF-8',
  4291. credential_decoder=IDENTITY, driver_args={},
  4292. adapter_args={}, do_connect=True):
  4293. self.db = db
  4294. self.uri = uri
  4295. if do_connect: self.find_driver(adapter_args)
  4296. m=None
  4297. try:
  4298. #Since version 2
  4299. import pymongo.uri_parser
  4300. m = pymongo.uri_parser.parse_uri(uri)
  4301. except ImportError:
  4302. try:
  4303. #before version 2 of pymongo
  4304. import pymongo.connection
  4305. m = pymongo.connection._parse_uri(uri)
  4306. except ImportError:
  4307. raise ImportError("Uriparser for mongodb is not available")
  4308. except:
  4309. raise SyntaxError("This type of uri is not supported by the mongodb uri parser")
  4310. self.dbengine = 'mongodb'
  4311. self.folder = folder
  4312. db['_lastsql'] = ''
  4313. self.db_codec = 'UTF-8'
  4314. self.pool_size = pool_size
4315. #this is the minimum number of replicas the adapter should wait for on insert/update
  4316. self.minimumreplication = adapter_args.get('minimumreplication',0)
4317. #inserts and selects used to be performed asynchronously by default; the default is now
4318. #synchronous, unless overruled by this adapter argument or a per-call parameter
  4319. self.safe = adapter_args.get('safe',True)
  4320. if isinstance(m,tuple):
  4321. m = {"database" : m[1]}
  4322. if m.get('database')==None:
  4323. raise SyntaxError("Database is required!")
  4324. def connector(uri=self.uri,m=m):
  4325. try:
  4326. return self.driver.Connection(uri)[m.get('database')]
  4327. except self.driver.errors.ConnectionFailure, inst:
  4328. raise SyntaxError, "The connection to " + uri + " could not be made"
  4329. except Exception, inst:
4330. if str(inst) == "cannot specify database without a username and password":
4331. raise SyntaxError("You are probably running version 1.1 of pymongo, which contains an authentication bug. Please update pymongo.")
4332. else:
4333. raise SyntaxError("This is not a valid MongoDB uri (see http://www.mongodb.org/display/DOCS/Connections). Error: %s" % inst)
  4334. self.reconnect(connector,cursor=False)
  4335. def represent(self, obj, fieldtype):
  4336. value = NoSQLAdapter.represent(self, obj, fieldtype)
  4337. if fieldtype =='date':
  4338. if value == None:
  4339. return value
4340. t = datetime.time(0, 0, 0) #this padding can be stripped off later, based on the fieldtype
4341. return datetime.datetime.combine(value, t) #mongodb has no date object, so dates must be stored as datetime, string or integer
  4342. elif fieldtype == 'time':
  4343. if value == None:
  4344. return value
4345. d = datetime.date(2000, 1, 1) #this padding can be stripped off later, based on the fieldtype
4346. return datetime.datetime.combine(d, value) #mongodb has no time object, so times must be stored as datetime, string or integer
  4347. elif fieldtype == 'list:string' or fieldtype == 'list:integer' or fieldtype == 'list:reference':
  4348. return value #raise SyntaxError, "Not Supported"
  4349. return value
4350. #'safe' determines whether an asynchronous request or a synchronous action is performed
4351. #for safety, requests are synchronous by default
  4352. def insert(self,table,fields,safe=None):
  4353. if safe==None:
  4354. safe=self.safe
  4355. ctable = self.connection[table._tablename]
  4356. values = dict((k.name,self.represent(v,table[k.name].type)) for k,v in fields)
  4357. ctable.insert(values,safe=safe)
  4358. return int(str(values['_id']), 16)
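# Note: the returned id is the ObjectId's 24 hex digits read as a base-16
# integer, e.g. int('4f4e3c5e8a1e3b0001000000', 16) (hex string illustrative);
# expand() below reverses this with ObjectId('%X' % n), so integer ids keep
# working across adapters.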
  4359. def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None, isCapped=False):
  4360. if isCapped:
  4361. raise RuntimeError, "Not implemented"
  4362. else:
  4363. pass
  4364. def count(self,query,distinct=None,snapshot=True):
  4365. if distinct:
  4366. raise RuntimeError, "COUNT DISTINCT not supported"
  4367. if not isinstance(query,Query):
  4368. raise SyntaxError, "Not Supported"
  4369. tablename = self.get_table(query)
  4370. return int(self.select(query,[self.db[tablename]._id],{},count=True,snapshot=snapshot)['count'])
4371. #It might be faster to use pymongo's .count() directly,
4372. # i.e. connection[table].find(query).count(), since that would avoid transferring the full result set
  4373. def expand(self, expression, field_type=None):
  4374. try:
  4375. from pymongo.objectid import ObjectId
  4376. except ImportError:
  4377. from bson.objectid import ObjectId
  4378. #if isinstance(expression,Field):
  4379. # if expression.type=='id':
  4380. # return {_id}"
  4381. if isinstance(expression, Query):
  4382. # any query using 'id':=
  4383. # set name as _id (as per pymongo/mongodb primary key)
  4384. # convert second arg to an objectid field
  4385. # (if its not already)
  4386. # if second arg is 0 convert to objectid
  4387. if isinstance(expression.first,Field) and \
  4388. expression.first.type == 'id':
  4389. expression.first.name = '_id'
  4390. if expression.second != 0 and \
  4391. not isinstance(expression.second,ObjectId):
  4392. if isinstance(expression.second,int):
  4393. try:
  4394. # Because the reference field is by default
  4395. # an integer and therefore this must be an
  4396. # integer to be able to work with other
  4397. # databases
  4398. expression.second = ObjectId(("%X" % expression.second))
  4399. except:
4400. raise SyntaxError, 'The second argument must be an integer that can represent an objectid.'
  4401. else:
  4402. try:
  4403. #But a direct id is also possible
  4404. expression.second = ObjectId(expression.second)
  4405. except:
  4406. raise SyntaxError, 'second argument must be of type ObjectId or an objectid representable integer'
  4407. elif expression.second == 0:
  4408. expression.second = ObjectId('000000000000000000000000')
  4409. return expression.op(expression.first, expression.second)
  4410. if isinstance(expression, Field):
  4411. if expression.type=='id':
  4412. return "_id"
  4413. else:
  4414. return expression.name
  4415. #return expression
  4416. elif isinstance(expression, (Expression, Query)):
  4417. if not expression.second is None:
  4418. return expression.op(expression.first, expression.second)
  4419. elif not expression.first is None:
  4420. return expression.op(expression.first)
  4421. elif not isinstance(expression.op, str):
  4422. return expression.op()
  4423. else:
  4424. return expression.op
  4425. elif field_type:
  4426. return str(self.represent(expression,field_type))
  4427. elif isinstance(expression,(list,tuple)):
  4428. return ','.join(self.represent(item,field_type) for item in expression)
  4429. else:
  4430. return expression
  4431. def _select(self,query,fields,attributes):
  4432. try:
  4433. from bson.son import SON
  4434. except ImportError:
  4435. from pymongo.son import SON
  4436. if 'for_update' in attributes:
4437. LOGGER.warn('mongodb does not support for_update')
  4438. for key in set(attributes.keys())-set(('limitby','orderby','for_update')):
  4439. if attributes[key]!=None:
  4440. raise SyntaxError, 'invalid select attribute: %s' % key
  4441. new_fields=[]
  4442. mongosort_list = []
  4443. # try an orderby attribute
  4444. orderby = attributes.get('orderby', False)
  4445. limitby = attributes.get('limitby', False)
  4446. #distinct = attributes.get('distinct', False)
  4447. if orderby:
  4448. #print "in if orderby %s" % orderby
  4449. if isinstance(orderby, (list, tuple)):
  4450. orderby = xorify(orderby)
  4451. # !!!! need to add 'random'
  4452. for f in self.expand(orderby).split(','):
  4453. if f.startswith('-'):
  4454. mongosort_list.append((f[1:],-1))
  4455. else:
  4456. mongosort_list.append((f,1))
  4457. if limitby:
  4458. limitby_skip, limitby_limit = limitby
  4459. else:
  4460. limitby_skip = limitby_limit = 0
  4461. mongofields_dict = SON()
  4462. mongoqry_dict = {}
  4463. for item in fields:
  4464. if isinstance(item,SQLALL):
  4465. new_fields += item._table
  4466. else:
  4467. new_fields.append(item)
  4468. fields = new_fields
  4469. if isinstance(query,Query):
  4470. tablename = self.get_table(query)
  4471. elif len(fields) != 0:
  4472. tablename = fields[0].tablename
  4473. else:
4474. raise SyntaxError, "The table name could not be determined from the query or from the select fields."
  4475. mongoqry_dict = self.expand(query)
  4476. fields = fields or self.db[tablename]
  4477. for field in fields:
  4478. mongofields_dict[field.name] = 1
  4479. return tablename, mongoqry_dict, mongofields_dict, \
  4480. mongosort_list, limitby_limit, limitby_skip
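# For example (roughly; 'person' is a hypothetical table),
# db(db.person.name=='James').select(db.person.name, limitby=(0, 10),
# orderby=~db.person.name) returns the tuple
#   ('person', {'name': 'James'}, SON-of-{'name': 1}, [('name', -1)], 10, 0)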
  4481. # need to define all the 'sql' methods gt,lt etc....
  4482. def select(self,query,fields,attributes,count=False,snapshot=False):
  4483. try:
  4484. from pymongo.objectid import ObjectId
  4485. except ImportError:
  4486. from bson.objectid import ObjectId
  4487. tablename, mongoqry_dict, mongofields_dict, \
  4488. mongosort_list, limitby_limit, limitby_skip = \
  4489. self._select(query,fields,attributes)
  4490. ctable = self.connection[tablename]
  4491. if count:
  4492. return {'count' : ctable.find(
  4493. mongoqry_dict, mongofields_dict,
  4494. skip=limitby_skip, limit=limitby_limit,
  4495. sort=mongosort_list, snapshot=snapshot).count()}
  4496. else:
  4497. mongo_list_dicts = ctable.find(
  4498. mongoqry_dict, mongofields_dict,
  4499. skip=limitby_skip, limit=limitby_limit,
  4500. sort=mongosort_list, snapshot=snapshot) # pymongo cursor object
  4501. # DEBUG: print "mongo_list_dicts=%s" % mongo_list_dicts
  4502. rows = []
  4503. ### populate row in proper order
  4504. colnames = [str(field) for field in fields]
  4505. for k,record in enumerate(mongo_list_dicts):
  4506. row=[]
  4507. for fullcolname in colnames:
  4508. colname = fullcolname.split('.')[1]
  4509. column = '_id' if colname=='id' else colname
  4510. if column in record:
  4511. if column == '_id' and isinstance(
  4512. record[column],ObjectId):
  4513. value = int(str(record[column]),16)
  4514. elif column != '_id':
  4515. value = record[column]
  4516. else:
  4517. value = None
  4518. else:
  4519. value = None
  4520. row.append(value)
  4521. rows.append(row)
  4522. processor = attributes.get('processor',self.parse)
  4523. return processor(rows,fields,colnames,False)
  4524. def INVERT(self,first):
  4525. #print "in invert first=%s" % first
  4526. return '-%s' % self.expand(first)
  4527. def drop(self, table, mode=''):
  4528. ctable = self.connection[table._tablename]
  4529. ctable.drop()
  4530. def truncate(self,table,mode,safe=None):
  4531. if safe==None:
  4532. safe=self.safe
  4533. ctable = self.connection[table._tablename]
  4534. ctable.remove(None, safe=True)
  4535. #the update function should return a string
  4536. def oupdate(self,tablename,query,fields):
  4537. if not isinstance(query,Query):
  4538. raise SyntaxError, "Not Supported"
  4539. filter = None
  4540. if query:
  4541. filter = self.expand(query)
  4542. f_v = []
  4543. modify = { '$set' : dict(((k.name,self.represent(v,k.type)) for k,v in fields)) }
  4544. return modify,filter
  4545. #TODO implement update
  4546. #TODO implement set operator
  4547. #TODO implement find and modify
  4548. #todo implement complex update
  4549. def update(self,tablename,query,fields,safe=None):
  4550. if safe==None:
  4551. safe=self.safe
4552. #returns the number of modified rows, or zero; no exception is raised when nothing matches
  4553. if not isinstance(query,Query):
  4554. raise RuntimeError, "Not implemented"
  4555. amount = self.count(query,False)
  4556. modify,filter = self.oupdate(tablename,query,fields)
  4557. try:
  4558. if safe:
  4559. return self.connection[tablename].update(filter,modify,multi=True,safe=safe).n
  4560. else:
  4561. amount =self.count(query)
  4562. self.connection[tablename].update(filter,modify,multi=True,safe=safe)
  4563. return amount
  4564. except:
4565. #TODO Reverse the update query to verify that the update succeeded
  4566. return 0
  4567. """
  4568. An special update operator that enables the update of specific field
  4569. return a dict
  4570. """
  4571. #this function returns a dict with the where clause and update fields
  4572. def _update(self,tablename,query,fields):
  4573. return str(self.oupdate(tablename,query,fields))
  4574. def bulk_insert(self, table, items):
  4575. return [self.insert(table,item) for item in items]
  4576. #TODO This will probably not work:(
  4577. def NOT(self, first):
  4578. result = {}
  4579. result["$not"] = self.expand(first)
  4580. return result
  4581. def AND(self,first,second):
  4582. f = self.expand(first)
  4583. s = self.expand(second)
  4584. f.update(s)
  4585. return f
  4586. def OR(self,first,second):
  4587. # pymongo expects: .find( {'$or' : [{'name':'1'}, {'name':'2'}] } )
  4588. result = {}
  4589. f = self.expand(first)
  4590. s = self.expand(second)
  4591. result['$or'] = [f,s]
  4592. return result
  4593. def BELONGS(self, first, second):
  4594. if isinstance(second, str):
  4595. return {self.expand(first) : {"$in" : [ second[:-1]]} }
  4596. elif second==[] or second==():
  4597. return {1:0}
  4598. items = [self.expand(item, first.type) for item in second]
  4599. return {self.expand(first) : {"$in" : items} }
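# For example (roughly; 'person' is a hypothetical table),
# db.person.name.belongs(['James', 'John']) expands to the filter:
#   {'name': {'$in': ['James', 'John']}}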
  4600. def EQ(self,first,second):
  4601. result = {}
  4602. #if second is None:
  4603. #return '(%s == null)' % self.expand(first)
  4604. #return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
  4605. result[self.expand(first)] = self.expand(second)
  4606. return result
  4607. def NE(self, first, second=None):
  4608. result = {}
  4609. result[self.expand(first)] = {'$ne': self.expand(second)}
  4610. return result
  4611. def LT(self,first,second=None):
  4612. if second is None:
  4613. raise RuntimeError, "Cannot compare %s < None" % first
  4614. result = {}
  4615. result[self.expand(first)] = {'$lt': self.expand(second)}
  4616. return result
  4617. def LE(self,first,second=None):
  4618. if second is None:
  4619. raise RuntimeError, "Cannot compare %s <= None" % first
  4620. result = {}
  4621. result[self.expand(first)] = {'$lte': self.expand(second)}
  4622. return result
  4623. def GT(self,first,second):
  4624. result = {}
  4625. result[self.expand(first)] = {'$gt': self.expand(second)}
  4626. return result
  4627. def GE(self,first,second=None):
  4628. if second is None:
  4629. raise RuntimeError, "Cannot compare %s >= None" % first
  4630. result = {}
  4631. result[self.expand(first)] = {'$gte': self.expand(second)}
  4632. return result
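# Sketch of the filters the comparison operators build (field names
# illustrative), e.g. for db.person.size > 5 and db.person.name == 'James':
#   GT  -> {'size': {'$gt': 5}}
#   EQ  -> {'name': 'James'}
#   AND merges the two dicts: {'size': {'$gt': 5}, 'name': 'James'}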
4633. def ADD(self, first, second):
4634. raise NotImplementedError, "Arithmetic operators are not supported yet; they would need a javascript implementation."
4635. return '%s + %s' % (self.expand(first), self.expand(second, first.type))
4636. def SUB(self, first, second):
4637. raise NotImplementedError, "Arithmetic operators are not supported yet; they would need a javascript implementation."
4638. return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))
4639. def MUL(self, first, second):
4640. raise NotImplementedError, "Arithmetic operators are not supported yet; they would need a javascript implementation."
4641. return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))
4642. def DIV(self, first, second):
4643. raise NotImplementedError, "Arithmetic operators are not supported yet; they would need a javascript implementation."
4644. return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))
4645. def MOD(self, first, second):
4646. raise NotImplementedError, "Arithmetic operators are not supported yet; they would need a javascript implementation."
4647. return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))
4648. def AS(self, first, second):
4649. raise NotImplementedError, "AS is not supported yet; it would need a javascript implementation."
4650. return '%s AS %s' % (self.expand(first), second)
4651. #We could implement an option that simulates a full-featured SQL database, but such an option should be enabled explicitly or implemented as a separate library.
4652. def ON(self, first, second):
4653. raise NotImplementedError, "JOINs are not possible in NoSQL, but can be simulated with a wrapper."
4654. return '%s ON %s' % (self.expand(first), self.expand(second))
  4655. #
4656. # BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS;
4657. # THE SECOND, REGEX-BASED SET OVERRIDES THE FIRST
  4658. #
  4659. def COMMA(self, first, second):
  4660. return '%s, %s' % (self.expand(first), self.expand(second))
  4661. def LIKE(self, first, second):
  4662. #escaping regex operators?
  4663. return {self.expand(first) : ('%s' % self.expand(second, 'string').replace('%','/'))}
  4664. def STARTSWITH(self, first, second):
  4665. #escaping regex operators?
  4666. return {self.expand(first) : ('/^%s/' % self.expand(second, 'string'))}
  4667. def ENDSWITH(self, first, second):
  4668. #escaping regex operators?
4669. return {self.expand(first) : ('/%s$/' % self.expand(second, 'string'))}
  4670. def CONTAINS(self, first, second):
4671. #SQL CONTAINS differs technically from a regex match, but mongodb only supports regex and the result is the same
  4672. return {self.expand(first) : ('/%s/' % self.expand(second, 'string'))}
  4673. def LIKE(self, first, second):
  4674. import re
4675. return {self.expand(first) : {'$regex' : re.escape(self.expand(second, 'string')).replace('\\%','.*')}}
4676. #TODO verify full compatibility with the official SQL LIKE operator
  4677. def STARTSWITH(self, first, second):
  4678. #TODO Solve almost the same problem as with endswith
  4679. import re
  4680. return {self.expand(first) : {'$regex' : '^' + re.escape(self.expand(second, 'string'))}}
4681. #TODO verify full compatibility with the official SQL LIKE operator
  4682. def ENDSWITH(self, first, second):
  4683. #escaping regex operators?
4684. #TODO verify anchoring: endswith('a') must not match a value like 'zsa_corbitt', which merely contains an 'a' but ends with a 't'
  4685. import re
  4686. return {self.expand(first) : {'$regex' : re.escape(self.expand(second, 'string')) + '$'}}
4687. #TODO verify full compatibility with the official Oracle CONTAINS operator
  4688. def CONTAINS(self, first, second):
4689. #SQL CONTAINS differs technically from a regex match, but mongodb only supports regex and the result is the same
4690. #TODO contains operators need to be transformed to Regex
4691. return {self.expand(first) : {'$regex' : ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
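# For example (roughly; 'person' is a hypothetical table), the regex-based
# operators above build:
#   LIKE(db.person.name, 'Ja%')      -> {'name': {'$regex': 'Ja.*'}}
#   STARTSWITH(db.person.name, 'Ja') -> {'name': {'$regex': '^Ja'}}
#   ENDSWITH(db.person.name, 'es')   -> {'name': {'$regex': 'es$'}}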
  4692. #
  4693. # END REDUNDANCY
  4694. #
  4695. class IMAPAdapter(NoSQLAdapter):
  4696. drivers = ('imaplib',)
  4697. """ IMAP server adapter
4698. This class is intended as an interface to
4699. email IMAP servers, to perform simple queries in the
4700. web2py DAL query syntax, so that email reading, searching and
4701. other related IMAP mail services (such as those implemented
4702. by providers like Google(r) and Yahoo!(r))
4703. can be managed from web2py applications.
4704. The code uses examples by Yuji Tomita from this post:
4705. http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
4706. and is based on the docs for Python imaplib, the Python email
4707. package and the IETF email RFCs (i.e. RFC 2060 and RFC 3501).
4708. This adapter was tested with a small set of operations against Gmail(r).
4709. Requests to other services could raise command syntax and response data issues.
  4710. It creates its table and field names "statically",
  4711. meaning that the developer should leave the table and field
  4712. definitions to the DAL instance by calling the adapter's
  4713. .define_tables() method. The tables are defined with the
  4714. IMAP server mailbox list information.
  4715. .define_tables() returns a dictionary mapping dal tablenames
  4716. to the server mailbox names with the following structure:
  4717. {<tablename>: str <server mailbox name>}
  4718. Here is a list of supported fields:
  4719. Field Type Description
  4720. ################################################################
  4721. uid string
  4722. answered boolean Flag
  4723. created date
  4724. content list:string A list of text or html parts
  4725. to string
  4726. cc string
  4727. bcc string
4728. size integer the number of octets of the message*
  4729. deleted boolean Flag
  4730. draft boolean Flag
  4731. flagged boolean Flag
  4732. sender string
  4733. recent boolean Flag
  4734. seen boolean Flag
  4735. subject string
  4736. mime string The mime header declaration
  4737. email string The complete RFC822 message**
  4738. attachments list:string Each non text decoded part as string
  4739. *At the application side it is measured as the length of the RFC822
  4740. message string
  4741. WARNING: As row id's are mapped to email sequence numbers,
  4742. make sure your imap client web2py app does not delete messages
  4743. during select or update actions, to prevent
  4744. updating or deleting different messages.
  4745. Sequence numbers change whenever the mailbox is updated.
4746. To avoid these sequence number issues, the use of uid fields
4747. in query references is recommended (although the rule of keeping
4748. update and delete in separate actions still applies).
  4749. # This is the code recommended to start imap support
  4750. # at the app's model:
  4751. imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
  4752. imapdb.define_tables()
  4753. Here is an (incomplete) list of possible imap commands:
  4754. # Count today's unseen messages
  4755. # smaller than 6000 octets from the
  4756. # inbox mailbox
  4757. q = imapdb.INBOX.seen == False
  4758. q &= imapdb.INBOX.created == datetime.date.today()
  4759. q &= imapdb.INBOX.size < 6000
  4760. unread = imapdb(q).count()
  4761. # Fetch last query messages
  4762. rows = imapdb(q).select()
  4763. # it is also possible to filter query select results with limitby and
  4764. # sequences of mailbox fields
  4765. set.select(<fields sequence>, limitby=(<int>, <int>))
  4766. # Mark last query messages as seen
  4767. messages = [row.uid for row in rows]
  4768. seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
  4769. # Delete messages in the imap database that have mails from mr. Gumby
  4770. deleted = 0
4771. for mailbox in imapdb.tables:
  4772. deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
4773. # It is also possible to mark messages for deletion instead of erasing them
4774. # directly, with set.update(deleted=True)
4775. # This object gives access
4776. # to the adapter's auto mailbox
4777. # mapped names (i.e. which native
4778. # mailbox corresponds to which table name)
  4779. db.mailboxes <dict> # tablename, server native name pairs
  4780. # To retrieve a table native mailbox name use:
  4781. db.<table>.mailbox
  4782. """
  4783. types = {
  4784. 'string': str,
  4785. 'text': str,
  4786. 'date': datetime.date,
  4787. 'datetime': datetime.datetime,
  4788. 'id': long,
  4789. 'boolean': bool,
  4790. 'integer': int,
  4791. 'bigint': long,
  4792. 'blob': str,
  4793. 'list:string': str,
  4794. }
  4795. dbengine = 'imap'
  4796. REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
  4797. def __init__(self,
  4798. db,
  4799. uri,
  4800. pool_size=0,
  4801. folder=None,
  4802. db_codec ='UTF-8',
  4803. credential_decoder=IDENTITY,
  4804. driver_args={},
  4805. adapter_args={}, do_connect=True):
  4806. # db uri: user@example.com:password@imap.server.com:123
  4807. # TODO: max size adapter argument for preventing large mail transfers
  4808. self.db = db
  4809. self.uri = uri
  4810. if do_connect: self.find_driver(adapter_args)
  4811. self.pool_size=pool_size
  4812. self.folder = folder
  4813. self.db_codec = db_codec
  4814. self.credential_decoder = credential_decoder
  4815. self.driver_args = driver_args
  4816. self.adapter_args = adapter_args
  4817. self.mailbox_size = None
  4818. self.charset = sys.getfilesystemencoding()
  4819. # imap class
  4820. self.imap4 = None
  4821. uri = uri.split("://")[1]
  4822. """ MESSAGE is an identifier for sequence number"""
  4823. self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
  4824. '\\Recent', '\\Seen', '\\Answered']
  4825. self.search_fields = {
  4826. 'id': 'MESSAGE', 'created': 'DATE',
  4827. 'uid': 'UID', 'sender': 'FROM',
  4828. 'to': 'TO', 'cc': 'CC',
  4829. 'bcc': 'BCC', 'content': 'TEXT',
  4830. 'size': 'SIZE', 'deleted': '\\Deleted',
  4831. 'draft': '\\Draft', 'flagged': '\\Flagged',
  4832. 'recent': '\\Recent', 'seen': '\\Seen',
  4833. 'subject': 'SUBJECT', 'answered': '\\Answered',
  4834. 'mime': None, 'email': None,
  4835. 'attachments': None
  4836. }
  4837. db['_lastsql'] = ''
  4838. m = self.REGEX_URI.match(uri)
  4839. user = m.group('user')
  4840. password = m.group('password')
  4841. host = m.group('host')
  4842. port = int(m.group('port'))
  4843. over_ssl = False
  4844. if port==993:
  4845. over_ssl = True
  4846. driver_args.update(host=host,port=port, password=password, user=user)
  4847. def connector(driver_args=driver_args):
4848. # successful authentication is always assumed here
  4849. # TODO: support direct connection and login tests
  4850. if over_ssl:
  4851. self.imap4 = self.driver.IMAP4_SSL
  4852. else:
  4853. self.imap4 = self.driver.IMAP4
  4854. connection = self.imap4(driver_args["host"], driver_args["port"])
  4855. data = connection.login(driver_args["user"], driver_args["password"])
  4856. # static mailbox list
  4857. connection.mailbox_names = None
  4858. # dummy cursor function
  4859. connection.cursor = lambda : True
  4860. return connection
  4861. self.db.define_tables = self.define_tables
  4862. self.connector = connector
  4863. if do_connect: self.reconnect()
  4864. def reconnect(self, f=None, cursor=True):
  4865. """
  4866. IMAP4 Pool connection method
4867. The imap connection lacks a cursor command of its own,
4868. so a custom command should be provided as a replacement
4869. for connection pooling, to prevent uncaught remote session
4870. closing
  4871. """
  4872. if getattr(self,'connection',None) != None:
  4873. return
  4874. if f is None:
  4875. f = self.connector
  4876. if not self.pool_size:
  4877. self.connection = f()
  4878. self.cursor = cursor and self.connection.cursor()
  4879. else:
  4880. POOLS = ConnectionPool.POOLS
  4881. uri = self.uri
  4882. while True:
  4883. GLOBAL_LOCKER.acquire()
  4884. if not uri in POOLS:
  4885. POOLS[uri] = []
  4886. if POOLS[uri]:
  4887. self.connection = POOLS[uri].pop()
  4888. GLOBAL_LOCKER.release()
  4889. self.cursor = cursor and self.connection.cursor()
  4890. if self.cursor and self.check_active_connection:
  4891. try:
  4892. # check if connection is alive or close it
  4893. result, data = self.connection.list()
  4894. except:
  4895. # Possible connection reset error
  4896. # TODO: read exception class
  4897. self.connection = f()
  4898. break
  4899. else:
  4900. GLOBAL_LOCKER.release()
  4901. self.connection = f()
  4902. self.cursor = cursor and self.connection.cursor()
  4903. break
  4904. self.after_connection()
  4905. def get_last_message(self, tablename):
  4906. last_message = None
  4907. # request mailbox list to the server
  4908. # if needed
  4909. if not isinstance(self.connection.mailbox_names, dict):
  4910. self.get_mailboxes()
  4911. try:
  4912. result = self.connection.select(self.connection.mailbox_names[tablename])
  4913. last_message = int(result[1][0])
  4914. except (IndexError, ValueError, TypeError, KeyError), e:
  4915. LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
  4916. return last_message
  4917. def get_uid_bounds(self, tablename):
  4918. if not isinstance(self.connection.mailbox_names, dict):
  4919. self.get_mailboxes()
  4920. # fetch first and last messages
  4921. # return (first, last) messages uid's
  4922. last_message = self.get_last_message(tablename)
  4923. result, data = self.connection.uid("search", None, "(ALL)")
  4924. uid_list = data[0].strip().split()
  4925. if len(uid_list) <= 0:
  4926. return None
  4927. else:
  4928. return (uid_list[0], uid_list[-1])
4929. def convert_date(self, date, add=None):
4930. """ Convert a date object to a string
4931. with d-Mon-Y style for IMAP, or the inverse
4932. (an IMAP date string to a datetime object)
4933. add <timedelta> is added to the date object
4934. """
4935. if add is None:
4936. add = datetime.timedelta()
  4937. months = [None, "Jan","Feb","Mar","Apr","May","Jun",
  4938. "Jul", "Aug","Sep","Oct","Nov","Dec"]
  4939. if isinstance(date, basestring):
  4940. # Prevent unexpected date response format
  4941. try:
  4942. dayname, datestring = date.split(",")
  4943. except (ValueError):
  4944. LOGGER.debug("Could not parse date text: %s" % date)
  4945. return None
  4946. date_list = datestring.strip().split()
  4947. year = int(date_list[2])
  4948. month = months.index(date_list[1])
  4949. day = int(date_list[0])
  4950. hms = map(int, date_list[3].split(":"))
  4951. return datetime.datetime(year, month, day,
  4952. hms[0], hms[1], hms[2]) + add
  4953. elif isinstance(date, (datetime.datetime, datetime.date)):
  4954. return (date + add).strftime("%d-%b-%Y")
  4955. else:
  4956. return None
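# For example:
#   convert_date("Tue, 15 Mar 2011 12:45:08 +0000")
#       -> datetime.datetime(2011, 3, 15, 12, 45, 8)
#   convert_date(datetime.date(2011, 3, 15))  -> '15-Mar-2011'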
  4957. def encode_text(self, text, charset, errors="replace"):
  4958. """ convert text for mail to unicode"""
  4959. if text is None:
  4960. text = ""
  4961. else:
  4962. if isinstance(text, str):
  4963. if charset is not None:
  4964. text = unicode(text, charset, errors)
  4965. else:
  4966. text = unicode(text, "utf-8", errors)
  4967. else:
  4968. raise Exception("Unsupported mail text type %s" % type(text))
  4969. return text.encode("utf-8")
  4970. def get_charset(self, message):
  4971. charset = message.get_content_charset()
  4972. return charset
  4973. def reset_mailboxes(self):
  4974. self.connection.mailbox_names = None
  4975. self.get_mailboxes()
  4976. def get_mailboxes(self):
  4977. """ Query the mail database for mailbox names """
  4978. mailboxes_list = self.connection.list()
  4979. self.connection.mailbox_names = dict()
  4980. mailboxes = list()
  4981. x = 0
  4982. for item in mailboxes_list[1]:
  4983. x = x + 1
  4984. item = item.strip()
  4985. if not "NOSELECT" in item.upper():
  4986. sub_items = item.split("\"")
  4987. sub_items = [sub_item for sub_item in sub_items \
  4988. if len(sub_item.strip()) > 0]
  4989. mailbox = sub_items[len(sub_items) - 1]
  4990. # remove unwanted characters and store original names
  4991. # Don't allow leading non alphabetic characters
  4992. mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
  4993. mailboxes.append(mailbox_name)
  4994. self.connection.mailbox_names[mailbox_name] = mailbox
  4995. return mailboxes
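# For example, a server mailbox named '[Gmail]/Sent Mail' is exposed as the
# table 'Gmail_Sent_Mail', while the original name is preserved in
# connection.mailbox_names['Gmail_Sent_Mail'].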
  4996. def get_query_mailbox(self, query):
  4997. nofield = True
  4998. tablename = None
  4999. attr = query
  5000. while nofield:
  5001. if hasattr(attr, "first"):
  5002. attr = attr.first
  5003. if isinstance(attr, Field):
  5004. return attr.tablename
  5005. elif isinstance(attr, Query):
  5006. pass
  5007. else:
  5008. return None
  5009. else:
  5010. return None
  5011. return tablename
  5012. def is_flag(self, flag):
  5013. if self.search_fields.get(flag, None) in self.flags:
  5014. return True
  5015. else:
  5016. return False
  5017. def define_tables(self):
  5018. """
5019. Auto create common IMAP fields
5020. This function creates field definitions "statically",
5021. meaning that custom fields, as in other adapters, are
5022. not supported and definitions are handled on a service/mode
5023. basis (local syntax for Gmail(r), Ymail(r))
  5024. Returns a dictionary with tablename, server native mailbox name
  5025. pairs.
  5026. """
  5027. if not isinstance(self.connection.mailbox_names, dict):
  5028. self.get_mailboxes()
  5029. mailboxes = self.connection.mailbox_names.keys()
  5030. for mailbox_name in mailboxes:
  5031. self.db.define_table("%s" % mailbox_name,
  5032. Field("uid", "string", writable=False),
  5033. Field("answered", "boolean"),
  5034. Field("created", "datetime", writable=False),
  5035. Field("content", "list:string", writable=False),
  5036. Field("to", "string", writable=False),
  5037. Field("cc", "string", writable=False),
  5038. Field("bcc", "string", writable=False),
  5039. Field("size", "integer", writable=False),
  5040. Field("deleted", "boolean"),
  5041. Field("draft", "boolean"),
  5042. Field("flagged", "boolean"),
  5043. Field("sender", "string", writable=False),
  5044. Field("recent", "boolean", writable=False),
  5045. Field("seen", "boolean"),
  5046. Field("subject", "string", writable=False),
  5047. Field("mime", "string", writable=False),
  5048. Field("email", "string", writable=False, readable=False),
  5049. Field("attachments", "list:string", writable=False, readable=False),
  5050. )
  5051. # Set a special _mailbox attribute for storing
  5052. # native mailbox names
  5053. self.db[mailbox_name].mailbox = \
  5054. self.connection.mailbox_names[mailbox_name]
  5055. # Set the db instance mailbox collections
  5056. self.db.mailboxes = self.connection.mailbox_names
  5057. return self.db.mailboxes
  5058. def create_table(self, *args, **kwargs):
  5059. # not implemented
  5060. LOGGER.debug("Create table feature is not implemented for %s" % type(self))
  5061. def _select(self,query,fields,attributes):
  5062. """ Search and Fetch records and return web2py
  5063. rows
  5064. """
  5065. if use_common_filters(query):
  5066. query = self.common_filter(query, [self.get_query_mailbox(query),])
  5067. # move this statement elsewhere (upper-level)
  5068. import email
  5069. import email.header
  5070. decode_header = email.header.decode_header
  5071. # get records from imap server with search + fetch
  5072. # convert results to a dictionary
  5073. tablename = None
  5074. fetch_results = list()
  5075. if isinstance(query, (Expression, Query)):
  5076. tablename = self.get_table(query)
  5077. mailbox = self.connection.mailbox_names.get(tablename, None)
  5078. if isinstance(query, Expression):
  5079. pass
  5080. elif isinstance(query, Query):
  5081. if mailbox is not None:
  5082. # select with readonly
  5083. selected = self.connection.select(mailbox, True)
  5084. self.mailbox_size = int(selected[1][0])
  5085. search_query = "(%s)" % str(query).strip()
  5086. search_result = self.connection.uid("search", None, search_query)
  5087. # Normal IMAP response OK is assumed (change this)
  5088. if search_result[0] == "OK":
  5089. # For "light" remote server responses just get the first
  5090. # ten records (change for non-experimental implementation)
  5091. # However, light responses are not guaranteed with this
  5092. # approach, just fewer messages.
  5093. # TODO: change limitby single to 2-tuple argument
  5094. limitby = attributes.get('limitby', None)
  5095. messages_set = search_result[1][0].split()
  5096. # descending order
  5097. messages_set.reverse()
  5098. if limitby is not None:
  5099. # TODO: asc/desc attributes
  5100. messages_set = messages_set[int(limitby[0]):int(limitby[1])]
  5101. # Partial fetches are not used since the email
  5102. # library does not seem to support it (it converts
  5103. # partial messages to mangled message instances)
  5104. imap_fields = "(RFC822)"
  5105. if len(messages_set) > 0:
  5106. # create fetch results object list
5107. # fetch each remote message and store it in memory
  5108. # (change to multi-fetch command syntax for faster
  5109. # transactions)
  5110. for uid in messages_set:
  5111. # fetch the RFC822 message body
  5112. typ, data = self.connection.uid("fetch", uid, imap_fields)
  5113. if typ == "OK":
  5114. fr = {"message": int(data[0][0].split()[0]),
  5115. "uid": int(uid),
  5116. "email": email.message_from_string(data[0][1]),
  5117. "raw_message": data[0][1]
  5118. }
  5119. fr["multipart"] = fr["email"].is_multipart()
  5120. # fetch flags for the message
  5121. ftyp, fdata = self.connection.uid("fetch", uid, "(FLAGS)")
  5122. if ftyp == "OK":
  5123. fr["flags"] = self.driver.ParseFlags(fdata[0])
  5124. fetch_results.append(fr)
  5125. else:
  5126. # error retrieving the flags for this message
  5127. pass
  5128. else:
  5129. # error retrieving the message body
  5130. pass
  5131. elif isinstance(query, basestring):
  5132. # not implemented
  5133. pass
  5134. else:
  5135. pass
  5136. imapqry_dict = {}
  5137. imapfields_dict = {}
  5138. if len(fields) == 1 and isinstance(fields[0], SQLALL):
  5139. allfields = True
  5140. elif len(fields) == 0:
  5141. allfields = True
  5142. else:
  5143. allfields = False
  5144. if allfields:
  5145. fieldnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
  5146. else:
  5147. fieldnames = ["%s.%s" % (tablename, field.name) for field in fields]
  5148. for k in fieldnames:
  5149. imapfields_dict[k] = k
  5150. imapqry_list = list()
  5151. imapqry_array = list()
  5152. for fr in fetch_results:
  5153. attachments = []
  5154. content = []
  5155. size = 0
  5156. n = int(fr["message"])
  5157. item_dict = dict()
  5158. message = fr["email"]
  5159. uid = fr["uid"]
  5160. charset = self.get_charset(message)
  5161. flags = fr["flags"]
  5162. raw_message = fr["raw_message"]
  5163. # Return messages data mapping static fields
  5164. # and fetched results. Mapping should be made
  5165. # outside the select function (with auxiliary
  5166. # instance methods)
5167. # pending: search flag states through the email message
5168. # instances for correct output
  5169. if "%s.id" % tablename in fieldnames:
  5170. item_dict["%s.id" % tablename] = n
  5171. if "%s.created" % tablename in fieldnames:
  5172. item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
  5173. if "%s.uid" % tablename in fieldnames:
  5174. item_dict["%s.uid" % tablename] = uid
  5175. if "%s.sender" % tablename in fieldnames:
  5176. # If there is no encoding found in the message header
  5177. # force utf-8 replacing characters (change this to
  5178. # module's defaults). Applies to .sender, .to, .cc and .bcc fields
  5179. #############################################################################
  5180. # TODO: External function to manage encoding and decoding of message strings
  5181. #############################################################################
  5182. item_dict["%s.sender" % tablename] = self.encode_text(message["From"], charset)
  5183. if "%s.to" % tablename in fieldnames:
  5184. item_dict["%s.to" % tablename] = self.encode_text(message["To"], charset)
  5185. if "%s.cc" % tablename in fieldnames:
  5186. if "Cc" in message.keys():
  5187. item_dict["%s.cc" % tablename] = self.encode_text(message["Cc"], charset)
  5188. else:
  5189. item_dict["%s.cc" % tablename] = ""
  5190. if "%s.bcc" % tablename in fieldnames:
  5191. if "Bcc" in message.keys():
  5192. item_dict["%s.bcc" % tablename] = self.encode_text(message["Bcc"], charset)
  5193. else:
  5194. item_dict["%s.bcc" % tablename] = ""
  5195. if "%s.deleted" % tablename in fieldnames:
  5196. item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
  5197. if "%s.draft" % tablename in fieldnames:
  5198. item_dict["%s.draft" % tablename] = "\\Draft" in flags
  5199. if "%s.flagged" % tablename in fieldnames:
  5200. item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
  5201. if "%s.recent" % tablename in fieldnames:
  5202. item_dict["%s.recent" % tablename] = "\\Recent" in flags
  5203. if "%s.seen" % tablename in fieldnames:
  5204. item_dict["%s.seen" % tablename] = "\\Seen" in flags
  5205. if "%s.subject" % tablename in fieldnames:
  5206. subject = message["Subject"]
  5207. decoded_subject = decode_header(subject)
  5208. text = decoded_subject[0][0]
  5209. encoding = decoded_subject[0][1]
  5210. if encoding in (None, ""):
  5211. encoding = charset
  5212. item_dict["%s.subject" % tablename] = self.encode_text(text, encoding)
  5213. if "%s.answered" % tablename in fieldnames:
  5214. item_dict["%s.answered" % tablename] = "\\Answered" in flags
  5215. if "%s.mime" % tablename in fieldnames:
  5216. item_dict["%s.mime" % tablename] = message.get_content_type()
  5217. # Here goes the whole RFC822 body as an email instance
  5218. # for controller side custom processing
  5219. # The message is stored as a raw string
  5220. # >> email.message_from_string(raw string)
  5221. # returns a Message object for enhanced object processing
  5222. if "%s.email" % tablename in fieldnames:
  5223. item_dict["%s.email" % tablename] = self.encode_text(raw_message, charset)
  5224. # Size measure as suggested in a Velocity Reviews post
  5225. # by Tim Williams: "how to get size of email attachment"
5226. # Note: len() and the server's RFC822.SIZE report don't match
  5227. # To retrieve the server size for representation would add a new
  5228. # fetch transaction to the process
  5229. for part in message.walk():
  5230. if "%s.attachments" % tablename in fieldnames:
  5231. if not "text" in part.get_content_maintype():
  5232. attachments.append(part.get_payload(decode=True))
  5233. if "%s.content" % tablename in fieldnames:
  5234. if "text" in part.get_content_maintype():
  5235. payload = self.encode_text(part.get_payload(decode=True), charset)
  5236. content.append(payload)
  5237. if "%s.size" % tablename in fieldnames:
  5238. if part is not None:
  5239. size += len(str(part))
  5240. item_dict["%s.content" % tablename] = bar_encode(content)
  5241. item_dict["%s.attachments" % tablename] = bar_encode(attachments)
  5242. item_dict["%s.size" % tablename] = size
  5243. imapqry_list.append(item_dict)
  5244. # extra object mapping for the sake of rows object
  5245. # creation (sends an array or lists)
  5246. for item_dict in imapqry_list:
  5247. imapqry_array_item = list()
  5248. for fieldname in fieldnames:
  5249. imapqry_array_item.append(item_dict[fieldname])
  5250. imapqry_array.append(imapqry_array_item)
  5251. return tablename, imapqry_array, fieldnames
  5252. def select(self,query,fields,attributes):
  5253. tablename, imapqry_array , fieldnames = self._select(query,fields,attributes)
  5254. # parse result and return a rows object
  5255. colnames = fieldnames
  5256. processor = attributes.get('processor',self.parse)
  5257. return processor(imapqry_array, fields, colnames)
  5258. def update(self, tablename, query, fields):
  5259. if use_common_filters(query):
  5260. query = self.common_filter(query, [tablename,])
  5261. mark = []
  5262. unmark = []
  5263. rowcount = 0
  5264. query = str(query)
  5265. if query:
  5266. for item in fields:
  5267. field = item[0]
  5268. name = field.name
  5269. value = item[1]
  5270. if self.is_flag(name):
  5271. flag = self.search_fields[name]
  5272. if (value is not None) and (flag != "\\Recent"):
  5273. if value:
  5274. mark.append(flag)
  5275. else:
  5276. unmark.append(flag)
  5277. result, data = self.connection.select(
  5278. self.connection.mailbox_names[tablename])
  5279. string_query = "(%s)" % query
  5280. result, data = self.connection.search(None, string_query)
  5281. store_list = [item.strip() for item in data[0].split()
  5282. if item.strip().isdigit()]
  5283. # change marked flags
  5284. for number in store_list:
  5285. result = None
  5286. if len(mark) > 0:
  5287. result, data = self.connection.store(
  5288. number, "+FLAGS", "(%s)" % " ".join(mark))
  5289. if len(unmark) > 0:
  5290. result, data = self.connection.store(
  5291. number, "-FLAGS", "(%s)" % " ".join(unmark))
  5292. if result == "OK":
  5293. rowcount += 1
  5294. return rowcount
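# Usage sketch (illustrative; INBOX is a mailbox table defined by
# define_tables): imapdb(imapdb.INBOX.uid.belongs(['23','24'])).update(seen=True)
# searches '(UID 23,24)' and issues one STORE +FLAGS (\Seen) per matching message.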
  5295. def count(self,query,distinct=None):
  5296. counter = 0
  5297. tablename = self.get_query_mailbox(query)
  5298. if query and tablename is not None:
  5299. if use_common_filters(query):
  5300. query = self.common_filter(query, [tablename,])
  5301. result, data = self.connection.select(self.connection.mailbox_names[tablename])
  5302. string_query = "(%s)" % query
  5303. result, data = self.connection.search(None, string_query)
  5304. store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
  5305. counter = len(store_list)
  5306. return counter
  5307. def delete(self, tablename, query):
  5308. counter = 0
  5309. if query:
  5310. if use_common_filters(query):
  5311. query = self.common_filter(query, [tablename,])
  5312. result, data = self.connection.select(self.connection.mailbox_names[tablename])
  5313. string_query = "(%s)" % query
  5314. result, data = self.connection.search(None, string_query)
  5315. store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
  5316. for number in store_list:
  5317. result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
  5318. if result == "OK":
  5319. counter += 1
  5320. if counter > 0:
  5321. result, data = self.connection.expunge()
  5322. return counter
  5323. def BELONGS(self, first, second):
  5324. result = None
  5325. name = self.search_fields[first.name]
  5326. if name == "MESSAGE":
  5327. values = [str(val) for val in second if str(val).isdigit()]
  5328. result = "%s" % ",".join(values).strip()
  5329. elif name == "UID":
  5330. values = [str(val) for val in second if str(val).isdigit()]
  5331. result = "UID %s" % ",".join(values).strip()
  5332. else:
  5333. raise Exception("Operation not supported")
  5334. # result = "(%s %s)" % (self.expand(first), self.expand(second))
  5335. return result
  5336. def CONTAINS(self, first, second):
  5337. result = None
  5338. name = self.search_fields[first.name]
  5339. if name in ("FROM", "TO", "SUBJECT", "TEXT"):
  5340. result = "%s \"%s\"" % (name, self.expand(second))
  5341. else:
  5342. if first.name in ("cc", "bcc"):
  5343. result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
  5344. elif first.name == "mime":
  5345. result = "HEADER Content-Type \"%s\"" % self.expand(second)
  5346. else:
  5347. raise Exception("Operation not supported")
  5348. return result
  5349. def GT(self, first, second):
  5350. result = None
  5351. name = self.search_fields[first.name]
  5352. if name == "MESSAGE":
  5353. last_message = self.get_last_message(first.tablename)
  5354. result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
  5355. elif name == "UID":
  5356. # GT and LT may not return
  5357. # expected sets depending on
  5358. # the uid format implemented
  5359. try:
  5360. pedestal, threshold = self.get_uid_bounds(first.tablename)
  5361. except TypeError, e:
  5362. LOGGER.debug("Error requesting uid bounds: %s", str(e))
  5363. return ""
  5364. try:
  5365. lower_limit = int(self.expand(second)) + 1
  5366. except (ValueError, TypeError), e:
  5367. raise Exception("Operation not supported (non integer UID)")
  5368. result = "UID %s:%s" % (lower_limit, threshold)
  5369. elif name == "DATE":
  5370. result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
  5371. elif name == "SIZE":
  5372. result = "LARGER %s" % self.expand(second)
  5373. else:
  5374. raise Exception("Operation not supported")
  5375. return result
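# For example, imapdb.INBOX.created > datetime.date(2012, 1, 1) expands to
# the IMAP search criterion 'SINCE 02-Jan-2012', and
# imapdb.INBOX.size > 6000 expands to 'LARGER 6000'.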
  5376. def GE(self, first, second):
  5377. result = None
  5378. name = self.search_fields[first.name]
  5379. if name == "MESSAGE":
  5380. last_message = self.get_last_message(first.tablename)
  5381. result = "%s:%s" % (self.expand(second), last_message)
  5382. elif name == "UID":
  5383. # GT and LT may not return
  5384. # expected sets depending on
  5385. # the uid format implemented
  5386. try:
  5387. pedestal, threshold = self.get_uid_bounds(first.tablename)
  5388. except TypeError, e:
  5389. LOGGER.debug("Error requesting uid bounds: %s", str(e))
  5390. return ""
  5391. lower_limit = self.expand(second)
  5392. result = "UID %s:%s" % (lower_limit, threshold)
  5393. elif name == "DATE":
  5394. result = "SINCE %s" % self.convert_date(second)
  5395. else:
  5396. raise Exception("Operation not supported")
  5397. return result
  5398. def LT(self, first, second):
  5399. result = None
  5400. name = self.search_fields[first.name]
  5401. if name == "MESSAGE":
  5402. result = "%s:%s" % (1, int(self.expand(second)) - 1)
  5403. elif name == "UID":
  5404. try:
  5405. pedestal, threshold = self.get_uid_bounds(first.tablename)
  5406. except TypeError, e:
  5407. LOGGER.debug("Error requesting uid bounds: %s", str(e))
  5408. return ""
  5409. try:
  5410. upper_limit = int(self.expand(second)) - 1
  5411. except (ValueError, TypeError), e:
  5412. raise Exception("Operation not supported (non integer UID)")
  5413. result = "UID %s:%s" % (pedestal, upper_limit)
  5414. elif name == "DATE":
  5415. result = "BEFORE %s" % self.convert_date(second)
  5416. elif name == "SIZE":
  5417. result = "SMALLER %s" % self.expand(second)
  5418. else:
  5419. raise Exception("Operation not supported")
  5420. return result
  5421. def LE(self, first, second):
  5422. result = None
  5423. name = self.search_fields[first.name]
  5424. if name == "MESSAGE":
  5425. result = "%s:%s" % (1, self.expand(second))
  5426. elif name == "UID":
  5427. try:
  5428. pedestal, threshold = self.get_uid_bounds(first.tablename)
  5429. except TypeError, e:
  5430. LOGGER.debug("Error requesting uid bounds: %s", str(e))
  5431. return ""
  5432. upper_limit = int(self.expand(second))
  5433. result = "UID %s:%s" % (pedestal, upper_limit)
  5434. elif name == "DATE":
  5435. result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
  5436. else:
  5437. raise Exception("Operation not supported")
  5438. return result
  5439. def NE(self, first, second=None):
  5440. if (second is None) and isinstance(first, Field):
  5441. # All records special table query
  5442. if first.type == "id":
  5443. return self.GE(first, 1)
  5444. result = self.NOT(self.EQ(first, second))
  5445. result = result.replace("NOT NOT", "").strip()
  5446. return result
  5447. def EQ(self,first,second):
  5448. name = self.search_fields[first.name]
  5449. result = None
  5450. if name is not None:
  5451. if name == "MESSAGE":
  5452. # query by message sequence number
  5453. result = "%s" % self.expand(second)
  5454. elif name == "UID":
  5455. result = "UID %s" % self.expand(second)
  5456. elif name == "DATE":
  5457. result = "ON %s" % self.convert_date(second)
  5458. elif name in self.flags:
  5459. if second:
  5460. result = "%s" % (name.upper()[1:])
  5461. else:
  5462. result = "NOT %s" % (name.upper()[1:])
  5463. else:
  5464. raise Exception("Operation not supported")
  5465. else:
  5466. raise Exception("Operation not supported")
  5467. return result
  5468. def AND(self, first, second):
  5469. result = "%s %s" % (self.expand(first), self.expand(second))
  5470. return result
  5471. def OR(self, first, second):
  5472. result = "OR %s %s" % (self.expand(first), self.expand(second))
  5473. return "%s" % result.replace("OR OR", "OR")
  5474. def NOT(self, first):
  5475. result = "NOT %s" % self.expand(first)
  5476. return result
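# Illustrative note (a sketch; the mailbox field names are assumptions
# following the IMAPAdapter conventions defined earlier in this class):
# AND concatenates the expanded terms and OR prefixes them, so a query
# such as (created > d) & (size < n) expands through GT and LT above
# into an IMAP search string like:
#
#   "SINCE <d + 1 day> SMALLER <n>"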
  5477. ########################################################################
  5478. # end of adapters
  5479. ########################################################################
  5480. ADAPTERS = {
  5481. 'sqlite': SQLiteAdapter,
  5482. 'spatialite': SpatiaLiteAdapter,
  5483. 'sqlite:memory': SQLiteAdapter,
  5484. 'spatialite:memory': SpatiaLiteAdapter,
  5485. 'mysql': MySQLAdapter,
  5486. 'postgres': PostgreSQLAdapter,
  5487. 'postgres:psycopg2': PostgreSQLAdapter,
  5488. 'postgres:pg8000': PostgreSQLAdapter,
  5489. 'postgres2:psycopg2': NewPostgreSQLAdapter,
  5490. 'postgres2:pg8000': NewPostgreSQLAdapter,
  5491. 'oracle': OracleAdapter,
  5492. 'mssql': MSSQLAdapter,
  5493. 'mssql2': MSSQL2Adapter,
  5494. 'sybase': SybaseAdapter,
  5495. 'db2': DB2Adapter,
  5496. 'teradata': TeradataAdapter,
  5497. 'informix': InformixAdapter,
  5498. 'firebird': FireBirdAdapter,
  5499. 'firebird_embedded': FireBirdAdapter,
  5500. 'ingres': IngresAdapter,
  5501. 'ingresu': IngresUnicodeAdapter,
  5502. 'sapdb': SAPDBAdapter,
  5503. 'cubrid': CubridAdapter,
  5504. 'jdbc:sqlite': JDBCSQLiteAdapter,
  5505. 'jdbc:sqlite:memory': JDBCSQLiteAdapter,
  5506. 'jdbc:postgres': JDBCPostgreSQLAdapter,
  5507. 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
  5508. 'google:datastore': GoogleDatastoreAdapter,
  5509. 'google:sql': GoogleSQLAdapter,
  5510. 'couchdb': CouchDBAdapter,
  5511. 'mongodb': MongoDBAdapter,
  5512. 'imap': IMAPAdapter
  5513. }
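# The URI scheme selects the adapter: DAL.__init__ extracts the prefix
# with REGEX_DBNAME (e.g. 'postgres:psycopg2' from
# 'postgres:psycopg2://user:pw@host/db') and instantiates
# ADAPTERS[prefix]. For example:
#
#   ADAPTERS['mysql']        # -> MySQLAdapter
#   ADAPTERS['jdbc:sqlite']  # -> JDBCSQLiteAdapter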
  5514. def sqlhtml_validators(field):
  5515. """
  5516. Field type validation, using web2py's validators mechanism.
5517. Makes sure the content of a field is in line with the declared
5518. fieldtype.
  5519. """
  5520. db = field.db
  5521. if not have_validators:
  5522. return []
  5523. field_type, field_length = field.type, field.length
  5524. if isinstance(field_type, SQLCustomType):
  5525. if hasattr(field_type, 'validator'):
  5526. return field_type.validator
  5527. else:
  5528. field_type = field_type.type
  5529. elif not isinstance(field_type,str):
  5530. return []
  5531. requires=[]
  5532. def ff(r,id):
  5533. row=r(id)
  5534. if not row:
  5535. return id
  5536. elif hasattr(r, '_format') and isinstance(r._format,str):
  5537. return r._format % row
  5538. elif hasattr(r, '_format') and callable(r._format):
  5539. return r._format(row)
  5540. else:
  5541. return id
  5542. if field_type == 'string':
  5543. requires.append(validators.IS_LENGTH(field_length))
  5544. elif field_type == 'text':
  5545. requires.append(validators.IS_LENGTH(field_length))
  5546. elif field_type == 'password':
  5547. requires.append(validators.IS_LENGTH(field_length))
  5548. elif field_type == 'double' or field_type == 'float':
  5549. requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
  5550. elif field_type in ('integer','bigint'):
  5551. requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
  5552. elif field_type.startswith('decimal'):
  5553. requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
  5554. elif field_type == 'date':
  5555. requires.append(validators.IS_DATE())
  5556. elif field_type == 'time':
  5557. requires.append(validators.IS_TIME())
  5558. elif field_type == 'datetime':
  5559. requires.append(validators.IS_DATETIME())
  5560. elif db and field_type.startswith('reference') and \
  5561. field_type.find('.') < 0 and \
  5562. field_type[10:] in db.tables:
  5563. referenced = db[field_type[10:]]
  5564. def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
  5565. field.represent = field.represent or repr_ref
  5566. if hasattr(referenced, '_format') and referenced._format:
  5567. requires = validators.IS_IN_DB(db,referenced._id,
  5568. referenced._format)
  5569. if field.unique:
  5570. requires._and = validators.IS_NOT_IN_DB(db,field)
  5571. if field.tablename == field_type[10:]:
  5572. return validators.IS_EMPTY_OR(requires)
  5573. return requires
  5574. elif db and field_type.startswith('list:reference') and \
  5575. field_type.find('.') < 0 and \
  5576. field_type[15:] in db.tables:
  5577. referenced = db[field_type[15:]]
  5578. def list_ref_repr(ids, row=None, r=referenced, f=ff):
  5579. if not ids:
  5580. return None
  5581. refs = None
  5582. db, id = r._db, r._id
  5583. if isinstance(db._adapter, GoogleDatastoreAdapter):
  5584. def count(values): return db(id.belongs(values)).select(id)
  5585. rx = range(0, len(ids), 30)
  5586. refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
  5587. else:
  5588. refs = db(id.belongs(ids)).select(id)
  5589. return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
  5590. field.represent = field.represent or list_ref_repr
  5591. if hasattr(referenced, '_format') and referenced._format:
  5592. requires = validators.IS_IN_DB(db,referenced._id,
  5593. referenced._format,multiple=True)
  5594. else:
  5595. requires = validators.IS_IN_DB(db,referenced._id,
  5596. multiple=True)
  5597. if field.unique:
  5598. requires._and = validators.IS_NOT_IN_DB(db,field)
  5599. return requires
  5600. elif field_type.startswith('list:'):
5601. def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
  5602. field.represent = field.represent or repr_list
  5603. if field.unique:
  5604. requires.insert(0,validators.IS_NOT_IN_DB(db,field))
  5605. sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
  5606. if field.notnull and not field_type[:2] in sff:
  5607. requires.insert(0, validators.IS_NOT_EMPTY())
  5608. elif not field.notnull and field_type[:2] in sff and requires:
  5609. requires[-1] = validators.IS_EMPTY_OR(requires[-1])
  5610. return requires
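# Illustrative behavior (a sketch; the exact validator instances depend
# on gluon.validators being importable, see have_validators):
#
#   Field('name', 'string', length=32, notnull=True)
#   # -> [IS_NOT_EMPTY(), IS_LENGTH(32)]
#   Field('owner', 'reference person')  # person defined with a _format
#   # -> IS_IN_DB(db, db.person._id, db.person._format)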
  5611. def bar_escape(item):
  5612. return str(item).replace('|', '||')
  5613. def bar_encode(items):
  5614. return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
  5615. def bar_decode_integer(value):
  5616. if not hasattr(value,'split') and hasattr(value,'read'):
  5617. value = value.read()
  5618. return [int(x) for x in value.split('|') if x.strip()]
  5619. def bar_decode_string(value):
  5620. return [x.replace('||', '|') for x in
  5621. REGEX_UNPACK.split(value[1:-1]) if x.strip()]
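# Round trip of the bar encoding used to store list: fields ('|' is
# escaped by doubling; REGEX_UNPACK, defined elsewhere, splits on a
# single unescaped '|'):
#
#   bar_encode(['a|b', 'c'])       # -> '|a||b|c|'
#   bar_decode_string('|a||b|c|')  # -> ['a|b', 'c']
#   bar_decode_integer('|1|2|')    # -> [1, 2]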
  5622. class Row(object):
  5623. """
5624. a dictionary that lets you do d['a'] as well as d.a;
5625. only used to store one row of a query result
  5626. """
  5627. def __init__(self,*args,**kwargs):
  5628. self.__dict__.update(*args,**kwargs)
  5629. def __getitem__(self, key):
  5630. key=str(key)
  5631. m = REGEX_TABLE_DOT_FIELD.match(key)
  5632. if key in self.get('_extra',{}):
  5633. return self._extra[key]
  5634. elif m:
  5635. try:
  5636. return ogetattr(self, m.group(1))[m.group(2)]
  5637. except (KeyError,AttributeError,TypeError):
  5638. key = m.group(2)
  5639. return ogetattr(self, key)
  5640. def __setitem__(self, key, value):
  5641. setattr(self, str(key), value)
  5642. __call__ = __getitem__
  5643. def get(self,key,default=None):
  5644. return self.__dict__.get(key,default)
  5645. def __contains__(self,key):
  5646. return key in self.__dict__
  5647. has_key = __contains__
  5648. def __nonzero__(self):
  5649. return len(self.__dict__)>0
  5650. def update(self, *args, **kwargs):
  5651. self.__dict__.update(*args, **kwargs)
  5652. def keys(self):
  5653. return self.__dict__.keys()
  5654. def items(self):
  5655. return self.__dict__.items()
  5656. def values(self):
  5657. return self.__dict__.values()
  5658. def __iter__(self):
  5659. return self.__dict__.__iter__()
  5660. def iteritems(self):
  5661. return self.__dict__.iteritems()
  5662. def __str__(self):
  5663. ### this could be made smarter
  5664. return '<Row %s>' % self.as_dict()
  5665. def __repr__(self):
  5666. return '<Row %s>' % self.as_dict()
  5667. def __int__(self):
  5668. return object.__getattribute__(self,'id')
  5669. def __eq__(self,other):
  5670. try:
  5671. return self.as_dict() == other.as_dict()
  5672. except AttributeError:
  5673. return False
  5674. def __ne__(self,other):
  5675. return not (self == other)
  5676. def __copy__(self):
  5677. return Row(dict(self))
  5678. def as_dict(self, datetime_to_str=False, custom_types=None):
  5679. SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
  5680. if isinstance(custom_types,(list,tuple,set)):
  5681. SERIALIZABLE_TYPES += custom_types
  5682. elif custom_types:
  5683. SERIALIZABLE_TYPES.append(custom_types)
  5684. d = dict(self)
  5685. for k in copy.copy(d.keys()):
  5686. v=d[k]
  5687. if d[k] is None:
  5688. continue
  5689. elif isinstance(v,Row):
  5690. d[k]=v.as_dict()
  5691. elif isinstance(v,Reference):
  5692. d[k]=int(v)
  5693. elif isinstance(v,decimal.Decimal):
  5694. d[k]=float(v)
  5695. elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
  5696. if datetime_to_str:
  5697. d[k] = v.isoformat().replace('T',' ')[:19]
  5698. elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
  5699. del d[k]
  5700. return d
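# Example of the three equivalent access styles:
#
#   r = Row(id=1, name='Jim')
#   r.name == r['name'] == r('name')  # -> True
#   r.as_dict()                       # -> {'id': 1, 'name': 'Jim'}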
  5701. ################################################################################
  5702. # Everything below should be independent of the specifics of the database
  5703. # and should work for RDBMs and some NoSQL databases
  5704. ################################################################################
  5705. class SQLCallableList(list):
  5706. def __call__(self):
  5707. return copy.copy(self)
  5708. def smart_query(fields,text):
  5709. if not isinstance(fields,(list,tuple)):
  5710. fields = [fields]
  5711. new_fields = []
  5712. for field in fields:
  5713. if isinstance(field,Field):
  5714. new_fields.append(field)
  5715. elif isinstance(field,Table):
  5716. for ofield in field:
  5717. new_fields.append(ofield)
  5718. else:
  5719. raise RuntimeError, "fields must be a list of fields"
  5720. fields = new_fields
  5721. field_map = {}
  5722. for field in fields:
  5723. n = field.name.lower()
  5724. if not n in field_map:
  5725. field_map[n] = field
  5726. n = str(field).lower()
  5727. if not n in field_map:
  5728. field_map[n] = field
  5729. constants = {}
  5730. i = 0
  5731. while True:
  5732. m = REGEX_CONST_STRING.search(text)
  5733. if not m: break
  5734. text = text[:m.start()]+('#%i' % i)+text[m.end():]
  5735. constants[str(i)] = m.group()[1:-1]
  5736. i+=1
  5737. text = re.sub('\s+',' ',text).lower()
  5738. for a,b in [('&','and'),
  5739. ('|','or'),
  5740. ('~','not'),
  5741. ('==','='),
  5742. ('<','<'),
  5743. ('>','>'),
  5744. ('<=','<='),
  5745. ('>=','>='),
  5746. ('<>','!='),
  5747. ('=<','<='),
  5748. ('=>','>='),
  5749. ('=','='),
  5750. (' less or equal than ','<='),
  5751. (' greater or equal than ','>='),
  5752. (' equal or less than ','<='),
  5753. (' equal or greater than ','>='),
  5754. (' less or equal ','<='),
  5755. (' greater or equal ','>='),
  5756. (' equal or less ','<='),
  5757. (' equal or greater ','>='),
  5758. (' not equal to ','!='),
  5759. (' not equal ','!='),
  5760. (' equal to ','='),
  5761. (' equal ','='),
5762. (' equals ','='),
  5763. (' less than ','<'),
  5764. (' greater than ','>'),
  5765. (' starts with ','startswith'),
  5766. (' ends with ','endswith'),
  5767. (' is ','=')]:
  5768. if a[0]==' ':
  5769. text = text.replace(' is'+a,' %s ' % b)
  5770. text = text.replace(a,' %s ' % b)
  5771. text = re.sub('\s+',' ',text).lower()
  5772. text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
  5773. query = field = neg = op = logic = None
  5774. for item in text.split():
  5775. if field is None:
  5776. if item == 'not':
  5777. neg = True
  5778. elif not neg and not logic and item in ('and','or'):
  5779. logic = item
  5780. elif item in field_map:
  5781. field = field_map[item]
  5782. else:
  5783. raise RuntimeError, "Invalid syntax"
  5784. elif not field is None and op is None:
  5785. op = item
  5786. elif not op is None:
  5787. if item.startswith('#'):
  5788. if not item[1:] in constants:
  5789. raise RuntimeError, "Invalid syntax"
  5790. value = constants[item[1:]]
  5791. else:
  5792. value = item
  5793. if field.type in ('text','string'):
  5794. if op == '=': op = 'like'
  5795. if op == '=': new_query = field==value
  5796. elif op == '<': new_query = field<value
  5797. elif op == '>': new_query = field>value
  5798. elif op == '<=': new_query = field<=value
  5799. elif op == '>=': new_query = field>=value
  5800. elif op == '!=': new_query = field!=value
  5801. elif field.type in ('text','string'):
  5802. if op == 'contains': new_query = field.contains(value)
  5803. elif op == 'like': new_query = field.like(value)
  5804. elif op == 'startswith': new_query = field.startswith(value)
  5805. elif op == 'endswith': new_query = field.endswith(value)
  5806. else: raise RuntimeError, "Invalid operation"
  5807. elif field._db._adapter.dbengine=='google:datastore' and \
  5808. field.type in ('list:integer', 'list:string', 'list:reference'):
  5809. if op == 'contains': new_query = field.contains(value)
  5810. else: raise RuntimeError, "Invalid operation"
  5811. else: raise RuntimeError, "Invalid operation"
  5812. if neg: new_query = ~new_query
  5813. if query is None:
  5814. query = new_query
  5815. elif logic == 'and':
  5816. query &= new_query
  5817. elif logic == 'or':
  5818. query |= new_query
  5819. field = op = neg = logic = None
  5820. return query
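# Illustrative sketch (assumes a db.person table as in the module
# docstring; quoted constants are protected before the operator
# rewriting above):
#
#   smart_query([db.person], 'name starts with "J"')
#   # -> equivalent to the query db.person.name.startswith('J')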
  5821. class DAL(object):
  5822. """
  5823. an instance of this class represents a database connection
  5824. Example::
  5825. db = DAL('sqlite://test.db')
  5826. db.define_table('tablename', Field('fieldname1'),
  5827. Field('fieldname2'))
  5828. """
  5829. def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
  5830. if not hasattr(THREAD_LOCAL,'db_instances'):
  5831. THREAD_LOCAL.db_instances = {}
  5832. if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
  5833. THREAD_LOCAL.db_instances_zombie = {}
  5834. if uri == '<zombie>':
  5835. db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
  5836. if db_uid in THREAD_LOCAL.db_instances:
  5837. db_group = THREAD_LOCAL.db_instances[db_uid]
  5838. db = db_group[-1]
  5839. elif db_uid in THREAD_LOCAL.db_instances_zombie:
  5840. db = THREAD_LOCAL.db_instances_zombie[db_uid]
  5841. else:
  5842. db = super(DAL, cls).__new__(cls)
  5843. THREAD_LOCAL.db_instances_zombie[db_uid] = db
  5844. else:
  5845. db_uid = kwargs.get('db_uid',hashlib.md5(repr(uri)).hexdigest())
  5846. if db_uid in THREAD_LOCAL.db_instances_zombie:
  5847. db = THREAD_LOCAL.db_instances_zombie[db_uid]
  5848. del THREAD_LOCAL.db_instances_zombie[db_uid]
  5849. else:
  5850. db = super(DAL, cls).__new__(cls)
  5851. db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
  5852. db_group.append(db)
  5853. THREAD_LOCAL.db_instances[db_uid] = db_group
  5854. db._db_uid = db_uid
  5855. return db
  5856. @staticmethod
  5857. def set_folder(folder):
  5858. """
  5859. # ## this allows gluon to set a folder for this thread
  5860. # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
  5861. """
  5862. BaseAdapter.set_folder(folder)
  5863. @staticmethod
  5864. def distributed_transaction_begin(*instances):
  5865. if not instances:
  5866. return
  5867. thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
5868. keys = ['%s.%i' % (thread_key, i) for i in range(len(instances))]
5869. instances = list(enumerate(instances))  # a list, so it survives repeated iteration
  5870. for (i, db) in instances:
  5871. if not db._adapter.support_distributed_transaction():
  5872. raise SyntaxError, \
5873. 'distributed transaction not supported by %s' % db._dbname
  5874. for (i, db) in instances:
  5875. db._adapter.distributed_transaction_begin(keys[i])
  5876. @staticmethod
  5877. def distributed_transaction_commit(*instances):
  5878. if not instances:
  5879. return
5880. instances = list(enumerate(instances))  # a list: it is iterated more than once below
  5881. thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
  5882. keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
  5883. for (i, db) in instances:
  5884. if not db._adapter.support_distributed_transaction():
  5885. raise SyntaxError, \
5886. 'distributed transaction not supported by %s' % db._dbname
  5887. try:
  5888. for (i, db) in instances:
  5889. db._adapter.prepare(keys[i])
  5890. except:
  5891. for (i, db) in instances:
  5892. db._adapter.rollback_prepared(keys[i])
  5893. raise RuntimeError, 'failure to commit distributed transaction'
  5894. else:
  5895. for (i, db) in instances:
  5896. db._adapter.commit_prepared(keys[i])
  5897. return
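# Two-phase commit sketch across two connections (both adapters must
# support distributed transactions; the URIs here are assumptions):
#
#   db1 = DAL('postgres:psycopg2://u:p@host/a')
#   db2 = DAL('postgres:psycopg2://u:p@host/b')
#   DAL.distributed_transaction_begin(db1, db2)
#   # ... inserts/updates on both ...
#   DAL.distributed_transaction_commit(db1, db2)  # prepare all, then commit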
  5898. def __init__(self, uri='sqlite://dummy.db',
  5899. pool_size=0, folder=None,
  5900. db_codec='UTF-8', check_reserved=None,
  5901. migrate=True, fake_migrate=False,
  5902. migrate_enabled=True, fake_migrate_all=False,
  5903. decode_credentials=False, driver_args=None,
  5904. adapter_args=None, attempts=5, auto_import=False,
  5905. bigint_id=False,debug=False,lazy_tables=False,
  5906. db_uid=None, do_connect=True):
  5907. """
  5908. Creates a new Database Abstraction Layer instance.
  5909. Keyword arguments:
  5910. :uri: string that contains information for connecting to a database.
  5911. (default: 'sqlite://dummy.db')
  5912. :pool_size: How many open connections to make to the database object.
  5913. :folder: <please update me>
  5914. :db_codec: string encoding of the database (default: 'UTF-8')
  5915. :check_reserved: list of adapters to check tablenames and column names
  5916. against sql reserved keywords. (Default None)
  5917. * 'common' List of sql keywords that are common to all database types
  5918. such as "SELECT, INSERT". (recommended)
  5919. * 'all' Checks against all known SQL keywords. (not recommended)
  5920. <adaptername> Checks against the specific adapters list of keywords
  5921. (recommended)
  5922. * '<adaptername>_nonreserved' Checks against the specific adapters
  5923. list of nonreserved keywords. (if available)
  5924. :migrate (defaults to True) sets default migrate behavior for all tables
  5925. :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
  5926. :migrate_enabled (defaults to True). If set to False disables ALL migrations
  5927. :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
  5928. :attempts (defaults to 5). Number of times to attempt connecting
  5929. """
  5930. if uri == '<zombie>' and db_uid is not None: return
  5931. if not decode_credentials:
  5932. credential_decoder = lambda cred: cred
  5933. else:
  5934. credential_decoder = lambda cred: urllib.unquote(cred)
  5935. if folder:
  5936. self.set_folder(folder)
  5937. self._uri = uri
  5938. self._pool_size = pool_size
  5939. self._db_codec = db_codec
  5940. self._lastsql = ''
  5941. self._timings = []
  5942. self._pending_references = {}
  5943. self._request_tenant = 'request_tenant'
  5944. self._common_fields = []
  5945. self._referee_name = '%(table)s'
  5946. self._bigint_id = bigint_id
  5947. self._debug = debug
  5948. self._migrated = []
  5949. self._LAZY_TABLES = {}
  5950. self._lazy_tables = lazy_tables
  5951. if not str(attempts).isdigit() or attempts < 0:
  5952. attempts = 5
  5953. if uri:
  5954. uris = isinstance(uri,(list,tuple)) and uri or [uri]
  5955. error = ''
  5956. connected = False
  5957. for k in range(attempts):
  5958. for uri in uris:
  5959. try:
  5960. if is_jdbc and not uri.startswith('jdbc:'):
  5961. uri = 'jdbc:'+uri
  5962. self._dbname = REGEX_DBNAME.match(uri).group()
  5963. if not self._dbname in ADAPTERS:
  5964. raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname
5965. # notice: driver_args or {} (and adapter_args or {}) is passed so that
5966. # the mutable default {} is never shared across DAL instances
  5967. kwargs = dict(db=self,uri=uri,
  5968. pool_size=pool_size,
  5969. folder=folder,
  5970. db_codec=db_codec,
  5971. credential_decoder=credential_decoder,
  5972. driver_args=driver_args or {},
  5973. adapter_args=adapter_args or {},
  5974. do_connect=do_connect)
  5975. self._adapter = ADAPTERS[self._dbname](**kwargs)
  5976. if bigint_id:
  5977. types = ADAPTERS[self._dbname].types
  5978. self._adapter.types = copy.copy(types) # copy so multiple DAL() possible
  5979. if 'big-id' in types and 'reference' in types:
  5980. self._adapter.types['id'] = types['big-id']
  5981. self._adapter.types['reference'] = types['big-reference']
  5982. connected = True
  5983. break
  5984. except SyntaxError:
  5985. raise
  5986. except Exception, error:
  5987. tb = traceback.format_exc()
  5988. sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
  5989. if connected:
  5990. break
  5991. else:
  5992. time.sleep(1)
  5993. if not connected:
  5994. raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, tb)
  5995. else:
  5996. self._adapter = BaseAdapter(db=self,pool_size=0,
  5997. uri='None',folder=folder,
  5998. db_codec=db_codec)
  5999. migrate = fake_migrate = False
  6000. adapter = self._adapter
  6001. self._uri_hash = hashlib.md5(adapter.uri).hexdigest()
  6002. self._tables = SQLCallableList()
  6003. self.check_reserved = check_reserved
  6004. if self.check_reserved:
  6005. from reserved_sql_keywords import ADAPTERS as RSK
  6006. self.RSK = RSK
  6007. self._migrate = migrate
  6008. self._fake_migrate = fake_migrate
  6009. self._migrate_enabled = migrate_enabled
  6010. self._fake_migrate_all = fake_migrate_all
  6011. if auto_import:
  6012. self.import_table_definitions(adapter.folder)
  6013. @property
  6014. def tables(self):
  6015. return self._tables
  6016. def import_table_definitions(self,path,migrate=False,fake_migrate=False):
  6017. pattern = pjoin(path,self._uri_hash+'_*.table')
  6018. for filename in glob.glob(pattern):
  6019. tfile = self._adapter.file_open(filename, 'r')
  6020. try:
  6021. sql_fields = cPickle.load(tfile)
  6022. name = filename[len(pattern)-7:-6]
  6023. mf = [(value['sortable'],
  6024. Field(key,
  6025. type=value['type'],
  6026. length=value.get('length',None),
  6027. notnull=value.get('notnull',False),
  6028. unique=value.get('unique',False))) \
  6029. for key, value in sql_fields.iteritems()]
  6030. mf.sort(lambda a,b: cmp(a[0],b[0]))
  6031. self.define_table(name,*[item[1] for item in mf],
  6032. **dict(migrate=migrate,fake_migrate=fake_migrate))
  6033. finally:
  6034. self._adapter.file_close(tfile)
  6035. def check_reserved_keyword(self, name):
  6036. """
  6037. Validates ``name`` against SQL keywords
  6038. Uses self.check_reserve which is a list of
  6039. operators to use.
  6040. self.check_reserved
  6041. ['common', 'postgres', 'mysql']
  6042. self.check_reserved
  6043. ['all']
  6044. """
  6045. for backend in self.check_reserved:
  6046. if name.upper() in self.RSK[backend]:
  6047. raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
  6048. def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
  6049. """
  6050. EXAMPLE:
  6051. db.define_table('person',Field('name'),Field('info'))
  6052. db.define_table('pet',Field('owner',db.person),Field('name'),Field('info'))
  6053. @request.restful()
  6054. def index():
  6055. def GET(*args,**vars):
  6056. patterns = [
  6057. "/friends[person]",
  6058. "/{friend.name.startswith}",
  6059. "/{friend.name}/:field",
  6060. "/{friend.name}/pets[pet.owner]",
  6061. "/{friend.name}/pet[pet.owner]/{pet.name}",
  6062. "/{friend.name}/pet[pet.owner]/{pet.name}/:field"
  6063. ]
  6064. parser = db.parse_as_rest(patterns,args,vars)
  6065. if parser.status == 200:
  6066. return dict(content=parser.response)
  6067. else:
  6068. raise HTTP(parser.status,parser.error)
  6069. def POST(table_name,**vars):
  6070. if table_name == 'person':
  6071. return db.person.validate_and_insert(**vars)
  6072. elif table_name == 'pet':
  6073. return db.pet.validate_and_insert(**vars)
  6074. else:
  6075. raise HTTP(400)
  6076. return locals()
  6077. """
  6078. db = self
  6079. re1 = REGEX_SEARCH_PATTERN
  6080. re2 = REGEX_SQUARE_BRACKETS
  6081. def auto_table(table,base='',depth=0):
  6082. patterns = []
  6083. for field in db[table].fields:
  6084. if base:
  6085. tag = '%s/%s' % (base,field.replace('_','-'))
  6086. else:
  6087. tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
  6088. f = db[table][field]
  6089. if not f.readable: continue
  6090. if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
  6091. tag += '/{%s.%s}' % (table,field)
  6092. patterns.append(tag)
  6093. patterns.append(tag+'/:field')
  6094. elif f.type.startswith('boolean'):
  6095. tag += '/{%s.%s}' % (table,field)
  6096. patterns.append(tag)
  6097. patterns.append(tag+'/:field')
  6098. elif f.type in ('float','double','integer','bigint'):
  6099. tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
  6100. patterns.append(tag)
  6101. patterns.append(tag+'/:field')
  6102. elif f.type.startswith('list:'):
  6103. tag += '/{%s.%s.contains}' % (table,field)
  6104. patterns.append(tag)
  6105. patterns.append(tag+'/:field')
  6106. elif f.type in ('date','datetime'):
  6107. tag+= '/{%s.%s.year}' % (table,field)
  6108. patterns.append(tag)
  6109. patterns.append(tag+'/:field')
  6110. tag+='/{%s.%s.month}' % (table,field)
  6111. patterns.append(tag)
  6112. patterns.append(tag+'/:field')
  6113. tag+='/{%s.%s.day}' % (table,field)
  6114. patterns.append(tag)
  6115. patterns.append(tag+'/:field')
  6116. if f.type in ('datetime','time'):
  6117. tag+= '/{%s.%s.hour}' % (table,field)
  6118. patterns.append(tag)
  6119. patterns.append(tag+'/:field')
  6120. tag+='/{%s.%s.minute}' % (table,field)
  6121. patterns.append(tag)
  6122. patterns.append(tag+'/:field')
  6123. tag+='/{%s.%s.second}' % (table,field)
  6124. patterns.append(tag)
  6125. patterns.append(tag+'/:field')
  6126. if depth>0:
  6127. for f in db[table]._referenced_by:
  6128. tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
  6129. patterns.append(tag)
  6130. patterns += auto_table(table,base=tag,depth=depth-1)
  6131. return patterns
  6132. if patterns==DEFAULT:
  6133. patterns=[]
  6134. for table in db.tables:
  6135. if not table.startswith('auth_'):
  6136. patterns.append('/%s[%s]' % (table,table))
  6137. patterns += auto_table(table,base='',depth=1)
  6138. else:
  6139. i = 0
  6140. while i<len(patterns):
  6141. pattern = patterns[i]
  6142. tokens = pattern.split('/')
  6143. if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
  6144. new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
  6145. '/'.join(tokens[:-1]))
  6146. patterns = patterns[:i]+new_patterns+patterns[i+1:]
  6147. i += len(new_patterns)
  6148. else:
  6149. i += 1
  6150. if '/'.join(args) == 'patterns':
  6151. return Row({'status':200,'pattern':'list',
  6152. 'error':None,'response':patterns})
  6153. for pattern in patterns:
  6154. otable=table=None
  6155. if not isinstance(queries,dict):
  6156. dbset=db(queries)
  6157. i=0
  6158. tags = pattern[1:].split('/')
  6159. if len(tags)!=len(args):
  6160. continue
  6161. for tag in tags:
  6162. if re1.match(tag):
  6163. # print 're1:'+tag
  6164. tokens = tag[1:-1].split('.')
  6165. table, field = tokens[0], tokens[1]
  6166. if not otable or table == otable:
  6167. if len(tokens)==2 or tokens[2]=='eq':
  6168. query = db[table][field]==args[i]
  6169. elif tokens[2]=='ne':
  6170. query = db[table][field]!=args[i]
  6171. elif tokens[2]=='lt':
  6172. query = db[table][field]<args[i]
  6173. elif tokens[2]=='gt':
  6174. query = db[table][field]>args[i]
  6175. elif tokens[2]=='ge':
  6176. query = db[table][field]>=args[i]
  6177. elif tokens[2]=='le':
  6178. query = db[table][field]<=args[i]
  6179. elif tokens[2]=='year':
  6180. query = db[table][field].year()==args[i]
  6181. elif tokens[2]=='month':
  6182. query = db[table][field].month()==args[i]
  6183. elif tokens[2]=='day':
  6184. query = db[table][field].day()==args[i]
  6185. elif tokens[2]=='hour':
  6186. query = db[table][field].hour()==args[i]
  6187. elif tokens[2]=='minute':
  6188. query = db[table][field].minutes()==args[i]
  6189. elif tokens[2]=='second':
  6190. query = db[table][field].seconds()==args[i]
  6191. elif tokens[2]=='startswith':
  6192. query = db[table][field].startswith(args[i])
  6193. elif tokens[2]=='contains':
  6194. query = db[table][field].contains(args[i])
  6195. else:
  6196. raise RuntimeError, "invalid pattern: %s" % pattern
  6197. if len(tokens)==4 and tokens[3]=='not':
  6198. query = ~query
  6199. elif len(tokens)>=4:
  6200. raise RuntimeError, "invalid pattern: %s" % pattern
  6201. if not otable and isinstance(queries,dict):
  6202. dbset = db(queries[table])
  6203. dbset=dbset(query)
  6204. else:
  6205. raise RuntimeError, "missing relation in pattern: %s" % pattern
  6206. elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
  6207. ref = tag[tag.find('[')+1:-1]
  6208. if '.' in ref and otable:
  6209. table,field = ref.split('.')
  6210. # print table,field
  6211. if nested_select:
  6212. try:
  6213. dbset=db(db[table][field].belongs(dbset._select(db[otable]._id)))
  6214. except ValueError:
  6215. return Row({'status':400,'pattern':pattern,
  6216. 'error':'invalid path','response':None})
  6217. else:
  6218. items = [item.id for item in dbset.select(db[otable]._id)]
  6219. dbset=db(db[table][field].belongs(items))
  6220. else:
  6221. table = ref
  6222. if not otable and isinstance(queries,dict):
  6223. dbset = db(queries[table])
  6224. dbset=dbset(db[table])
  6225. elif tag==':field' and table:
  6226. # # print 're3:'+tag
  6227. field = args[i]
  6228. if not field in db[table]: break
  6229. # hand-built patterns should respect .readable=False as well
  6230. if not db[table][field].readable:
  6231. return Row({'status':418,'pattern':pattern,
  6232. 'error':'I\'m a teapot','response':None})
  6233. try:
  6234. item = dbset.select(db[table][field],limitby=(0,1)).first()
  6235. except ValueError:
  6236. return Row({'status':400,'pattern':pattern,
  6237. 'error':'invalid path','response':None})
  6238. if not item:
  6239. return Row({'status':404,'pattern':pattern,
  6240. 'error':'record not found','response':None})
  6241. else:
  6242. return Row({'status':200,'response':item[field],
  6243. 'pattern':pattern})
  6244. elif tag != args[i]:
  6245. break
  6246. otable = table
  6247. i += 1
  6248. if i==len(tags) and table:
  6249. ofields = vars.get('order',db[table]._id.name).split('|')
  6250. try:
  6251. orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
  6252. except (KeyError, AttributeError):
  6253. return Row({'status':400,'error':'invalid orderby','response':None})
  6254. fields = [field for field in db[table] if field.readable]
  6255. count = dbset.count()
  6256. try:
  6257. offset = int(vars.get('offset',None) or 0)
  6258. limits = (offset,int(vars.get('limit',None) or 1000)+offset)
  6259. except ValueError:
6260. return Row({'status':400,'error':'invalid limits','response':None})
  6261. if count > limits[1]-limits[0]:
6262. return Row({'status':400,'error':'too many records','response':None})
  6263. try:
  6264. response = dbset.select(limitby=limits,orderby=orderby,*fields)
  6265. except ValueError:
  6266. return Row({'status':400,'pattern':pattern,
  6267. 'error':'invalid path','response':None})
  6268. return Row({'status':200,'response':response,
  6269. 'pattern':pattern,'count':count})
  6270. return Row({'status':400,'error':'no matching pattern','response':None})
  6271. def define_table(
  6272. self,
  6273. tablename,
  6274. *fields,
  6275. **args
  6276. ):
  6277. if not isinstance(tablename,str):
  6278. raise SyntaxError, "missing table name"
  6279. elif hasattr(self,tablename) or tablename in self.tables:
  6280. if not args.get('redefine',False):
  6281. raise SyntaxError, 'table already defined: %s' % tablename
  6282. elif tablename.startswith('_') or hasattr(self,tablename) or \
  6283. REGEX_PYTHON_KEYWORDS.match(tablename):
  6284. raise SyntaxError, 'invalid table name: %s' % tablename
  6285. elif self.check_reserved:
  6286. self.check_reserved_keyword(tablename)
  6287. else:
  6288. invalid_args = set(args)-TABLE_ARGS
  6289. if invalid_args:
  6290. raise SyntaxError, 'invalid table "%s" attributes: %s' \
  6291. % (tablename,invalid_args)
  6292. if self._lazy_tables and not tablename in self._LAZY_TABLES:
  6293. self._LAZY_TABLES[tablename] = (tablename,fields,args)
  6294. table = None
  6295. else:
  6296. table = self.lazy_define_table(tablename,*fields,**args)
  6297. if not tablename in self.tables:
  6298. self.tables.append(tablename)
  6299. return table
  6300. def lazy_define_table(
  6301. self,
  6302. tablename,
  6303. *fields,
  6304. **args
  6305. ):
  6306. args_get = args.get
  6307. common_fields = self._common_fields
  6308. if common_fields:
  6309. fields = list(fields) + list(common_fields)
  6310. table_class = args_get('table_class',Table)
  6311. table = table_class(self, tablename, *fields, **args)
  6312. table._actual = True
  6313. self[tablename] = table
  6314. table._create_references() # must follow above line to handle self references
  6315. migrate = self._migrate_enabled and args_get('migrate',self._migrate)
  6316. if migrate and not self._uri in (None,'None') \
  6317. or self._adapter.dbengine=='google:datastore':
  6318. fake_migrate = self._fake_migrate_all or \
  6319. args_get('fake_migrate',self._fake_migrate)
  6320. polymodel = args_get('polymodel',None)
  6321. try:
  6322. GLOBAL_LOCKER.acquire()
  6323. self._adapter.create_table(table,migrate=migrate,
  6324. fake_migrate=fake_migrate,
  6325. polymodel=polymodel)
  6326. finally:
  6327. GLOBAL_LOCKER.release()
  6328. else:
  6329. table._dbt = None
  6330. on_define = args_get('on_define',None)
  6331. if on_define: on_define(table)
  6332. return table
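# With DAL(..., lazy_tables=True), define_table only records the
# (tablename, fields, args) triple; the Table is actually built (and
# migrated) on first attribute access via __getattr__, which calls
# lazy_define_table. A sketch:
#
#   db = DAL('sqlite://storage.sqlite', lazy_tables=True)
#   db.define_table('note', Field('body', 'text'))  # returns None here
#   db.note.insert(body='hi')  # the table is built on this first access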
  6333. def __contains__(self, tablename):
  6334. try:
  6335. return tablename in self.tables
  6336. except AttributeError:
  6337. # The instance has no .tables attribute yet
  6338. return False
  6339. has_key = __contains__
  6340. def get(self,key,default=None):
  6341. return self.__dict__.get(key,default)
  6342. def __iter__(self):
  6343. for tablename in self.tables:
  6344. yield self[tablename]
  6345. def __getitem__(self, key):
  6346. return self.__getattr__(str(key))
  6347. def __getattr__(self, key):
  6348. if ogetattr(self,'_lazy_tables') and \
  6349. key in ogetattr(self,'_LAZY_TABLES'):
  6350. tablename, fields, args = self._LAZY_TABLES.pop(key)
  6351. return self.lazy_define_table(tablename,*fields,**args)
  6352. return ogetattr(self, key)
  6353. def __setitem__(self, key, value):
  6354. osetattr(self, str(key), value)
  6355. def __setattr__(self, key, value):
  6356. if key[:1]!='_' and key in self:
  6357. raise SyntaxError, \
  6358. 'Object %s exists and cannot be redefined' % key
  6359. osetattr(self,key,value)
  6360. __delitem__ = object.__delattr__
  6361. def __repr__(self):
  6362. if hasattr(self,'_uri'):
  6363. return '<DAL uri="%s">' % hide_password(str(self._uri))
  6364. else:
  6365. return '<DAL db_uid="%s">' % self._db_uid
  6366. def smart_query(self,fields,text):
  6367. return Set(self, smart_query(fields,text))
  6368. def __call__(self, query=None, ignore_common_filters=None):
  6369. if isinstance(query,Table):
  6370. query = self._adapter.id_query(query)
  6371. elif isinstance(query,Field):
  6372. query = query!=None
  6373. return Set(self, query, ignore_common_filters=ignore_common_filters)
  6374. def commit(self):
  6375. self._adapter.commit()
  6376. def rollback(self):
  6377. self._adapter.rollback()
  6378. def close(self):
  6379. self._adapter.close()
  6380. if self._db_uid in THREAD_LOCAL.db_instances:
  6381. db_group = THREAD_LOCAL.db_instances[self._db_uid]
  6382. db_group.remove(self)
  6383. if not db_group:
  6384. del THREAD_LOCAL.db_instances[self._db_uid]
  6385. def executesql(self, query, placeholders=None, as_dict=False,
  6386. fields=None, colnames=None):
  6387. """
6388. placeholders is optional and will always be None when using the DAL.
  6389. If using raw SQL with placeholders, placeholders may be
  6390. a sequence of values to be substituted in
  6391. or, (if supported by the DB driver), a dictionary with keys
  6392. matching named placeholders in your SQL.
6393. Added 2009-12-05 "as_dict" optional argument. It defaults to False
6394. and is ignored when using the DAL. If using raw SQL it can be set to True
  6395. and the results cursor returned by the DB driver will be
  6396. converted to a sequence of dictionaries keyed with the db
  6397. field names. Tested with SQLite but should work with any database
  6398. since the cursor.description used to get field names is part of the
  6399. Python dbi 2.0 specs. Results returned with as_dict=True are
  6400. the same as those returned when applying .to_list() to a DAL query.
  6401. [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]
  6402. Added 2012-08-24 "fields" and "colnames" optional arguments. If either
  6403. is provided, the results cursor returned by the DB driver will be
  6404. converted to a DAL Rows object using the db._adapter.parse() method.
  6405. The "fields" argument is a list of DAL Field objects that match the
  6406. fields returned from the DB. The Field objects should be part of one or
  6407. more Table objects defined on the DAL object. The "fields" list can
  6408. include one or more DAL Table objects in addition to or instead of
  6409. including Field objects, or it can be just a single table (not in a
  6410. list). In that case, the Field objects will be extracted from the
  6411. table(s).
  6412. Instead of specifying the "fields" argument, the "colnames" argument
  6413. can be specified as a list of field names in tablename.fieldname format.
  6414. Again, these should represent tables and fields defined on the DAL
  6415. object.
  6416. It is also possible to specify both "fields" and the associated
  6417. "colnames". In that case, "fields" can also include DAL Expression
  6418. objects in addition to Field objects. For Field objects in "fields",
  6419. the associated "colnames" must still be in tablename.fieldname format.
  6420. For Expression objects in "fields", the associated "colnames" can
  6421. be any arbitrary labels.
  6422. Note, the DAL Table objects referred to by "fields" or "colnames" can
  6423. be dummy tables and do not have to represent any real tables in the
  6424. database. Also, note that the "fields" and "colnames" must be in the
  6425. same order as the fields in the results cursor returned from the DB.
  6426. """
  6427. adapter = self._adapter
  6428. if placeholders:
  6429. adapter.execute(query, placeholders)
  6430. else:
  6431. adapter.execute(query)
  6432. if as_dict:
  6433. if not hasattr(adapter.cursor,'description'):
  6434. raise RuntimeError, "database does not support executesql(...,as_dict=True)"
  6435. # Non-DAL legacy db query, converts cursor results to dict.
  6436. # sequence of 7-item sequences. each sequence tells about a column.
  6437. # first item is always the field name according to Python Database API specs
  6438. columns = adapter.cursor.description
  6439. # reduce the column info down to just the field names
  6440. fields = [f[0] for f in columns]
  6441. # will hold our finished resultset in a list
  6442. data = adapter._fetchall()
  6443. # convert the list for each row into a dictionary so it's
  6444. # easier to work with. row['field_name'] rather than row[0]
  6445. return [dict(zip(fields,row)) for row in data]
  6446. try:
  6447. data = adapter._fetchall()
  6448. except:
  6449. return None
  6450. if fields or colnames:
  6451. fields = [] if fields is None else fields
  6452. if not isinstance(fields, list):
  6453. fields = [fields]
  6454. extracted_fields = []
  6455. for field in fields:
  6456. if isinstance(field, Table):
  6457. extracted_fields.extend([f for f in field])
  6458. else:
  6459. extracted_fields.append(field)
  6460. if not colnames:
  6461. colnames = ['%s.%s' % (f.tablename, f.name)
  6462. for f in extracted_fields]
  6463. data = adapter.parse(
  6464. data, fields=extracted_fields, colnames=colnames)
  6465. return data
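# Illustrative usage (the table and field names are assumptions):
#
#   db.executesql('SELECT id, name FROM person;', as_dict=True)
#   # -> [{'id': 1, 'name': 'Jim'}, ...]
#   db.executesql('SELECT person.id, person.name FROM person;',
#                 fields=db.person)
#   # -> a Rows object built through db._adapter.parse(...)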
  6466. def _remove_references_to(self, thistable):
  6467. for table in self:
  6468. table._referenced_by = [field for field in table._referenced_by
  6469. if not field.table==thistable]
  6470. def export_to_csv_file(self, ofile, *args, **kwargs):
6471. step = int(kwargs.get('max_fetch_rows',500))
  6472. write_colnames = kwargs['write_colnames'] = \
  6473. kwargs.get("write_colnames", True)
  6474. for table in self.tables:
  6475. ofile.write('TABLE %s\r\n' % table)
  6476. query = self._adapter.id_query(self[table])
  6477. nrows = self(query).count()
  6478. kwargs['write_colnames'] = write_colnames
  6479. for k in range(0,nrows,step):
  6480. self(query).select(limitby=(k,k+step)).export_to_csv_file(
  6481. ofile, *args, **kwargs)
  6482. kwargs['write_colnames'] = False
  6483. ofile.write('\r\n\r\n')
  6484. ofile.write('END')
  6485. def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
  6486. unique='uuid', *args, **kwargs):
  6487. #if id_map is None: id_map={}
  6488. id_offset = {} # only used if id_map is None
  6489. for line in ifile:
  6490. line = line.strip()
  6491. if not line:
  6492. continue
  6493. elif line == 'END':
  6494. return
  6495. elif not line.startswith('TABLE ') or not line[6:] in self.tables:
  6496. raise SyntaxError, 'invalid file format'
  6497. else:
  6498. tablename = line[6:]
  6499. self[tablename].import_from_csv_file(
  6500. ifile, id_map, null, unique, id_offset, *args, **kwargs)
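# Backup/restore sketch using the 'TABLE <name>' ... 'END' framing
# written by export_to_csv_file above:
#
#   db.export_to_csv_file(open('backup.csv', 'wb'))
#   db.import_from_csv_file(open('backup.csv', 'rb'))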
  6501. def DAL_unpickler(db_uid):
  6502. return DAL('<zombie>',db_uid=db_uid)
  6503. def DAL_pickler(db):
  6504. return DAL_unpickler, (db._db_uid,)
  6505. copy_reg.pickle(DAL, DAL_pickler, DAL_unpickler)
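# Pickling a DAL stores only its db_uid; unpickling yields a '<zombie>'
# that reattaches to the live instance group for that uid in the same
# thread. A sketch:
#
#   s = cPickle.dumps(db)
#   db2 = cPickle.loads(s)  # shares the connection group of db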
  6506. class SQLALL(object):
  6507. """
  6508. Helper class providing a comma-separated string having all the field names
  6509. (prefixed by table name and '.')
6510. normally only used internally by gluon.dal
  6511. """
  6512. def __init__(self, table):
  6513. self._table = table
  6514. def __str__(self):
  6515. return ', '.join([str(field) for field in self._table])
  6516. class Reference(int):
  6517. def __allocate(self):
  6518. if not self._record:
  6519. self._record = self._table[int(self)]
  6520. if not self._record:
  6521. raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self))
6522. def __getattr__(self, key, default=None):
  6523. if key == 'id':
  6524. return int(self)
  6525. self.__allocate()
6526. return self._record.get(key, default)
  6527. def get(self, key, default=None):
  6528. return self.__getattr__(key, default)
  6529. def __setattr__(self, key, value):
  6530. if key.startswith('_'):
  6531. int.__setattr__(self, key, value)
  6532. return
  6533. self.__allocate()
  6534. self._record[key] = value
  6535. def __getitem__(self, key):
  6536. if key == 'id':
  6537. return int(self)
  6538. self.__allocate()
  6539. return self._record.get(key, None)
  6540. def __setitem__(self,key,value):
  6541. self.__allocate()
  6542. self._record[key] = value
  6543. def Reference_unpickler(data):
  6544. return marshal.loads(data)
  6545. def Reference_pickler(data):
  6546. try:
  6547. marshal_dump = marshal.dumps(int(data))
  6548. except AttributeError:
  6549. marshal_dump = 'i%s' % struct.pack('<i', int(data))
  6550. return (Reference_unpickler, (marshal_dump,))
  6551. copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler)
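# A Reference is an int subclass that dereferences lazily: attribute
# access triggers __allocate(), which fetches the referenced row once
# and caches it in _record. A sketch (tables as in the parse_as_rest
# example):
#
#   row = db.pet(1)
#   int(row.owner)   # just the id, no extra query
#   row.owner.name   # one select on first access, then cached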
  6552. class Table(object):
  6553. """
  6554. an instance of this class represents a database table
  6555. Example::
  6556. db = DAL(...)
  6557. db.define_table('users', Field('name'))
  6558. db.users.insert(name='me') # print db.users._insert(...) to see SQL
  6559. db.users.drop()
  6560. """
  6561. def __init__(
  6562. self,
  6563. db,
  6564. tablename,
  6565. *fields,
  6566. **args
  6567. ):
  6568. """
  6569. Initializes the table and performs checking on the provided fields.
  6570. Each table will have automatically an 'id'.
  6571. If a field is of type Table, the fields (excluding 'id') from that table
  6572. will be used instead.
  6573. :raises SyntaxError: when a supplied field is of incorrect type.
  6574. """
  6575. self._actual = False # set to True by define_table()
  6576. self._tablename = tablename
  6577. self._sequence_name = args.get('sequence_name',None) or \
  6578. db and db._adapter.sequence_name(tablename)
  6579. self._trigger_name = args.get('trigger_name',None) or \
  6580. db and db._adapter.trigger_name(tablename)
  6581. self._common_filter = args.get('common_filter', None)
  6582. self._format = args.get('format',None)
  6583. self._singular = args.get(
  6584. 'singular',tablename.replace('_',' ').capitalize())
  6585. self._plural = args.get(
  6586. 'plural',pluralize(self._singular.lower()).capitalize())
6587. # horrible, but kept for backward compatibility with appadmin:
  6588. if 'primarykey' in args and args['primarykey']:
  6589. self._primarykey = args.get('primarykey', None)
  6590. self._before_insert = []
  6591. self._before_update = [Set.delete_uploaded_files]
  6592. self._before_delete = [Set.delete_uploaded_files]
  6593. self._after_insert = []
  6594. self._after_update = []
  6595. self._after_delete = []
  6596. fieldnames,newfields=set(),[]
  6597. if hasattr(self,'_primarykey'):
  6598. if not isinstance(self._primarykey,list):
  6599. raise SyntaxError, \
  6600. "primarykey must be a list of fields from table '%s'" \
  6601. % tablename
  6602. if len(self._primarykey)==1:
  6603. self._id = [f for f in fields if isinstance(f,Field) \
  6604. and f.name==self._primarykey[0]][0]
  6605. elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
  6606. field = Field('id', 'id')
  6607. newfields.append(field)
  6608. fieldnames.add('id')
  6609. self._id = field
  6610. for field in fields:
  6611. if isinstance(field, Field) and not field.name in fieldnames:
  6612. if field.db is not None:
  6613. field = copy.copy(field)
  6614. newfields.append(field)
  6615. fieldnames.add(field.name)
  6616. if field.type=='id':
  6617. self._id = field
  6618. elif isinstance(field, Table):
  6619. table = field
  6620. for field in table:
  6621. if not field.name in fieldnames and not field.type=='id':
  6622. t2 = not table._actual and self._tablename
  6623. field = field.clone(point_self_references_to=t2)
  6624. newfields.append(field)
  6625. fieldnames.add(field.name)
  6626. elif not isinstance(field, (Field, Table)):
  6627. raise SyntaxError, \
  6628. 'define_table argument is not a Field or Table: %s' % field
  6629. fields = newfields
  6630. self._db = db
  6632. self._fields = SQLCallableList()
  6633. self.virtualfields = []
  6634. fields = list(fields)
  6635. if db and db._adapter.uploads_in_blob==True:
  6636. uploadfields = [f.name for f in fields if f.type=='blob']
  6637. for field in fields:
  6638. fn = field.uploadfield
  6639. if isinstance(field, Field) and field.type == 'upload'\
  6640. and fn is True:
  6641. fn = field.uploadfield = '%s_blob' % field.name
  6642. if isinstance(fn,str) and not fn in uploadfields:
  6643. fields.append(Field(fn,'blob',default='',
  6644. writable=False,readable=False))
  6645. lower_fieldnames = set()
  6646. reserved = dir(Table) + ['fields']
  6647. for field in fields:
  6648. field_name = field.name
  6649. if db and db.check_reserved:
  6650. db.check_reserved_keyword(field_name)
  6651. elif field_name in reserved:
  6652. raise SyntaxError, "field name %s not allowed" % field_name
  6653. if field_name.lower() in lower_fieldnames:
  6654. raise SyntaxError, "duplicate field %s in table %s" \
  6655. % (field_name, tablename)
  6656. else:
  6657. lower_fieldnames.add(field_name.lower())
  6658. self.fields.append(field_name)
  6659. self[field_name] = field
  6660. if field.type == 'id':
  6661. self['id'] = field
  6662. field.tablename = field._tablename = tablename
  6663. field.table = field._table = self
  6664. field.db = field._db = db
  6665. if db and not field.type in ('text','blob') and \
  6666. db._adapter.maxcharlength < field.length:
  6667. field.length = db._adapter.maxcharlength
  6668. if field.requires == DEFAULT:
  6669. field.requires = sqlhtml_validators(field)
  6670. self.ALL = SQLALL(self)
  6671. if hasattr(self,'_primarykey'):
  6672. for k in self._primarykey:
  6673. if k not in self.fields:
  6674. raise SyntaxError, \
  6675. "primarykey must be a list of fields from table '%s " % tablename
  6676. else:
  6677. self[k].notnull = True
  6678. @property
  6679. def fields(self):
  6680. return self._fields
  6681. def update(self,*args,**kwargs):
  6682. raise RuntimeError, "Syntax Not Supported"
  6683. def _enable_record_versioning(self,
  6684. archive_db=None,
  6685. archive_name = '%(tablename)s_archive',
  6686. current_record = 'current_record',
  6687. is_active = 'is_active'):
  6688. archive_db = archive_db or self._db
  6689. archive_name = archive_name % dict(tablename=self._tablename)
  6690. if archive_name in archive_db.tables():
  6691. return # do not try define the archive if already exists
  6692. fieldnames = self.fields()
  6693. field_type = self if archive_db is self._db else 'bigint'
  6694. archive_db.define_table(
  6695. archive_name,
  6696. Field(current_record,field_type),
  6697. *[field.clone(unique=False) for field in self])
  6698. self._before_update.append(
  6699. lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
  6700. archive_record(qset,fs,db[an],cn))
  6701. if is_active and is_active in fieldnames:
  6702. self._before_delete.append(
  6703. lambda qset: qset.update(is_active=False))
  6704. newquery = lambda query, t=self: t.is_active == True
  6705. query = self._common_filter
  6706. if query:
  6707. newquery = query & newquery
  6708. self._common_filter = newquery
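# Sketch (assumes a db.person table): db.person._enable_record_versioning()
# defines person_archive with a current_record reference, copies each row
# there before updates, and, when an is_active field exists, turns deletes
# into is_active=False updates that _common_filter then hides.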
  6709. def _validate(self,**vars):
  6710. errors = Row()
  6711. for key,value in vars.iteritems():
  6712. value,error = self[key].validate(value)
  6713. if error:
  6714. errors[key] = error
  6715. return errors
  6716. def _create_references(self):
  6717. db = self._db
  6718. pr = db._pending_references
  6719. self._referenced_by = []
  6720. for field in self:
  6721. fieldname = field.name
  6722. field_type = field.type
  6723. if isinstance(field_type,str) and field_type[:10] == 'reference ':
  6724. ref = field_type[10:].strip()
  6725. if not ref.split():
6726. raise SyntaxError, 'Table: reference to nothing: %s' % ref
  6727. refs = ref.split('.')
  6728. rtablename = refs[0]
  6729. if not rtablename in db:
  6730. pr[rtablename] = pr.get(rtablename,[]) + [field]
  6731. continue
  6732. rtable = db[rtablename]
  6733. if len(refs)==2:
  6734. rfieldname = refs[1]
  6735. if not hasattr(rtable,'_primarykey'):
  6736. raise SyntaxError,\
  6737. 'keyed tables can only reference other keyed tables (for now)'
  6738. if rfieldname not in rtable.fields:
  6739. raise SyntaxError,\
  6740. "invalid field '%s' for referenced table '%s' in table '%s'" \
  6741. % (rfieldname, rtablename, self._tablename)
  6742. rtable._referenced_by.append(field)
  6743. for referee in pr.get(self._tablename,[]):
  6744. self._referenced_by.append(referee)
  6745. def _filter_fields(self, record, id=False):
  6746. return dict([(k, v) for (k, v) in record.iteritems() if k
  6747. in self.fields and (self[k].type!='id' or id)])
  6748. def _build_query(self,key):
  6749. """ for keyed table only """
  6750. query = None
  6751. for k,v in key.iteritems():
  6752. if k in self._primarykey:
  6753. if query:
  6754. query = query & (self[k] == v)
  6755. else:
  6756. query = (self[k] == v)
  6757. else:
  6758. raise SyntaxError, \
  6759. 'Field %s is not part of the primary key of %s' % \
  6760. (k,self._tablename)
  6761. return query
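# For a keyed table defined with primarykey=['k1', 'k2'], a lookup such
# as table[{'k1': 1, 'k2': 'a'}] goes through _build_query and selects
# with (table.k1 == 1) & (table.k2 == 'a'); keys outside the primary key
# raise SyntaxError.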
  6762. def __getitem__(self, key):
  6763. if not key:
  6764. return None
  6765. elif isinstance(key, dict):
  6766. """ for keyed table """
  6767. query = self._build_query(key)
  6768. rows = self._db(query).select()
  6769. if rows:
  6770. return rows[0]
  6771. return None
  6772. elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
  6773. return self._db(self._id == key).select(limitby=(0,1)).first()
  6774. elif key:
  6775. return ogetattr(self, str(key))
  6776. def __call__(self, key=DEFAULT, **kwargs):
  6777. for_update = kwargs.get('_for_update',False)
  6778. if '_for_update' in kwargs: del kwargs['_for_update']
  6779. if not key is DEFAULT:
  6780. if isinstance(key, Query):
  6781. record = self._db(key).select(
  6782. limitby=(0,1),for_update=for_update).first()
  6783. elif not str(key).isdigit():
  6784. record = None
  6785. else:
  6786. record = self._db(self._id == key).select(
  6787. limitby=(0,1),for_update=for_update).first()
  6788. if record:
  6789. for k,v in kwargs.iteritems():
  6790. if record[k]!=v: return None
  6791. return record
  6792. elif kwargs:
  6793. query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
  6794. return self._db(query).select(limitby=(0,1),for_update=for_update).first()
  6795. else:
  6796. return None
  6797. def __setitem__(self, key, value):
  6798. if isinstance(key, dict) and isinstance(value, dict):
  6799. """ option for keyed table """
  6800. if set(key.keys()) == set(self._primarykey):
  6801. value = self._filter_fields(value)
  6802. kv = {}
  6803. kv.update(value)
  6804. kv.update(key)
  6805. if not self.insert(**kv):
  6806. query = self._build_query(key)
  6807. self._db(query).update(**self._filter_fields(value))
  6808. else:
  6809. raise SyntaxError,\
  6810. 'key must have all fields from primary key: %s'%\
  6811. (self._primarykey)
  6812. elif str(key).isdigit():
  6813. if key == 0:
  6814. self.insert(**self._filter_fields(value))
  6815. elif self._db(self._id == key)\
  6816. .update(**self._filter_fields(value)) is None:
  6817. raise SyntaxError, 'No such record: %s' % key
  6818. else:
  6819. if isinstance(key, dict):
  6820. raise SyntaxError,\
  6821. 'value must be a dictionary: %s' % value
  6822. osetattr(self, str(key), value)
  6823. __getattr__ = __getitem__
  6824. def __setattr__(self, key, value):
  6825. if key[:1]!='_' and key in self:
  6826. raise SyntaxError, 'Object exists and cannot be redefined: %s' % key
  6827. osetattr(self,key,value)
  6828. def __delitem__(self, key):
  6829. if isinstance(key, dict):
  6830. query = self._build_query(key)
  6831. if not self._db(query).delete():
  6832. raise SyntaxError, 'No such record: %s' % key
  6833. elif not str(key).isdigit() or \
  6834. not self._db(self._id == key).delete():
  6835. raise SyntaxError, 'No such record: %s' % key
  6836. def __contains__(self,key):
  6837. return hasattr(self,key)
  6838. has_key = __contains__
  6839. def items(self):
  6840. return self.__dict__.items()
  6841. def __iter__(self):
  6842. for fieldname in self.fields:
  6843. yield self[fieldname]
  6844. def iteritems(self):
  6845. return self.__dict__.iteritems()
  6846. def __repr__(self):
  6847. return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
  6848. def __str__(self):
  6849. if hasattr(self,'_ot') and self._ot is not None:
  6850. if 'Oracle' in str(type(self._db._adapter)): # <<< patch
  6851. return '%s %s' % (self._ot, self._tablename) # <<< patch
  6852. return '%s AS %s' % (self._ot, self._tablename)
  6853. return self._tablename
  6854. def _drop(self, mode = ''):
  6855. return self._db._adapter._drop(self, mode)
  6856. def drop(self, mode = ''):
  6857. return self._db._adapter.drop(self,mode)
  6858. def _listify(self,fields,update=False):
  6859. new_fields = {} # format: new_fields[name] = (field,value)
  6860. # store all fields passed as input in new_fields
  6861. for name in fields:
  6862. if not name in self.fields:
  6863. if name != 'id':
  6864. raise SyntaxError, \
  6865. 'Field %s does not belong to the table' % name
  6866. else:
  6867. field = self[name]
  6868. value = fields[name]
  6869. if field.filter_in:
  6870. value = field.filter_in(value)
  6871. new_fields[name] = (field,value)
  6872. # check all fields that should be in the table but are not passed
  6873. to_compute = []
  6874. for ofield in self:
  6875. name = ofield.name
  6876. if not name in new_fields:
  6877. # if field is supposed to be computed, compute it!
  6878. if ofield.compute: # save those to compute for later
  6879. to_compute.append((name,ofield))
  6880. # if field is required, check its default value
  6881. elif not update and not ofield.default is None:
  6882. value = ofield.default
  6883. fields[name] = value
  6884. new_fields[name] = (ofield,value)
6885. # if this is an update, use the update field instead
  6886. elif update and not ofield.update is None:
  6887. value = ofield.update
  6888. fields[name] = value
  6889. new_fields[name] = (ofield,value)
  6890. # if the field is still not there but it should, error
  6891. elif not update and ofield.required:
  6892. raise RuntimeError, \
  6893. 'Table: missing required field: %s' % name
  6894. # now deal with fields that are supposed to be computed
  6895. if to_compute:
  6896. row = Row(fields)
  6897. for name,ofield in to_compute:
  6898. # try compute it
  6899. try:
  6900. new_fields[name] = (ofield,ofield.compute(row))
  6901. except (KeyError, AttributeError):
6902. # fail silently unless the field is required!
6903. if ofield.required:
6904. raise SyntaxError, 'unable to compute field: %s' % name
  6905. return new_fields.values()
  6906. def _attempt_upload(self, fields):
  6907. for field in self:
  6908. if field.type=='upload' and field.name in fields:
  6909. value = fields[field.name]
  6910. if value and not isinstance(value,str):
  6911. if hasattr(value,'file') and hasattr(value,'filename'):
  6912. new_name = field.store(value.file,filename=value.filename)
  6913. elif hasattr(value,'read') and hasattr(value,'name'):
  6914. new_name = field.store(value,filename=value.name)
  6915. else:
  6916. raise RuntimeError, "Unable to handle upload"
  6917. fields[field.name] = new_name
  6918. def _insert(self, **fields):
  6919. return self._db._adapter._insert(self,self._listify(fields))
  6920. def insert(self, **fields):
  6921. self._attempt_upload(fields)
  6922. if any(f(fields) for f in self._before_insert): return 0
  6923. ret = self._db._adapter.insert(self,self._listify(fields))
  6924. if ret and self._after_insert:
  6925. fields = Row(fields)
  6926. [f(fields,ret) for f in self._after_insert]
  6927. return ret
  6928. def validate_and_insert(self,**fields):
  6929. response = Row()
  6930. response.errors = Row()
  6931. new_fields = copy.copy(fields)
  6932. for key,value in fields.iteritems():
  6933. value,error = self[key].validate(value)
  6934. if error:
  6935. response.errors[key] = "%s" % error
  6936. else:
  6937. new_fields[key] = value
  6938. if not response.errors:
  6939. response.id = self.insert(**new_fields)
  6940. else:
  6941. response.id = None
  6942. return response
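# A hedged usage sketch for validate_and_insert; the 'person' table
# and its validator are hypothetical:
# >>> person = db.define_table('person',
# ...     Field('name', requires=IS_NOT_EMPTY()))
# >>> r = db.person.validate_and_insert(name='')
# >>> # r.id is None and r.errors.name holds the validation message
# >>> r = db.person.validate_and_insert(name='Max')
# >>> # r.errors is empty and r.id is the id of the new record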
  6943. def update_or_insert(self, _key=DEFAULT, **values):
  6944. if _key is DEFAULT:
  6945. record = self(**values)
  6946. elif isinstance(_key,dict):
  6947. record = self(**_key)
  6948. else:
  6949. record = self(_key)
  6950. if record:
  6951. record.update_record(**values)
  6952. newid = None
  6953. else:
  6954. newid = self.insert(**values)
  6955. return newid
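# Usage sketch (hypothetical data): the first argument selects the
# record to update; if no record matches, the values are inserted:
# >>> db.person.update_or_insert(name='Max')
# >>> db.person.update_or_insert(db.person.name=='Max', name='Maxim')
# >>> # returns the new id on insert, None when a record was updated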
  6956. def bulk_insert(self, items):
  6957. """
  6958. here items is a list of dictionaries
  6959. """
  6960. items = [self._listify(item) for item in items]
  6961. if any(f(item) for item in items for f in self._before_insert):return 0
  6962. ret = self._db._adapter.bulk_insert(self,items)
  6963. ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
  6964. return ret
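# Usage sketch for bulk_insert; the generic adapter inserts one item
# at a time and returns the list of new ids (some adapters, e.g. on
# GAE, batch the operation):
# >>> ids = db.person.bulk_insert([dict(name='Alex'),
# ...                              dict(name='John')])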
  6965. def _truncate(self, mode = None):
  6966. return self._db._adapter._truncate(self, mode)
  6967. def truncate(self, mode = None):
  6968. return self._db._adapter.truncate(self, mode)
  6969. def import_from_csv_file(
  6970. self,
  6971. csvfile,
  6972. id_map=None,
  6973. null='<NULL>',
  6974. unique='uuid',
  6975. id_offset=None, # id_offset used only when id_map is None
  6976. *args, **kwargs
  6977. ):
  6978. """
  6979. Import records from csv file.
6980. Column headers must have the same names as the table fields.
6981. Field 'id' is ignored.
6982. If column names read 'table.field' the 'table.' prefix is ignored.
6983. The 'unique' argument is a field which must be unique
6984. (typically a uuid field).
6985. The 'restore' argument defaults to False;
6986. if set to True it will remove old values in the table first.
6987. If 'id_map' is set to None, ids will not be remapped
6988. and the import will keep the id numbers of the restored table.
6989. This assumes that there is a field of type id that
6990. is an integer and in incrementing order.
  6992. """
  6993. delimiter = kwargs.get('delimiter', ',')
  6994. quotechar = kwargs.get('quotechar', '"')
  6995. quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
  6996. restore = kwargs.get('restore', False)
  6997. if restore:
  6998. self._db[self].truncate()
  6999. reader = csv.reader(csvfile, delimiter=delimiter,
  7000. quotechar=quotechar, quoting=quoting)
  7001. colnames = None
  7002. if isinstance(id_map, dict):
  7003. if not self._tablename in id_map:
  7004. id_map[self._tablename] = {}
  7005. id_map_self = id_map[self._tablename]
  7006. def fix(field, value, id_map, id_offset):
  7007. list_reference_s='list:reference'
  7008. if value == null:
  7009. value = None
  7010. elif field.type=='blob':
  7011. value = base64.b64decode(value)
  7012. elif field.type=='double' or field.type=='float':
  7013. if not value.strip():
  7014. value = None
  7015. else:
  7016. value = float(value)
  7017. elif field.type in ('integer','bigint'):
  7018. if not value.strip():
  7019. value = None
  7020. else:
  7021. value = int(value)
  7022. elif field.type.startswith('list:string'):
  7023. value = bar_decode_string(value)
  7024. elif field.type.startswith(list_reference_s):
  7025. ref_table = field.type[len(list_reference_s):].strip()
  7026. if id_map is not None:
  7027. value = [id_map[ref_table][int(v)] \
  7028. for v in bar_decode_string(value)]
  7029. else:
  7030. value = [v for v in bar_decode_string(value)]
  7031. elif field.type.startswith('list:'):
  7032. value = bar_decode_integer(value)
  7033. elif id_map and field.type.startswith('reference'):
  7034. try:
  7035. value = id_map[field.type[9:].strip()][int(value)]
  7036. except KeyError:
  7037. pass
  7038. elif id_offset and field.type.startswith('reference'):
  7039. try:
  7040. value = id_offset[field.type[9:].strip()]+int(value)
  7041. except KeyError:
  7042. pass
  7043. return (field.name, value)
  7044. def is_id(colname):
  7045. if colname in self:
  7046. return self[colname].type == 'id'
  7047. else:
  7048. return False
  7049. first = True
  7050. unique_idx = None
  7051. for line in reader:
  7052. if not line:
  7053. break
  7054. if not colnames:
  7055. colnames = [x.split('.',1)[-1] for x in line][:len(line)]
  7056. cols, cid = [], None
  7057. for i,colname in enumerate(colnames):
  7058. if is_id(colname):
  7059. cid = i
  7060. else:
  7061. cols.append(i)
  7062. if colname == unique:
  7063. unique_idx = i
  7064. else:
  7065. items = [fix(self[colnames[i]], line[i], id_map, id_offset) \
  7066. for i in cols if colnames[i] in self.fields]
  7067. if not id_map and cid is not None and id_offset is not None and not unique_idx:
  7068. csv_id = int(line[cid])
  7069. curr_id = self.insert(**dict(items))
  7070. if first:
  7071. first = False
7072. # If the first curr_id is bigger than csv_id,
7073. # then we are not restoring but
7074. # extending the db table with the csv table
  7075. if curr_id>csv_id:
  7076. id_offset[self._tablename] = curr_id-csv_id
  7077. else:
  7078. id_offset[self._tablename] = 0
7079. # insert new ids until curr_id reaches csv_id+offset
  7080. while curr_id<csv_id+id_offset[self._tablename]:
  7081. self._db(self._db[self][colnames[cid]] == curr_id).delete()
  7082. curr_id = self.insert(**dict(items))
7083. # Validation. Check for a duplicate of the 'unique' field and,
7084. # if present, update instead of insert.
  7085. elif not unique_idx:
  7086. new_id = self.insert(**dict(items))
  7087. else:
  7088. unique_value = line[unique_idx]
  7089. query = self._db[self][unique] == unique_value
  7090. record = self._db(query).select().first()
  7091. if record:
  7092. record.update_record(**dict(items))
  7093. new_id = record[self._id.name]
  7094. else:
  7095. new_id = self.insert(**dict(items))
  7096. if id_map and cid is not None:
  7097. id_map_self[int(line[cid])] = new_id
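# A hedged usage sketch for import_from_csv_file; the file name is
# hypothetical:
# >>> db.person.import_from_csv_file(open('person.csv','rb'))
# >>> # to remap reference fields across several imported tables,
# >>> # pass one shared id_map dictionary to every call:
# >>> id_map = {}
# >>> db.person.import_from_csv_file(open('person.csv','rb'),
# ...                                id_map=id_map)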
  7098. def with_alias(self, alias):
  7099. return self._db._adapter.alias(self,alias)
  7100. def on(self, query):
  7101. return Expression(self._db,self._db._adapter.ON,self,query)
  7102. def archive_record(qset,fs,archive_table,current_record):
  7103. tablenames = qset.db._adapter.tables(qset.query)
  7104. if len(tablenames)!=1: raise RuntimeError, "cannot update join"
  7105. table = qset.db[tablenames[0]]
  7106. for row in qset.select():
  7107. fields = archive_table._filter_fields(row)
  7108. fields[current_record] = row.id
  7109. archive_table.insert(**fields)
  7110. return False
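# A hedged sketch of wiring archive_record as a _before_update
# callback, so that the old version of each row is copied into an
# archive table before the update; 'person_archive' and
# 'current_record' are hypothetical names:
# >>> archive = db.define_table('person_archive',
# ...     Field('current_record', db.person), db.person)
# >>> db.person._before_update.append(
# ...     lambda qset, fs: archive_record(qset, fs, archive,
# ...                                     'current_record'))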
  7111. class Expression(object):
  7112. def __init__(
  7113. self,
  7114. db,
  7115. op,
  7116. first=None,
  7117. second=None,
  7118. type=None,
  7119. ):
  7120. self.db = db
  7121. self.op = op
  7122. self.first = first
  7123. self.second = second
  7124. self._table = getattr(first,'_table',None)
  7125. ### self._tablename = first._tablename ## CHECK
  7126. if not type and first and hasattr(first,'type'):
  7127. self.type = first.type
  7128. else:
  7129. self.type = type
  7130. def sum(self):
  7131. db = self.db
  7132. return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)
  7133. def max(self):
  7134. db = self.db
  7135. return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)
  7136. def min(self):
  7137. db = self.db
  7138. return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)
  7139. def len(self):
  7140. db = self.db
  7141. return Expression(db, db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
  7142. def avg(self):
  7143. db = self.db
  7144. return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)
  7145. def lower(self):
  7146. db = self.db
  7147. return Expression(db, db._adapter.LOWER, self, None, self.type)
  7148. def upper(self):
  7149. db = self.db
  7150. return Expression(db, db._adapter.UPPER, self, None, self.type)
  7151. def year(self):
  7152. db = self.db
  7153. return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')
  7154. def month(self):
  7155. db = self.db
  7156. return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')
  7157. def day(self):
  7158. db = self.db
  7159. return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')
  7160. def hour(self):
  7161. db = self.db
  7162. return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')
  7163. def minutes(self):
  7164. db = self.db
  7165. return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')
  7166. def coalesce(self,*others):
  7167. db = self.db
  7168. return Expression(db, db._adapter.COALESCE, self, others, self.type)
  7169. def coalesce_zero(self):
  7170. db = self.db
  7171. return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)
  7172. def seconds(self):
  7173. db = self.db
  7174. return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')
  7175. def epoch(self):
  7176. db = self.db
  7177. return Expression(db, db._adapter.EPOCH, self, None, 'integer')
  7178. def __getslice__(self, start, stop):
  7179. db = self.db
  7180. if start < 0:
  7181. pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
  7182. else:
  7183. pos0 = start + 1
  7184. if stop < 0:
  7185. length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
  7186. elif stop == sys.maxint:
  7187. length = self.len()
  7188. else:
  7189. length = '(%s - %s)' % (stop + 1, pos0)
  7190. return Expression(db,db._adapter.SUBSTRING,
  7191. self, (pos0, length), self.type)
  7192. def __getitem__(self, i):
  7193. return self[i:i + 1]
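# Slicing sketch: expression[start:stop] expands to an SQL SUBSTRING;
# e.g. the first two characters of each name (hypothetical table):
# >>> db(db.person.id>0).select(db.person.name[:2])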
  7194. def __str__(self):
  7195. return self.db._adapter.expand(self,self.type)
  7196. def __or__(self, other): # for use in sortby
  7197. db = self.db
  7198. return Expression(db,db._adapter.COMMA,self,other,self.type)
  7199. def __invert__(self):
  7200. db = self.db
7201. if self.op == db._adapter.INVERT:
  7202. return self.first
  7203. return Expression(db,db._adapter.INVERT,self,type=self.type)
  7204. def __add__(self, other):
  7205. db = self.db
  7206. return Expression(db,db._adapter.ADD,self,other,self.type)
  7207. def __sub__(self, other):
  7208. db = self.db
  7209. if self.type in ('integer','bigint'):
  7210. result_type = 'integer'
  7211. elif self.type in ['date','time','datetime','double','float']:
  7212. result_type = 'double'
  7213. else:
  7214. raise SyntaxError, "subtraction operation not supported for type"
  7215. return Expression(db,db._adapter.SUB,self,other,result_type)
  7216. def __mul__(self, other):
  7217. db = self.db
  7218. return Expression(db,db._adapter.MUL,self,other,self.type)
  7219. def __div__(self, other):
  7220. db = self.db
  7221. return Expression(db,db._adapter.DIV,self,other,self.type)
  7222. def __mod__(self, other):
  7223. db = self.db
  7224. return Expression(db,db._adapter.MOD,self,other,self.type)
  7225. def __eq__(self, value):
  7226. db = self.db
  7227. return Query(db, db._adapter.EQ, self, value)
  7228. def __ne__(self, value):
  7229. db = self.db
  7230. return Query(db, db._adapter.NE, self, value)
  7231. def __lt__(self, value):
  7232. db = self.db
  7233. return Query(db, db._adapter.LT, self, value)
  7234. def __le__(self, value):
  7235. db = self.db
  7236. return Query(db, db._adapter.LE, self, value)
  7237. def __gt__(self, value):
  7238. db = self.db
  7239. return Query(db, db._adapter.GT, self, value)
  7240. def __ge__(self, value):
  7241. db = self.db
  7242. return Query(db, db._adapter.GE, self, value)
  7243. def like(self, value, case_sensitive=False):
  7244. db = self.db
  7245. op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
  7246. return Query(db, op, self, value)
  7247. def regexp(self, value):
  7248. db = self.db
  7249. return Query(db, db._adapter.REGEXP, self, value)
  7250. def belongs(self, *value):
  7251. """
  7252. Accepts the following inputs:
  7253. field.belongs(1,2)
  7254. field.belongs((1,2))
  7255. field.belongs(query)
  7256. Does NOT accept:
  7257. field.belongs(1)
  7258. """
  7259. db = self.db
  7260. if len(value) == 1:
  7261. value = value[0]
  7262. if isinstance(value,Query):
  7263. value = db(value)._select(value.first._table._id)
  7264. return Query(db, db._adapter.BELONGS, self, value)
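# belongs usage sketch, including the nested-select form accepted
# above (table names are hypothetical):
# >>> db(db.person.id.belongs(1,2,3)).select()
# >>> q = db.person.name.startswith('M')
# >>> db(db.dog.owner.belongs(q)).select()
# >>> # the Query form expands to SQL: owner IN (SELECT id FROM person ...)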
  7265. def startswith(self, value):
  7266. db = self.db
  7267. if not self.type in ('string', 'text'):
  7268. raise SyntaxError, "startswith used with incompatible field type"
  7269. return Query(db, db._adapter.STARTSWITH, self, value)
  7270. def endswith(self, value):
  7271. db = self.db
  7272. if not self.type in ('string', 'text'):
  7273. raise SyntaxError, "endswith used with incompatible field type"
  7274. return Query(db, db._adapter.ENDSWITH, self, value)
  7275. def contains(self, value, all=False):
  7276. db = self.db
  7277. if isinstance(value,(list, tuple)):
  7278. subqueries = [self.contains(str(v).strip()) for v in value if str(v).strip()]
  7279. if not subqueries:
  7280. return self.contains('')
  7281. else:
  7282. return reduce(all and AND or OR,subqueries)
  7283. if not self.type in ('string', 'text') and not self.type.startswith('list:'):
  7284. raise SyntaxError, "contains used with incompatible field type"
  7285. return Query(db, db._adapter.CONTAINS, self, value)
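# contains sketch: a substring test on string/text fields, a
# membership test on 'list:' fields; a list argument ORs the
# individual tests (ANDs them when all=True):
# >>> db(db.person.name.contains(['ax','ma'], all=False)).select()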
  7286. def with_alias(self, alias):
  7287. db = self.db
  7288. return Expression(db, db._adapter.AS, self, alias, self.type)
  7289. # GIS functions
  7290. def st_asgeojson(self, precision=15, options=0, version=1):
  7291. return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
  7292. dict(precision=precision, options=options,
  7293. version=version), 'dict')
  7294. def st_astext(self):
  7295. db = self.db
  7296. return Expression(db, db._adapter.ST_ASTEXT, self)
  7297. def st_contained(self, value):
  7298. db = self.db
  7299. return Query(db, db._adapter.ST_CONTAINS, value, self)
  7300. def st_contains(self, value):
  7301. db = self.db
  7302. return Query(db, db._adapter.ST_CONTAINS, self, value)
  7303. def st_distance(self, other):
  7304. db = self.db
  7305. return Expression(db,db._adapter.ST_DISTANCE,self,other,self.type)
  7306. def st_equals(self, value):
  7307. db = self.db
  7308. return Query(db, db._adapter.ST_EQUALS, self, value)
  7309. def st_intersects(self, value):
  7310. db = self.db
  7311. return Query(db, db._adapter.ST_INTERSECTS, self, value)
  7312. def st_overlaps(self, value):
  7313. db = self.db
  7314. return Query(db, db._adapter.ST_OVERLAPS, self, value)
  7315. def st_simplify(self, value):
  7316. db = self.db
  7317. return Expression(db, db._adapter.ST_SIMPLIFY, self, value)
  7318. def st_touches(self, value):
  7319. db = self.db
  7320. return Query(db, db._adapter.ST_TOUCHES, self, value)
  7321. def st_within(self, value):
  7322. db = self.db
  7323. return Query(db, db._adapter.ST_WITHIN, self, value)
  7324. # for use in both Query and sortby
  7325. class SQLCustomType(object):
  7326. """
  7327. allows defining of custom SQL types
  7328. Example::
  7329. decimal = SQLCustomType(
  7330. type ='double',
  7331. native ='integer',
  7332. encoder =(lambda x: int(float(x) * 100)),
  7333. decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
  7334. )
  7335. db.define_table(
  7336. 'example',
  7337. Field('value', type=decimal)
  7338. )
  7339. :param type: the web2py type (default = 'string')
  7340. :param native: the backend type
  7341. :param encoder: how to encode the value to store it in the backend
  7342. :param decoder: how to decode the value retrieved from the backend
  7343. :param validator: what validators to use ( default = None, will use the
  7344. default validator for type)
  7345. """
  7346. def __init__(
  7347. self,
  7348. type='string',
  7349. native=None,
  7350. encoder=None,
  7351. decoder=None,
  7352. validator=None,
  7353. _class=None,
  7354. ):
  7355. self.type = type
  7356. self.native = native
  7357. self.encoder = encoder or (lambda x: x)
  7358. self.decoder = decoder or (lambda x: x)
  7359. self.validator = validator
  7360. self._class = _class or type
  7361. def startswith(self, text=None):
  7362. try:
  7363. return self.type.startswith(self, text)
  7364. except TypeError:
  7365. return False
  7366. def __getslice__(self, a=0, b=100):
  7367. return None
  7368. def __getitem__(self, i):
  7369. return None
  7370. def __str__(self):
  7371. return self._class
  7372. class FieldVirtual(object):
  7373. def __init__(self, f):
  7374. self.f = f
  7375. class FieldLazy(object):
  7376. def __init__(self, f, handler=None):
  7377. self.f = f
  7378. self.handler = handler
  7379. def list_represent(x,r=None):
  7380. return ', '.join(str(y) for y in x or [])
  7381. class Field(Expression):
  7382. Virtual = FieldVirtual
  7383. Lazy = FieldLazy
  7384. """
  7385. an instance of this class represents a database field
  7386. example::
  7387. a = Field(name, 'string', length=32, default=None, required=False,
  7388. requires=IS_NOT_EMPTY(), ondelete='CASCADE',
  7389. notnull=False, unique=False,
7390. widget=None, label=None, comment=None,
  7391. uploadfield=True, # True means store on disk,
  7392. # 'a_field_name' means store in this field in db
  7393. # False means file content will be discarded.
  7394. writable=True, readable=True, update=None, authorize=None,
  7395. autodelete=False, represent=None, uploadfolder=None,
  7396. uploadseparate=False # upload to separate directories by uuid_keys
  7397. # first 2 character and tablename.fieldname
  7398. # False - old behavior
  7399. # True - put uploaded file in
  7400. # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
  7401. # directory)
  7402. uploadfs=None # a pyfilesystem where to store upload
  7403. to be used as argument of DAL.define_table
  7404. allowed field types:
  7405. string, boolean, integer, double, text, blob,
  7406. date, time, datetime, upload, password
7407. string fields default to a length of Adapter.maxcharlength (512, or 255 for mysql)
  7408. fields should have a default or they will be required in SQLFORMs
  7409. the requires argument is used to validate the field input in SQLFORMs
  7410. """
  7411. def __init__(
  7412. self,
  7413. fieldname,
  7414. type='string',
  7415. length=None,
  7416. default=DEFAULT,
  7417. required=False,
  7418. requires=DEFAULT,
  7419. ondelete='CASCADE',
  7420. notnull=False,
  7421. unique=False,
  7422. uploadfield=True,
  7423. widget=None,
  7424. label=None,
  7425. comment=None,
  7426. writable=True,
  7427. readable=True,
  7428. update=None,
  7429. authorize=None,
  7430. autodelete=False,
  7431. represent=None,
  7432. uploadfolder=None,
  7433. uploadseparate=False,
  7434. uploadfs=None,
  7435. compute=None,
  7436. custom_store=None,
  7437. custom_retrieve=None,
  7438. custom_retrieve_file_properties=None,
  7439. custom_delete=None,
  7440. filter_in = None,
  7441. filter_out = None,
  7442. custom_qualifier = None,
  7443. ):
  7444. self._db = self.db = None # both for backward compatibility
  7445. self.op = None
  7446. self.first = None
  7447. self.second = None
  7448. self.name = fieldname = cleanup(fieldname)
  7449. if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \
  7450. fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
  7451. raise SyntaxError, 'Field: invalid field name: %s' % fieldname
  7452. self.type = type if not isinstance(type, Table) else 'reference %s' % type
  7453. self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
  7454. self.default = default if default!=DEFAULT else (update or None)
  7455. self.required = required # is this field required
  7456. self.ondelete = ondelete.upper() # this is for reference fields only
  7457. self.notnull = notnull
  7458. self.unique = unique
  7459. self.uploadfield = uploadfield
  7460. self.uploadfolder = uploadfolder
  7461. self.uploadseparate = uploadseparate
  7462. self.uploadfs = uploadfs
  7463. self.widget = widget
  7464. self.comment = comment
  7465. self.writable = writable
  7466. self.readable = readable
  7467. self.update = update
  7468. self.authorize = authorize
  7469. self.autodelete = autodelete
  7470. self.represent = list_represent if \
  7471. represent==None and type in ('list:integer','list:string') else represent
  7472. self.compute = compute
  7473. self.isattachment = True
  7474. self.custom_store = custom_store
  7475. self.custom_retrieve = custom_retrieve
  7476. self.custom_retrieve_file_properties = custom_retrieve_file_properties
  7477. self.custom_delete = custom_delete
  7478. self.filter_in = filter_in
  7479. self.filter_out = filter_out
  7480. self.custom_qualifier = custom_qualifier
  7481. self.label = label if label!=None else fieldname.replace('_',' ').title()
  7482. self.requires = requires if requires!=None else []
  7483. def set_attributes(self,*args,**attributes):
  7484. self.__dict__.update(*args,**attributes)
  7485. def clone(self,point_self_references_to=False,**args):
  7486. field = copy.copy(self)
  7487. if point_self_references_to and \
7488. field.type == 'reference %s' % field._tablename:
  7489. field.type = 'reference %s' % point_self_references_to
  7490. field.__dict__.update(args)
  7491. return field
  7492. def store(self, file, filename=None, path=None):
  7493. if self.custom_store:
  7494. return self.custom_store(file,filename,path)
  7495. if isinstance(file, cgi.FieldStorage):
  7496. file = file.file
  7497. filename = filename or file.filename
  7498. elif not filename:
  7499. filename = file.name
  7500. filename = os.path.basename(filename.replace('/', os.sep)\
  7501. .replace('\\', os.sep))
  7502. m = REGEX_STORE_PATTERN.search(filename)
  7503. extension = m and m.group('e') or 'txt'
  7504. uuid_key = web2py_uuid().replace('-', '')[-16:]
  7505. encoded_filename = base64.b16encode(filename).lower()
  7506. newfilename = '%s.%s.%s.%s' % \
  7507. (self._tablename, self.name, uuid_key, encoded_filename)
  7508. newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
  7509. self_uploadfield = self.uploadfield
  7510. if isinstance(self_uploadfield,Field):
  7511. blob_uploadfield_name = self_uploadfield.uploadfield
  7512. keys={self_uploadfield.name: newfilename,
  7513. blob_uploadfield_name: file.read()}
  7514. self_uploadfield.table.insert(**keys)
  7515. elif self_uploadfield == True:
  7516. if path:
  7517. pass
  7518. elif self.uploadfolder:
  7519. path = self.uploadfolder
  7520. elif self.db._adapter.folder:
  7521. path = pjoin(self.db._adapter.folder, '..', 'uploads')
  7522. else:
  7523. raise RuntimeError,\
  7524. "you must specify a Field(...,uploadfolder=...)"
  7525. if self.uploadseparate:
  7526. if self.uploadfs:
  7527. raise RuntimeError, "not supported"
  7528. path = pjoin(path,"%s.%s" %(self._tablename, self.name),
  7529. uuid_key[:2])
  7530. if not exists(path):
  7531. os.makedirs(path)
  7532. pathfilename = pjoin(path, newfilename)
  7533. if self.uploadfs:
  7534. dest_file = self.uploadfs.open(newfilename, 'wb')
  7535. else:
  7536. dest_file = open(pathfilename, 'wb')
  7537. try:
  7538. shutil.copyfileobj(file, dest_file)
  7539. except IOError:
7540. raise IOError, 'Unable to store file "%s" because of invalid permissions, a read-only file system, or a filename that is too long' % pathfilename
  7541. dest_file.close()
  7542. return newfilename
  7543. def retrieve(self, name, path=None):
  7544. self_uploadfield = self.uploadfield
  7545. if self.custom_retrieve:
  7546. return self.custom_retrieve(name, path)
  7547. import http
  7548. if self.authorize or isinstance(self_uploadfield, str):
  7549. row = self.db(self == name).select().first()
  7550. if not row:
  7551. raise http.HTTP(404)
  7552. if self.authorize and not self.authorize(row):
  7553. raise http.HTTP(403)
  7554. m = REGEX_UPLOAD_PATTERN.match(name)
  7555. if not m or not self.isattachment:
  7556. raise TypeError, 'Can\'t retrieve %s' % name
  7557. file_properties = self.retrieve_file_properties(name,path)
  7558. filename = file_properties['filename']
  7559. if isinstance(self_uploadfield, str): # ## if file is in DB
  7560. stream = cStringIO.StringIO(row[self_uploadfield] or '')
  7561. elif isinstance(self_uploadfield,Field):
  7562. blob_uploadfield_name = self_uploadfield.uploadfield
  7563. query = self_uploadfield == name
  7564. data = self_uploadfield.table(query)[blob_uploadfield_name]
  7565. stream = cStringIO.StringIO(data)
  7566. elif self.uploadfs:
  7567. # ## if file is on pyfilesystem
  7568. stream = self.uploadfs.open(name, 'rb')
  7569. else:
  7570. # ## if file is on regular filesystem
  7571. stream = open(pjoin(file_properties['path'], name), 'rb')
  7572. return (filename, stream)
  7573. def retrieve_file_properties(self, name, path=None):
  7574. self_uploadfield = self.uploadfield
  7575. if self.custom_retrieve_file_properties:
  7576. return self.custom_retrieve_file_properties(name, path)
  7577. try:
  7578. m = REGEX_UPLOAD_PATTERN.match(name)
  7579. if not m or not self.isattachment:
  7580. raise TypeError, 'Can\'t retrieve %s file properties' % name
  7581. filename = base64.b16decode(m.group('name'), True)
  7582. filename = REGEX_CLEANUP_FN.sub('_', filename)
  7583. except (TypeError, AttributeError):
  7584. filename = name
  7585. if isinstance(self_uploadfield, str): # ## if file is in DB
  7586. return dict(path=None,filename=filename)
  7587. elif isinstance(self_uploadfield,Field):
  7588. return dict(path=None,filename=filename)
  7589. else:
  7590. # ## if file is on filesystem
  7591. if path:
  7592. pass
  7593. elif self.uploadfolder:
  7594. path = self.uploadfolder
  7595. else:
  7596. path = pjoin(self.db._adapter.folder, '..', 'uploads')
  7597. if self.uploadseparate:
  7598. t = m.group('table')
  7599. f = m.group('field')
  7600. u = m.group('uuidkey')
  7601. path = pjoin(path,"%s.%s" % (t,f),u[:2])
  7602. return dict(path=path,filename=filename)
  7603. def formatter(self, value):
  7604. requires = self.requires
  7605. if value is None or not requires:
  7606. return value
  7607. if not isinstance(requires, (list, tuple)):
  7608. requires = [requires]
  7609. elif isinstance(requires, tuple):
  7610. requires = list(requires)
  7611. else:
  7612. requires = copy.copy(requires)
  7613. requires.reverse()
  7614. for item in requires:
  7615. if hasattr(item, 'formatter'):
  7616. value = item.formatter(value)
  7617. return value
  7618. def validate(self, value):
  7619. if not self.requires:
  7620. return (value, None)
  7621. requires = self.requires
  7622. if not isinstance(requires, (list, tuple)):
  7623. requires = [requires]
  7624. for validator in requires:
  7625. (value, error) = validator(value)
  7626. if error:
  7627. return (value, error)
  7628. return (value, None)
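# validate sketch: runs the field's validators in order and returns
# (value, error); IS_INT_IN_RANGE is a standard web2py validator and
# the field name is hypothetical:
# >>> age = Field('age', 'integer', requires=IS_INT_IN_RANGE(0, 120))
# >>> # age.validate('42') -> (42, None)
# >>> # age.validate('-1') -> (-1, <error message>)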
  7629. def count(self, distinct=None):
  7630. return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
  7631. def __nonzero__(self):
  7632. return True
  7633. def __str__(self):
  7634. try:
  7635. return '%s.%s' % (self.tablename, self.name)
  7636. except:
  7637. return '<no table>.%s' % self.name
  7638. class Query(object):
  7639. """
  7640. a query object necessary to define a set.
  7641. it can be stored or can be passed to DAL.__call__() to obtain a Set
  7642. Example::
  7643. query = db.users.name=='Max'
  7644. set = db(query)
  7645. records = set.select()
  7646. """
  7647. def __init__(
  7648. self,
  7649. db,
  7650. op,
  7651. first=None,
  7652. second=None,
  7653. ignore_common_filters = False,
  7654. ):
  7655. self.db = self._db = db
  7656. self.op = op
  7657. self.first = first
  7658. self.second = second
  7659. self.ignore_common_filters = ignore_common_filters
  7660. def __repr__(self):
  7661. return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)
  7662. def __str__(self):
  7663. return self.db._adapter.expand(self)
  7664. def __and__(self, other):
  7665. return Query(self.db,self.db._adapter.AND,self,other)
  7666. def __or__(self, other):
  7667. return Query(self.db,self.db._adapter.OR,self,other)
  7668. def __invert__(self):
  7669. if self.op==self.db._adapter.NOT:
  7670. return self.first
  7671. return Query(self.db,self.db._adapter.NOT,self)
  7672. def case(self,t=1,f=0):
  7673. return self.db._adapter.CASE(self,t,f)
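# case sketch: turns a Query into a CASE WHEN expression that can be
# selected like a field (hypothetical table):
# >>> flag = (db.person.name=='Max').case('yes','no')
# >>> db().select(db.person.id, flag)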
  7674. def xorify(orderby):
  7675. if not orderby:
  7676. return None
  7677. orderby2 = orderby[0]
  7678. for item in orderby[1:]:
  7679. orderby2 = orderby2 | item
  7680. return orderby2
  7681. def use_common_filters(query):
  7682. return (query and hasattr(query,'ignore_common_filters') and \
  7683. not query.ignore_common_filters)
  7684. class Set(object):
  7685. """
  7686. a Set represents a set of records in the database,
  7687. the records are identified by the query=Query(...) object.
  7688. normally the Set is generated by DAL.__call__(Query(...))
  7689. given a set, for example
  7690. set = db(db.users.name=='Max')
  7691. you can:
7692. set.update(name='Massimo')
  7693. set.delete() # all elements in the set
  7694. set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
  7695. and take subsets:
  7696. subset = set(db.users.id<5)
  7697. """
  7698. def __init__(self, db, query, ignore_common_filters = None):
  7699. self.db = db
  7700. self._db = db # for backward compatibility
  7701. if not ignore_common_filters is None and \
  7702. use_common_filters(query) == ignore_common_filters:
  7703. query = copy.copy(query)
  7704. query.ignore_common_filters = ignore_common_filters
  7705. self.query = query
  7706. def __repr__(self):
  7707. return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)
  7708. def __call__(self, query, ignore_common_filters=False):
  7709. if isinstance(query,Table):
  7710. query = self.db._adapter.id_query(query)
  7711. elif isinstance(query,str):
  7712. query = Expression(self.db,query)
  7713. elif isinstance(query,Field):
  7714. query = query!=None
  7715. if self.query:
  7716. return Set(self.db, self.query & query,
  7717. ignore_common_filters = ignore_common_filters)
  7718. else:
  7719. return Set(self.db, query,
  7720. ignore_common_filters = ignore_common_filters)
  7721. def _count(self,distinct=None):
  7722. return self.db._adapter._count(self.query,distinct)
  7723. def _select(self, *fields, **attributes):
  7724. adapter = self.db._adapter
  7725. tablenames = adapter.tables(self.query,
  7726. attributes.get('join',None),
  7727. attributes.get('left',None),
  7728. attributes.get('orderby',None),
  7729. attributes.get('groupby',None))
  7730. fields = adapter.expand_all(fields, tablenames)
  7731. return adapter._select(self.query,fields,attributes)
  7732. def _delete(self):
  7733. db = self.db
  7734. tablename = db._adapter.get_table(self.query)
  7735. return db._adapter._delete(tablename,self.query)
  7736. def _update(self, **update_fields):
  7737. db = self.db
  7738. tablename = db._adapter.get_table(self.query)
  7739. fields = db[tablename]._listify(update_fields,update=True)
  7740. return db._adapter._update(tablename,self.query,fields)
  7741. def isempty(self):
  7742. return not self.select(limitby=(0,1))
  7743. def count(self,distinct=None, cache=None):
  7744. db = self.db
  7745. if cache:
  7746. cache_model, time_expire = cache
  7747. sql = self._count(distinct=distinct)
  7748. key = db._uri + '/' + sql
  7749. if len(key)>200: key = hashlib.md5(key).hexdigest()
  7750. return cache_model(
  7751. key,
  7752. (lambda self=self,distinct=distinct: \
  7753. db._adapter.count(self.query,distinct)),
  7754. time_expire)
  7755. return db._adapter.count(self.query,distinct)
  7756. def select(self, *fields, **attributes):
  7757. adapter = self.db._adapter
  7758. tablenames = adapter.tables(self.query,
  7759. attributes.get('join',None),
  7760. attributes.get('left',None),
  7761. attributes.get('orderby',None),
  7762. attributes.get('groupby',None))
  7763. fields = adapter.expand_all(fields, tablenames)
  7764. return adapter.select(self.query,fields,attributes)
  7765. def nested_select(self,*fields,**attributes):
  7766. return Expression(self.db,self._select(*fields,**attributes))
  7767. def delete(self):
  7768. db = self.db
  7769. tablename = db._adapter.get_table(self.query)
  7770. table = db[tablename]
  7771. if any(f(self) for f in table._before_delete): return 0
  7772. ret = db._adapter.delete(tablename,self.query)
  7773. ret and [f(self) for f in table._after_delete]
  7774. return ret
  7775. def update(self, **update_fields):
  7776. db = self.db
  7777. tablename = db._adapter.get_table(self.query)
  7778. table = db[tablename]
  7779. table._attempt_upload(update_fields)
  7780. if any(f(self,update_fields) for f in table._before_update):
  7781. return 0
  7782. fields = table._listify(update_fields,update=True)
  7783. if not fields:
  7784. raise SyntaxError, "No fields to update"
  7785. ret = db._adapter.update(tablename,self.query,fields)
  7786. ret and [f(self,update_fields) for f in table._after_update]
  7787. return ret
  7788. def update_naive(self, **update_fields):
  7789. """
  7790. same as update but does not call table._before_update and _after_update
  7791. """
  7792. tablename = self.db._adapter.get_table(self.query)
  7793. table = self.db[tablename]
  7794. fields = table._listify(update_fields,update=True)
  7795. if not fields: raise SyntaxError, "No fields to update"
  7796. ret = self.db._adapter.update(tablename,self.query,fields)
  7797. return ret
  7798. def validate_and_update(self, **update_fields):
  7799. tablename = self.db._adapter.get_table(self.query)
  7800. response = Row()
  7801. response.errors = Row()
  7802. new_fields = copy.copy(update_fields)
  7803. for key,value in update_fields.iteritems():
  7804. value,error = self.db[tablename][key].validate(value)
  7805. if error:
  7806. response.errors[key] = error
  7807. else:
  7808. new_fields[key] = value
  7809. table = self.db[tablename]
  7810. if response.errors:
  7811. response.updated = None
  7812. else:
  7813. if not any(f(self,new_fields) for f in table._before_update):
  7814. fields = table._listify(new_fields,update=True)
  7815. if not fields: raise SyntaxError, "No fields to update"
  7816. ret = self.db._adapter.update(tablename,self.query,fields)
  7817. ret and [f(self,new_fields) for f in table._after_update]
  7818. else:
  7819. ret = 0
7820. response.updated = ret
  7821. return response
  7822. def delete_uploaded_files(self, upload_fields=None):
  7823. table = self.db[self.db._adapter.tables(self.query)[0]]
  7824. # ## mind uploadfield==True means file is not in DB
  7825. if upload_fields:
  7826. fields = upload_fields.keys()
  7827. else:
  7828. fields = table.fields
  7829. fields = [f for f in fields if table[f].type == 'upload'
  7830. and table[f].uploadfield == True
  7831. and table[f].autodelete]
  7832. if not fields:
  7833. return False
  7834. for record in self.select(*[table[f] for f in fields]):
  7835. for fieldname in fields:
  7836. field = table[fieldname]
  7837. oldname = record.get(fieldname, None)
  7838. if not oldname:
  7839. continue
  7840. if upload_fields and oldname == upload_fields[fieldname]:
  7841. continue
  7842. if field.custom_delete:
  7843. field.custom_delete(oldname)
  7844. else:
  7845. uploadfolder = field.uploadfolder
  7846. if not uploadfolder:
  7847. uploadfolder = pjoin(
  7848. self.db._adapter.folder, '..', 'uploads')
  7849. if field.uploadseparate:
  7850. items = oldname.split('.')
  7851. uploadfolder = pjoin(
  7852. uploadfolder,
  7853. "%s.%s" % (items[0], items[1]),
  7854. items[2][:2])
  7855. oldpath = pjoin(uploadfolder, oldname)
  7856. if exists(oldpath):
  7857. os.unlink(oldpath)
  7858. return False
  7859. class RecordUpdater(object):
  7860. def __init__(self, colset, table, id):
  7861. self.colset, self.db, self.tablename, self.id = \
  7862. colset, table._db, table._tablename, id
  7863. def __call__(self, **fields):
  7864. colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
  7865. table = db[tablename]
  7866. newfields = fields or dict(colset)
  7867. for fieldname in newfields.keys():
  7868. if not fieldname in table.fields or table[fieldname].type=='id':
  7869. del newfields[fieldname]
  7870. table._db(table._id==id,ignore_common_filters=True).update(**newfields)
  7871. colset.update(newfields)
  7872. return colset
  7873. class RecordDeleter(object):
  7874. def __init__(self, table, id):
  7875. self.db, self.tablename, self.id = table._db, table._tablename, id
  7876. def __call__(self):
  7877. return self.db(self.db[self.tablename]._id==self.id).delete()
  7878. class LazySet(object):
  7879. def __init__(self, field, id):
  7880. self.db, self.tablename, self.fieldname, self.id = \
  7881. field.db, field._tablename, field.name, id
  7882. def _getset(self):
  7883. query = self.db[self.tablename][self.fieldname]==self.id
  7884. return Set(self.db,query)
  7885. def __repr__(self):
  7886. return repr(self._getset())
  7887. def __call__(self, query, ignore_common_filters=False):
  7888. return self._getset()(query, ignore_common_filters)
  7889. def _count(self,distinct=None):
  7890. return self._getset()._count(distinct)
  7891. def _select(self, *fields, **attributes):
7892. return self._getset()._select(*fields,**attributes)
  7893. def _delete(self):
  7894. return self._getset()._delete()
  7895. def _update(self, **update_fields):
  7896. return self._getset()._update(**update_fields)
  7897. def isempty(self):
  7898. return self._getset().isempty()
  7899. def count(self,distinct=None, cache=None):
  7900. return self._getset().count(distinct,cache)
  7901. def select(self, *fields, **attributes):
  7902. return self._getset().select(*fields,**attributes)
  7903. def nested_select(self,*fields,**attributes):
  7904. return self._getset().nested_select(*fields,**attributes)
  7905. def delete(self):
  7906. return self._getset().delete()
  7907. def update(self, **update_fields):
  7908. return self._getset().update(**update_fields)
  7909. def update_naive(self, **update_fields):
  7910. return self._getset().update_naive(**update_fields)
  7911. def validate_and_update(self, **update_fields):
  7912. return self._getset().validate_and_update(**update_fields)
  7913. def delete_uploaded_files(self, upload_fields=None):
  7914. return self._getset().delete_uploaded_files(upload_fields)
  7915. class VirtualCommand(object):
  7916. def __init__(self,method,row):
  7917. self.method=method
  7918. self.row=row
  7919. def __call__(self,*args,**kwargs):
  7920. return self.method(self.row,*args,**kwargs)
  7921. def lazy_virtualfield(f):
  7922. f.__lazy__ = True
  7923. return f
  7924. class Rows(object):
  7925. """
  7926. A wrapper for the return value of a select. It basically represents a table.
  7927. It has an iterator and each row is represented as a dictionary.
  7928. """
  7929. # ## TODO: this class still needs some work to care for ID/OID
  7930. def __init__(
  7931. self,
  7932. db=None,
  7933. records=[],
  7934. colnames=[],
  7935. compact=True,
  7936. rawrows=None
  7937. ):
  7938. self.db = db
  7939. self.records = records
  7940. self.colnames = colnames
  7941. self.compact = compact
  7942. self.response = rawrows
  7943. def __repr__(self):
  7944. return '<Rows (%s)>' % len(self.records)
  7945. def setvirtualfields(self,**keyed_virtualfields):
  7946. """
  7947. db.define_table('x',Field('number','integer'))
  7948. if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]
  7949. from gluon.dal import lazy_virtualfield
  7950. class MyVirtualFields(object):
  7951. # normal virtual field (backward compatible, discouraged)
  7952. def normal_shift(self): return self.x.number+1
7953. # lazy virtual field (because of @lazy_virtualfield)
  7954. @lazy_virtualfield
  7955. def lazy_shift(instance,row,delta=4): return row.x.number+delta
  7956. db.x.virtualfields.append(MyVirtualFields())
  7957. for row in db(db.x).select():
  7958. print row.number, row.normal_shift, row.lazy_shift(delta=7)
  7959. """
  7960. if not keyed_virtualfields:
  7961. return self
  7962. for row in self.records:
  7963. for (tablename,virtualfields) in keyed_virtualfields.iteritems():
  7964. attributes = dir(virtualfields)
  7965. if not tablename in row:
  7966. box = row[tablename] = Row()
  7967. else:
  7968. box = row[tablename]
  7969. updated = False
  7970. for attribute in attributes:
  7971. if attribute[0] != '_':
  7972. method = getattr(virtualfields,attribute)
  7973. if hasattr(method,'__lazy__'):
  7974. box[attribute]=VirtualCommand(method,row)
  7975. elif type(method)==types.MethodType:
  7976. if not updated:
  7977. virtualfields.__dict__.update(row)
  7978. updated = True
  7979. box[attribute]=method()
  7980. return self
  7981. def __and__(self,other):
  7982. if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects'
  7983. records = self.records+other.records
  7984. return Rows(self.db,records,self.colnames)
  7985. def __or__(self,other):
  7986. if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects'
  7987. records = self.records
  7988. records += [record for record in other.records \
  7989. if not record in records]
  7990. return Rows(self.db,records,self.colnames)
  7991. def __nonzero__(self):
  7992. if len(self.records):
  7993. return 1
  7994. return 0
  7995. def __len__(self):
  7996. return len(self.records)
  7997. def __getslice__(self, a, b):
  7998. return Rows(self.db,self.records[a:b],self.colnames)
  7999. def __getitem__(self, i):
  8000. row = self.records[i]
  8001. keys = row.keys()
  8002. if self.compact and len(keys) == 1 and keys[0] != '_extra':
8003. return row[keys[0]]
  8004. return row
  8005. def __iter__(self):
  8006. """
  8007. iterator over records
  8008. """
  8009. for i in xrange(len(self)):
  8010. yield self[i]
  8011. def __str__(self):
  8012. """
  8013. serializes the table into a csv file
  8014. """
  8015. s = cStringIO.StringIO()
  8016. self.export_to_csv_file(s)
  8017. return s.getvalue()
  8018. def first(self):
  8019. if not self.records:
  8020. return None
  8021. return self[0]
  8022. def last(self):
  8023. if not self.records:
  8024. return None
  8025. return self[-1]
  8026. def find(self,f,limitby=None):
  8027. """
  8028. returns a new Rows object, a subset of the original object,
  8029. filtered by the function f
  8030. """
  8031. if not self:
  8032. return Rows(self.db, [], self.colnames)
  8033. records = []
  8034. if limitby:
  8035. a,b = limitby
  8036. else:
  8037. a,b = 0,len(self)
  8038. k = 0
  8039. for row in self:
  8040. if f(row):
  8041. if a<=k: records.append(row)
  8042. k += 1
  8043. if k==b: break
  8044. return Rows(self.db, records, self.colnames)
  8045. def exclude(self, f):
  8046. """
  8047. removes elements from the calling Rows object, filtered by the function f,
  8048. and returns a new Rows object containing the removed elements
  8049. """
  8050. if not self.records:
  8051. return Rows(self.db, [], self.colnames)
  8052. removed = []
  8053. i=0
  8054. while i<len(self):
  8055. row = self[i]
  8056. if f(row):
  8057. removed.append(self.records[i])
  8058. del self.records[i]
  8059. else:
  8060. i += 1
  8061. return Rows(self.db, removed, self.colnames)
  8062. def sort(self, f, reverse=False):
  8063. """
  8064. returns a list of sorted elements (not sorted in place)
  8065. """
  8066. rows = Rows(self.db,[],self.colnames,compact=False)
  8067. rows.records = sorted(self,key=f,reverse=reverse)
  8068. return rows
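# Rows filtering sketch using find/exclude/sort on a hypothetical
# 'person' table; only find and sort leave the original Rows intact:
# >>> rows = db(db.person).select()
# >>> maxes = rows.find(lambda row: row.name == 'Max')
# >>> removed = rows.exclude(lambda row: row.name == 'Max')
# >>> by_name = rows.sort(lambda row: row.name)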
  8069. def group_by_value(self, field):
  8070. """
  8071. regroups the rows, by one of the fields
  8072. """
  8073. if not self.records:
  8074. return {}
  8075. key = str(field)
  8076. grouped_row_group = dict()
  8077. for row in self:
  8078. value = row[key]
  8079. if not value in grouped_row_group:
  8080. grouped_row_group[value] = [row]
  8081. else:
  8082. grouped_row_group[value].append(row)
  8083. return grouped_row_group
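# group_by_value sketch: maps each distinct value of the given field
# to the list of rows holding it (hypothetical table):
# >>> groups = db(db.person).select().group_by_value(db.person.name)
# >>> # groups['Max'] -> [<Row ...>, ...]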
  8084. def as_list(self,
  8085. compact=True,
  8086. storage_to_dict=True,
  8087. datetime_to_str=True,
  8088. custom_types=None):
  8089. """
  8090. returns the data as a list or dictionary.
8091. :param storage_to_dict: when True returns a dict, otherwise a list (default True)
  8092. :param datetime_to_str: convert datetime fields as strings (default True)
  8093. """
  8094. (oc, self.compact) = (self.compact, compact)
  8095. if storage_to_dict:
  8096. items = [item.as_dict(datetime_to_str, custom_types) for item in self]
  8097. else:
  8098. items = [item for item in self]
  8099. self.compact = compact
  8100. return items
  8101. def as_dict(self,
  8102. key='id',
  8103. compact=True,
  8104. storage_to_dict=True,
  8105. datetime_to_str=True,
  8106. custom_types=None):
  8107. """
  8108. returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)
  8109. :param key: the name of the field to be used as dict key, normally the id
8110. :param compact: whether to return rows in compact form (default True)
8111. :param storage_to_dict: when True returns a dict, otherwise a list (default True)
  8112. :param datetime_to_str: convert datetime fields as strings (default True)
  8113. """
  8114. rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
  8115. if isinstance(key,str) and key.count('.')==1:
  8116. (table, field) = key.split('.')
  8117. return dict([(r[table][field],r) for r in rows])
  8118. elif isinstance(key,str):
  8119. return dict([(r[key],r) for r in rows])
  8120. else:
  8121. return dict([(key(r),r) for r in rows])
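# as_dict sketch: keyed by the 'id' field by default, so that field
# must be among the selected columns (hypothetical table):
# >>> people = db(db.person).select().as_dict()
# >>> # people[1]['name'] -> the name of the record with id 1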
  8122. def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
  8123. """
  8124. export data to csv, the first line contains the column names
  8125. :param ofile: where the csv must be exported to
  8126. :param null: how null values must be represented (default '<NULL>')
  8127. :param delimiter: delimiter to separate values (default ',')
  8128. :param quotechar: character to use to quote string values (default '"')
  8129. :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
  8130. :param represent: use the fields .represent value (default False)
  8131. :param colnames: list of column names to use (default self.colnames)
8132. This only works when exporting Rows objects!
8133. DO NOT use this with db.export_to_csv()
  8134. """
  8135. delimiter = kwargs.get('delimiter', ',')
  8136. quotechar = kwargs.get('quotechar', '"')
  8137. quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
  8138. represent = kwargs.get('represent', False)
  8139. writer = csv.writer(ofile, delimiter=delimiter,
  8140. quotechar=quotechar, quoting=quoting)
  8141. colnames = kwargs.get('colnames', self.colnames)
  8142. write_colnames = kwargs.get('write_colnames',True)
  8143. # a proper csv starting with the column names
  8144. if write_colnames:
  8145. writer.writerow(colnames)
  8146. def none_exception(value):
  8147. """
  8148. returns a cleaned up value that can be used for csv export:
  8149. - unicode text is encoded as such
  8150. - None values are replaced with the given representation (default <NULL>)
  8151. """
  8152. if value is None:
  8153. return null
  8154. elif isinstance(value, unicode):
  8155. return value.encode('utf8')
  8156. elif isinstance(value,Reference):
  8157. return int(value)
  8158. elif hasattr(value, 'isoformat'):
  8159. return value.isoformat()[:19].replace('T', ' ')
  8160. elif isinstance(value, (list,tuple)): # for type='list:..'
  8161. return bar_encode(value)
  8162. return value
  8163. for record in self:
  8164. row = []
  8165. for col in colnames:
  8166. if not REGEX_TABLE_DOT_FIELD.match(col):
  8167. row.append(record._extra[col])
  8168. else:
  8169. (t, f) = col.split('.')
  8170. field = self.db[t][f]
  8171. if isinstance(record.get(t, None), (Row,dict)):
  8172. value = record[t][f]
  8173. else:
  8174. value = record[f]
  8175. if field.type=='blob' and not value is None:
  8176. value = base64.b64encode(value)
  8177. elif represent and field.represent:
  8178. value = field.represent(value)
  8179. row.append(none_exception(value))
  8180. writer.writerow(row)
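# CSV round-trip sketch: export a select with its column names, then
# re-import it elsewhere (file name hypothetical):
# >>> db(db.person).select().export_to_csv_file(open('person.csv','wb'))
# >>> db.person.import_from_csv_file(open('person.csv','rb'))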
  8181. def xml(self,strict=False,row_name='row',rows_name='rows'):
  8182. """
  8183. serializes the table using sqlhtml.SQLTABLE (if present)
  8184. """
  8185. if strict:
  8186. ncols = len(self.colnames)
  8187. def f(row,field,indent=' '):
  8188. if isinstance(row,Row):
  8189. spc = indent+' \n'
  8190. items = [f(row[x],x,indent+' ') for x in row]
  8191. return '%s<%s>\n%s\n%s</%s>' % (
  8192. indent,
  8193. field,
  8194. spc.join(item for item in items if item),
  8195. indent,
  8196. field)
  8197. elif not callable(row):
  8198. if REGEX_ALPHANUMERIC.match(field):
  8199. return '%s<%s>%s</%s>' % (indent,field,row,field)
  8200. else:
  8201. return '%s<extra name="%s">%s</extra>' % \
  8202. (indent,field,row)
  8203. else:
  8204. return None
  8205. return '<%s>\n%s\n</%s>' % (
  8206. rows_name,
  8207. '\n'.join(f(row,row_name) for row in self),
  8208. rows_name)
  8209. import sqlhtml
  8210. return sqlhtml.SQLTABLE(self).xml()
  8211. def json(self, mode='object', default=None):
  8212. """
  8213. serializes the table to a JSON list of objects
  8214. """
  8215. mode = mode.lower()
  8216. if not mode in ['object', 'array']:
  8217. raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode
  8218. def inner_loop(record, col):
  8219. (t, f) = col.split('.')
  8220. res = None
  8221. if not REGEX_TABLE_DOT_FIELD.match(col):
  8222. key = col
  8223. res = record._extra[col]
  8224. else:
  8225. key = f
  8226. if isinstance(record.get(t, None), Row):
  8227. res = record[t][f]
  8228. else:
  8229. res = record[f]
  8230. if mode == 'object':
  8231. return (key, res)
  8232. else:
  8233. return res
  8234. if mode == 'object':
  8235. items = [dict([inner_loop(record, col) for col in
  8236. self.colnames]) for record in self]
  8237. else:
  8238. items = [[inner_loop(record, col) for col in self.colnames]
  8239. for record in self]
  8240. if have_serializers:
  8241. return serializers.json(items,default=default or serializers.custom_json)
  8242. else:
  8243. try:
  8244. import json as simplejson
  8245. except ImportError:
  8246. import gluon.contrib.simplejson as simplejson
  8247. return simplejson.dumps(items)
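# json sketch: 'object' mode yields a list of objects keyed by column
# name, 'array' mode a list of value lists (output data hypothetical):
# >>> db(db.person).select().json(mode='object')
# >>> # e.g. '[{"id": 1, "name": "Max"}]'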
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True, notnull=True),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

    Insert a record

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
              uploadf=None, integerf=5, doublef=3.14,\
              datef=datetime.date(2001, 1, 1),\
              timef=datetime.time(12, 30, 15),\
              datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name="Marco", birth='2005-06-22')
    >>> person_id = db.person.insert(name="Massimo", birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)
    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)
    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of a one-to-many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1
    >>> len(db().select(db.person.ALL, db.dog.name, left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of a many-to-many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of a search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of a search condition using a nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45
    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in CSV

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)
    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################

SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field  # was necessary in gluon/globals.py session.connect
DAL.Table = Table  # was necessary in gluon/globals.py session.connect
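# Compatibility sketch (illustrative): legacy application code such as
#
#     db = SQLDB('sqlite://storage.sqlite')
#     db.define_table('thing', SQLField('name'))
#
# keeps working unchanged, since SQLDB and SQLField are now plain aliases
# of DAL and Field.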
################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    import doctest
    doctest.testmod()
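    # The test_all doctests read sys.argv, so (as a usage sketch) the suite
    # can be pointed at another backend by passing a DAL URI on the command
    # line, e.g.:
    #
    #     python dal.py sqlite://test.db
    #     python dal.py postgres://user:password@localhost/test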