/gluon/dal.py
Note: this large file is truncated; only the first portion is shown below.
- #!/bin/env python
- # -*- coding: utf-8 -*-
- """
- This file is part of the web2py Web Framework
- Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
- License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
- Thanks to
- * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
- * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
- * Denes
- * Chris Clark
- * clach05
- * Denes Lengyel
- * and many others who have contributed to current and previous versions
- This file contains the DAL support for many relational databases,
- including:
- - SQLite & SpatiaLite
- - MySQL
- - Postgres
- - Firebird
- - Oracle
- - MS SQL
- - DB2
- - Interbase
- - Ingres
- - Informix (9+ and SE)
- - SapDB (experimental)
- - Cubrid (experimental)
- - CouchDB (experimental)
- - MongoDB (in progress)
- - Google:nosql
- - Google:sql
- - Teradata
- - IMAP (experimental)
- Example of usage:
- >>> # from dal import DAL, Field
- ### create DAL connection (and create DB if it doesn't exist)
- >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
- ... folder=None)
- ### define a table 'person' (create/alter as necessary)
- >>> person = db.define_table('person',Field('name','string'))
- ### insert a record
- >>> id = person.insert(name='James')
- ### retrieve it by id
- >>> james = person(id)
- ### retrieve it by name
- >>> james = person(name='James')
- ### retrieve it by arbitrary query
- >>> query = (person.name=='James') & (person.name.startswith('J'))
- >>> james = db(query).select(person.ALL)[0]
- ### update one record
- >>> james.update_record(name='Jim')
- <Row {'id': 1, 'name': 'Jim'}>
- ### update multiple records by query
- >>> db(person.name.like('J%')).update(name='James')
- 1
- ### delete records by query
- >>> db(person.name.lower() == 'jim').delete()
- 0
- ### retrieve multiple records (rows)
- >>> people = db(person).select(orderby=person.name,
- ... groupby=person.name, limitby=(0,100))
- ### further filter them
- >>> james = people.find(lambda row: row.name == 'James').first()
- >>> print james.id, james.name
- 1 James
- ### check aggregates
- >>> counter = person.id.count()
- >>> print db(person).select(counter).first()(counter)
- 1
- ### delete one record
- >>> james.delete_record()
- 1
- ### delete (drop) entire database table
- >>> person.drop()
- Supported field types:
- id string text boolean integer double decimal password upload
- blob time date datetime
- Supported DAL URI strings:
- 'sqlite://test.db'
- 'spatialite://test.db'
- 'sqlite:memory'
- 'spatialite:memory'
- 'jdbc:sqlite://test.db'
- 'mysql://root:none@localhost/test'
- 'postgres://mdipierro:password@localhost/test'
- 'postgres:psycopg2://mdipierro:password@localhost/test'
- 'postgres:pg8000://mdipierro:password@localhost/test'
- 'jdbc:postgres://mdipierro:none@localhost/test'
- 'mssql://web2py:none@A64X2/web2py_test'
- 'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
- 'oracle://username:password@database'
- 'firebird://user:password@server:3050/database'
- 'db2://DSN=dsn;UID=user;PWD=pass'
- 'firebird://username:password@hostname/database'
- 'firebird_embedded://username:password@c://path'
- 'informix://user:password@server:3050/database'
- 'informixu://user:password@server:3050/database' # unicode informix
- 'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
- 'google:datastore' # for google app engine datastore
- 'google:sql' # for google app engine with sql (mysql compatible)
- 'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
- 'imap://user:password@server:port' # experimental
- 'mongodb://user:password@server:port/database' # experimental
- For more info:
- help(DAL)
- help(Field)
- """
- ###################################################################################
- # this file only exposes DAL and Field
- ###################################################################################
- __all__ = ['DAL', 'Field']
- DEFAULTLENGTH = {'string':512,
- 'password':512,
- 'upload':512,
- 'text':2**15,
- 'blob':2**31}
- TIMINGSSIZE = 100
- SPATIALLIBS = {
- 'Windows':'libspatialite',
- 'Linux':'libspatialite.so',
- 'Darwin':'libspatialite.dylib'
- }
- DEFAULT_URI = 'sqlite://dummy.db'
- import re
- import sys
- import locale
- import os
- import types
- import datetime
- import threading
- import time
- import csv
- import cgi
- import copy
- import socket
- import logging
- import base64
- import shutil
- import marshal
- import decimal
- import struct
- import urllib
- import hashlib
- import uuid
- import glob
- import traceback
- import platform
- PYTHON_VERSION = sys.version_info[0]
- if PYTHON_VERSION == 2:
- import cPickle as pickle
- import cStringIO as StringIO
- import copy_reg as copyreg
- hashlib_md5 = hashlib.md5
- bytes, unicode = str, unicode
- else:
- import pickle
- from io import StringIO as StringIO
- import copyreg
- long = int
- hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
- bytes, unicode = bytes, str
- CALLABLETYPES = (types.LambdaType, types.FunctionType,
- types.BuiltinFunctionType,
- types.MethodType, types.BuiltinMethodType)
- TABLE_ARGS = set(
- ('migrate','primarykey','fake_migrate','format','redefine',
- 'singular','plural','trigger_name','sequence_name',
- 'common_filter','polymodel','table_class','on_define','actual_name'))
- SELECT_ARGS = set(
- ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
- 'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))
- ogetattr = object.__getattribute__
- osetattr = object.__setattr__
- exists = os.path.exists
- pjoin = os.path.join
- ###################################################################################
- # following checks allow the use of dal without web2py, as a standalone module
- ###################################################################################
- try:
- from utils import web2py_uuid
- except (ImportError, SystemError):
- import uuid
- def web2py_uuid(): return str(uuid.uuid4())
- try:
- import portalocker
- have_portalocker = True
- except ImportError:
- have_portalocker = False
- try:
- import serializers
- have_serializers = True
- except ImportError:
- have_serializers = False
- try:
- import json as simplejson
- except ImportError:
- try:
- import gluon.contrib.simplejson as simplejson
- except ImportError:
- simplejson = None
- try:
- import validators
- have_validators = True
- except (ImportError, SyntaxError):
- have_validators = False
- LOGGER = logging.getLogger("web2py.dal")
- DEFAULT = lambda:0
- GLOBAL_LOCKER = threading.RLock()
- THREAD_LOCAL = threading.local()
- # internal representation of tables with fields:
- # <table>.<field>; table and field names may only contain [a-zA-Z0-9_]
- REGEX_TYPE = re.compile('^([\w\_\:]+)')
- REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
- REGEX_W = re.compile('^\w+$')
- REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$')
- REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
- REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
- REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
- REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
- REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
- REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
- REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
- REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
- REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
- REGEX_QUOTES = re.compile("'[^']*'")
- REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
- REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
- REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'
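- # Editor's illustration (not part of the original file): two of the regexes
- # above in doctest form:
- # >>> bool(REGEX_TABLE_DOT_FIELD.match('person.name'))
- # True
- # >>> REGEX_UNPACK.split('a|b||c')   # '||' is an escaped '|' inside a list: value
- # ['a', 'b||c']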
- # list of drivers will be built on the fly
- # and lists only what is available
- DRIVERS = []
- try:
- from new import classobj
- from google.appengine.ext import db as gae
- from google.appengine.api import namespace_manager, rdbms
- from google.appengine.api.datastore_types import Key ### for belongs on ID
- from google.appengine.ext.db.polymodel import PolyModel
- DRIVERS.append('google')
- except ImportError:
- pass
- if not 'google' in DRIVERS:
- try:
- from pysqlite2 import dbapi2 as sqlite2
- DRIVERS.append('SQLite(sqlite2)')
- except ImportError:
- LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')
- try:
- from sqlite3 import dbapi2 as sqlite3
- DRIVERS.append('SQLite(sqlite3)')
- except ImportError:
- LOGGER.debug('no SQLite drivers sqlite3')
- try:
- # first try contrib driver, then from site-packages (if installed)
- try:
- import contrib.pymysql as pymysql
- # monkeypatch pymysql because they haven't fixed the bug:
- # https://github.com/petehunt/PyMySQL/issues/86
- pymysql.ESCAPE_REGEX = re.compile("'")
- pymysql.ESCAPE_MAP = {"'": "''"}
- # end monkeypatch
- except ImportError:
- import pymysql
- DRIVERS.append('MySQL(pymysql)')
- except ImportError:
- LOGGER.debug('no MySQL driver pymysql')
- try:
- import MySQLdb
- DRIVERS.append('MySQL(MySQLdb)')
- except ImportError:
- LOGGER.debug('no MySQL driver MySQLDB')
- try:
- import psycopg2
- from psycopg2.extensions import adapt as psycopg2_adapt
- DRIVERS.append('PostgreSQL(psycopg2)')
- except ImportError:
- LOGGER.debug('no PostgreSQL driver psycopg2')
- try:
- # first try contrib driver, then from site-packages (if installed)
- try:
- import contrib.pg8000.dbapi as pg8000
- except ImportError:
- import pg8000.dbapi as pg8000
- DRIVERS.append('PostgreSQL(pg8000)')
- except ImportError:
- LOGGER.debug('no PostgreSQL driver pg8000')
- try:
- import cx_Oracle
- DRIVERS.append('Oracle(cx_Oracle)')
- except ImportError:
- LOGGER.debug('no Oracle driver cx_Oracle')
- try:
- try:
- import pyodbc
- except ImportError:
- try:
- import contrib.pypyodbc as pyodbc
- except Exception as e:
- raise ImportError(str(e))
- DRIVERS.append('MSSQL(pyodbc)')
- DRIVERS.append('DB2(pyodbc)')
- DRIVERS.append('Teradata(pyodbc)')
- DRIVERS.append('Ingres(pyodbc)')
- except ImportError:
- LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc')
- try:
- import Sybase
- DRIVERS.append('Sybase(Sybase)')
- except ImportError:
- LOGGER.debug('no Sybase driver')
- try:
- import kinterbasdb
- DRIVERS.append('Interbase(kinterbasdb)')
- DRIVERS.append('Firebird(kinterbasdb)')
- except ImportError:
- LOGGER.debug('no Firebird/Interbase driver kinterbasdb')
- try:
- import fdb
- DRIVERS.append('Firebird(fdb)')
- except ImportError:
- LOGGER.debug('no Firebird driver fdb')
- #####
- try:
- import firebirdsql
- DRIVERS.append('Firebird(firebirdsql)')
- except ImportError:
- LOGGER.debug('no Firebird driver firebirdsql')
- try:
- import informixdb
- DRIVERS.append('Informix(informixdb)')
- LOGGER.warning('Informix support is experimental')
- except ImportError:
- LOGGER.debug('no Informix driver informixdb')
- try:
- import sapdb
- DRIVERS.append('SQL(sapdb)')
- LOGGER.warning('SAPDB support is experimental')
- except ImportError:
- LOGGER.debug('no SAP driver sapdb')
- try:
- import cubriddb
- DRIVERS.append('Cubrid(cubriddb)')
- LOGGER.warning('Cubrid support is experimental')
- except ImportError:
- LOGGER.debug('no Cubrid driver cubriddb')
- try:
- from com.ziclix.python.sql import zxJDBC
- import java.sql
- # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
- from org.sqlite import JDBC # required by java.sql; ensure we have it
- zxJDBC_sqlite = java.sql.DriverManager
- DRIVERS.append('PostgreSQL(zxJDBC)')
- DRIVERS.append('SQLite(zxJDBC)')
- LOGGER.warning('zxJDBC support is experimental')
- is_jdbc = True
- except ImportError:
- LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
- is_jdbc = False
- try:
- import couchdb
- DRIVERS.append('CouchDB(couchdb)')
- except ImportError:
- LOGGER.debug('no Couchdb driver couchdb')
- try:
- import pymongo
- DRIVERS.append('MongoDB(pymongo)')
- except:
- LOGGER.debug('no MongoDB driver pymongo')
- try:
- import imaplib
- DRIVERS.append('IMAP(imaplib)')
- except:
- LOGGER.debug('no IMAP driver imaplib')
- PLURALIZE_RULES = [
- (re.compile('child$'), re.compile('child$'), 'children'),
- (re.compile('oot$'), re.compile('oot$'), 'eet'),
- (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
- (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
- (re.compile('sis$'), re.compile('sis$'), 'ses'),
- (re.compile('man$'), re.compile('man$'), 'men'),
- (re.compile('ife$'), re.compile('ife$'), 'ives'),
- (re.compile('eau$'), re.compile('eau$'), 'eaux'),
- (re.compile('lf$'), re.compile('lf$'), 'lves'),
- (re.compile('[sxz]$'), re.compile('$'), 'es'),
- (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
- (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
- (re.compile('$'), re.compile('$'), 's'),
- ]
- def pluralize(singular, rules=PLURALIZE_RULES):
- for line in rules:
- re_search, re_sub, replace = line
- plural = re_search.search(singular) and re_sub.sub(replace, singular)
- if plural: return plural
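- # Editor's illustration (not part of the original file): doctests for
- # pluralize(); each input is transformed by the first matching rule above:
- # >>> pluralize('child')
- # 'children'
- # >>> pluralize('leaf')
- # 'leaves'
- # >>> pluralize('box')
- # 'boxes'
- # >>> pluralize('query')
- # 'queries'
- # >>> pluralize('person')
- # 'persons'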
- def hide_password(uri):
- if isinstance(uri,(list,tuple)):
- return [hide_password(item) for item in uri]
- return REGEX_NOPASSWD.sub('******',uri)
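- # Editor's illustration (not part of the original file); the doctest below
- # follows REGEX_NOPASSWD as defined above:
- # >>> hide_password('mysql://user:secret@localhost/test')
- # 'mysql:******@localhost/test'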
- def OR(a,b):
- return a|b
- def AND(a,b):
- return a&b
- def IDENTITY(x): return x
- def varquote_aux(name,quotestr='%s'):
- return name if REGEX_W.match(name) else quotestr % name
- def quote_keyword(a,keyword='timestamp'):
- regex = re.compile('\.keyword(?=\w)')
- a = regex.sub('."%s"' % keyword,a)
- return a
- if 'google' in DRIVERS:
- is_jdbc = False
- class GAEDecimalProperty(gae.Property):
- """
- GAE decimal implementation
- """
- data_type = decimal.Decimal
- def __init__(self, precision, scale, **kwargs):
- super(GAEDecimalProperty, self).__init__(**kwargs)
- self.round = decimal.Decimal('1.' + '0' * scale)
- def get_value_for_datastore(self, model_instance):
- value = super(GAEDecimalProperty, self)\
- .get_value_for_datastore(model_instance)
- if value is None or value == '':
- return None
- else:
- return str(value)
- def make_value_from_datastore(self, value):
- if value is None or value == '':
- return None
- else:
- return decimal.Decimal(value).quantize(self.round)
- def validate(self, value):
- value = super(GAEDecimalProperty, self).validate(value)
- if value is None or isinstance(value, decimal.Decimal):
- return value
- elif isinstance(value, basestring):
- return decimal.Decimal(value)
- raise gae.BadValueError("Property %s must be a Decimal or string."\
- % self.name)
- ###################################################################################
- # class that handles connection pooling (all adapters are derived from this one)
- ###################################################################################
- class ConnectionPool(object):
- POOLS = {}
- check_active_connection = True
- @staticmethod
- def set_folder(folder):
- THREAD_LOCAL.folder = folder
- # ## this allows gluon to commit/rollback all dbs in this thread
- def close(self,action='commit',really=True):
- if action:
- if callable(action):
- action(self)
- else:
- getattr(self, action)()
- # ## if you want pools, recycle this connection
- if self.pool_size:
- GLOBAL_LOCKER.acquire()
- pool = ConnectionPool.POOLS[self.uri]
- if len(pool) < self.pool_size:
- pool.append(self.connection)
- really = False
- GLOBAL_LOCKER.release()
- if really:
- self.close_connection()
- self.connection = None
- @staticmethod
- def close_all_instances(action):
- """ to close cleanly databases in a multithreaded environment """
- dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
- for db_uid, db_group in dbs:
- for db in db_group:
- if hasattr(db,'_adapter'):
- db._adapter.close(action)
- getattr(THREAD_LOCAL,'db_instances',{}).clear()
- getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
- if callable(action):
- action(None)
- return
- def find_or_make_work_folder(self):
- """ this actually does not make the folder. it has to be there """
- self.folder = getattr(THREAD_LOCAL,'folder','')
- if (os.path.isabs(self.folder) and
- isinstance(self, UseDatabaseStoredFile) and
- self.folder.startswith(os.getcwd())):
- self.folder = os.path.relpath(self.folder, os.getcwd())
- # Creating the folder if it does not exist
- if False and self.folder and not exists(self.folder):
- os.mkdir(self.folder)
- def after_connection_hook(self):
- """hook for the after_connection parameter"""
- if callable(self._after_connection):
- self._after_connection(self)
- self.after_connection()
- def after_connection(self):
- """ this it is supposed to be overloaded by adapters"""
- pass
- def reconnect(self, f=None, cursor=True):
- """
- this function defines self.connection and self.cursor
- (the cursor only if `cursor` is True)
- if self.pool_size>0 it will try to pull a connection from the pool;
- if a pooled connection is no longer active (closed by the db server) it
- discards it and loops; if self.pool_size is 0, or the pool has no active
- connection, it opens a new one
- """
- if getattr(self,'connection', None) != None:
- return
- if f is None:
- f = self.connector
- # if not hasattr(self, "driver") or self.driver is None:
- # LOGGER.debug("Skipping connection since there's no driver")
- # return
- if not self.pool_size:
- self.connection = f()
- self.cursor = cursor and self.connection.cursor()
- else:
- uri = self.uri
- POOLS = ConnectionPool.POOLS
- while True:
- GLOBAL_LOCKER.acquire()
- if not uri in POOLS:
- POOLS[uri] = []
- if POOLS[uri]:
- self.connection = POOLS[uri].pop()
- GLOBAL_LOCKER.release()
- self.cursor = cursor and self.connection.cursor()
- try:
- if self.cursor and self.check_active_connection:
- self.execute('SELECT 1;')
- break
- except:
- pass
- else:
- GLOBAL_LOCKER.release()
- self.connection = f()
- self.cursor = cursor and self.connection.cursor()
- break
- self.after_connection_hook()
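- # Editor's sketch (not part of the original file): the pattern implemented
- # by reconnect() and close() above, reduced to a minimal standalone
- # analogue. All names below (TinyPool, checkout, checkin) are hypothetical.
- # POOLS maps each URI to a list of idle connections; checkout() pops one
- # and probes it with 'SELECT 1;', falling back to a fresh connection;
- # checkin() pushes the connection back unless the pool is already full.
- class TinyPool(object):
-     POOLS, LOCK = {}, threading.RLock()  # uses `threading` imported above
-     def __init__(self, uri, connect, pool_size=5):
-         self.uri, self.connect, self.pool_size = uri, connect, pool_size
-     def checkout(self):
-         with self.LOCK:
-             idle = self.POOLS.setdefault(self.uri, [])
-             while idle:
-                 candidate = idle.pop()
-                 try:
-                     candidate.cursor().execute('SELECT 1;')  # liveness probe
-                     return candidate
-                 except Exception:
-                     pass  # stale connection: discard it, try the next one
-         return self.connect()  # pool empty (or all stale): open a new one
-     def checkin(self, connection):
-         with self.LOCK:
-             idle = self.POOLS.setdefault(self.uri, [])
-             if len(idle) < self.pool_size:
-                 idle.append(connection)  # recycle instead of closing
-             else:
-                 connection.close()  # pool is full: really close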
- ###################################################################################
- # this is a generic adapter that does nothing; all others are derived from this one
- ###################################################################################
- class BaseAdapter(ConnectionPool):
- native_json = False
- driver = None
- driver_name = None
- drivers = () # list of drivers from which to pick
- connection = None
- commit_on_alter_table = False
- support_distributed_transaction = False
- uploads_in_blob = False
- can_select_for_update = True
- dbpath = None
- folder = None
- TRUE = 'T'
- FALSE = 'F'
- T_SEP = ' '
- QUOTE_TEMPLATE = '"%s"'
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'CHAR(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'CHAR(%(length)s)',
- 'blob': 'BLOB',
- 'upload': 'CHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'INTEGER',
- 'float':'DOUBLE',
- 'double': 'DOUBLE',
- 'decimal': 'DOUBLE',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'TEXT',
- 'list:string': 'TEXT',
- 'list:reference': 'TEXT',
- # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
- 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
- def isOperationalError(self,exception):
- if not hasattr(self.driver, "OperationalError"):
- return None
- return isinstance(exception, self.driver.OperationalError)
- def id_query(self, table):
- return table._id != None
- def adapt(self, obj):
- return "'%s'" % obj.replace("'", "''")
- def smart_adapt(self, obj):
- if isinstance(obj,(int,float)):
- return str(obj)
- return self.adapt(str(obj))
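- # Editor's note (not part of the original file): adapt() makes a Python
- # string safe to embed in SQL by doubling embedded single quotes, so
- # "O'Hara" becomes 'O''Hara'; smart_adapt() passes int/float values
- # through unquoted and adapts everything else as a string.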
- def file_exists(self, filename):
- """
- to be used ONLY for files that, on GAE, may not be on the filesystem
- """
- return exists(filename)
- def file_open(self, filename, mode='rb', lock=True):
- """
- to be used ONLY for files that, on GAE, may not be on the filesystem
- """
- if have_portalocker and lock:
- fileobj = portalocker.LockedFile(filename,mode)
- else:
- fileobj = open(filename,mode)
- return fileobj
- def file_close(self, fileobj):
- """
- to be used ONLY for files that, on GAE, may not be on the filesystem
- """
- if fileobj:
- fileobj.close()
- def file_delete(self, filename):
- os.unlink(filename)
- def find_driver(self,adapter_args,uri=None):
- self.adapter_args = adapter_args
- if getattr(self,'driver',None) != None:
- return
- drivers_available = [driver for driver in self.drivers
- if driver in globals()]
- if uri:
- items = uri.split('://',1)[0].split(':')
- request_driver = items[1] if len(items)>1 else None
- else:
- request_driver = None
- request_driver = request_driver or adapter_args.get('driver')
- if request_driver:
- if request_driver in drivers_available:
- self.driver_name = request_driver
- self.driver = globals().get(request_driver)
- else:
- raise RuntimeError("driver %s not available" % request_driver)
- elif drivers_available:
- self.driver_name = drivers_available[0]
- self.driver = globals().get(self.driver_name)
- else:
- raise RuntimeError("no driver available %s" % str(self.drivers))
- def log(self, message, table=None):
- """ Logs migrations
- It will not log changes if logfile is not specified. Defaults
- to sql.log
- """
- isabs = None
- logfilename = self.adapter_args.get('logfile','sql.log')
- writelog = bool(logfilename)
- if writelog:
- isabs = os.path.isabs(logfilename)
- if table and table._dbt and writelog and self.folder:
- if isabs:
- table._loggername = logfilename
- else:
- table._loggername = pjoin(self.folder, logfilename)
- logfile = self.file_open(table._loggername, 'a')
- logfile.write(message)
- self.file_close(logfile)
- def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
- credential_decoder=IDENTITY, driver_args={},
- adapter_args={},do_connect=True, after_connection=None):
- self.db = db
- self.dbengine = "None"
- self.uri = uri
- self.pool_size = pool_size
- self.folder = folder
- self.db_codec = db_codec
- self._after_connection = after_connection
- class Dummy(object):
- lastrowid = 1
- def __getattr__(self, value):
- return lambda *a, **b: []
- self.connection = Dummy()
- self.cursor = Dummy()
- def sequence_name(self,tablename):
- return '%s_sequence' % tablename
- def trigger_name(self,tablename):
- return '%s_sequence' % tablename
- def varquote(self,name):
- return name
- def create_table(self, table,
- migrate=True,
- fake_migrate=False,
- polymodel=None):
- db = table._db
- fields = []
- # PostGIS geo fields are added after the table has been created
- postcreation_fields = []
- sql_fields = {}
- sql_fields_aux = {}
- TFK = {}
- tablename = table._tablename
- sortable = 0
- types = self.types
- for field in table:
- sortable += 1
- field_name = field.name
- field_type = field.type
- if isinstance(field_type,SQLCustomType):
- ftype = field_type.native or field_type.type
- elif field_type.startswith('reference'):
- referenced = field_type[10:].strip()
- if referenced == '.':
- referenced = tablename
- constraint_name = self.constraint_name(tablename, field_name)
- if not '.' in referenced \
- and referenced != tablename \
- and hasattr(table,'_primarykey'):
- ftype = types['integer']
- else:
- if hasattr(table,'_primarykey'):
- rtablename,rfieldname = referenced.split('.')
- rtable = db[rtablename]
- rfield = rtable[rfieldname]
- # must be PK reference or unique
- if rfieldname in rtable._primarykey or \
- rfield.unique:
- ftype = types[rfield.type[:9]] % \
- dict(length=rfield.length)
- # multicolumn primary key reference?
- if not rfield.unique and len(rtable._primarykey)>1:
- # then it has to be a table level FK
- if rtablename not in TFK:
- TFK[rtablename] = {}
- TFK[rtablename][rfieldname] = field_name
- else:
- ftype = ftype + \
- types['reference FK'] % dict(
- constraint_name = constraint_name, # should be quoted
- foreign_key = '%s (%s)' % (rtablename,
- rfieldname),
- table_name = tablename,
- field_name = field_name,
- on_delete_action=field.ondelete)
- else:
- # make a guess here for circular references
- if referenced in db:
- id_fieldname = db[referenced]._id.name
- elif referenced == tablename:
- id_fieldname = table._id.name
- else: #make a guess
- id_fieldname = 'id'
- ftype = types[field_type[:9]] % dict(
- index_name = field_name+'__idx',
- field_name = field_name,
- constraint_name = constraint_name,
- foreign_key = '%s (%s)' % (referenced,
- id_fieldname),
- on_delete_action=field.ondelete)
- elif field_type.startswith('list:reference'):
- ftype = types[field_type[:14]]
- elif field_type.startswith('decimal'):
- precision, scale = map(int,field_type[8:-1].split(','))
- ftype = types[field_type[:7]] % \
- dict(precision=precision,scale=scale)
- elif field_type.startswith('geo'):
- if not hasattr(self,'srid'):
- raise RuntimeError('Adapter does not support geometry')
- srid = self.srid
- geotype, parms = field_type[:-1].split('(')
- if not geotype in types:
- raise SyntaxError(
- 'Field: unknown field type: %s for %s' \
- % (field_type, field_name))
- ftype = types[geotype]
- if self.dbengine == 'postgres' and geotype == 'geometry':
- # parameters: schema, srid, dimension
- dimension = 2 # GIS.dimension ???
- parms = parms.split(',')
- if len(parms) == 3:
- schema, srid, dimension = parms
- elif len(parms) == 2:
- schema, srid = parms
- else:
- schema = parms[0]
- ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
- ftype = ftype % dict(schema=schema,
- tablename=tablename,
- fieldname=field_name, srid=srid,
- dimension=dimension)
- postcreation_fields.append(ftype)
- elif not field_type in types:
- raise SyntaxError('Field: unknown field type: %s for %s' % \
- (field_type, field_name))
- else:
- ftype = types[field_type]\
- % dict(length=field.length)
- if not field_type.startswith('id') and \
- not field_type.startswith('reference'):
- if field.notnull:
- ftype += ' NOT NULL'
- else:
- ftype += self.ALLOW_NULL()
- if field.unique:
- ftype += ' UNIQUE'
- if field.custom_qualifier:
- ftype += ' %s' % field.custom_qualifier
- # add to list of fields
- sql_fields[field_name] = dict(
- length=field.length,
- unique=field.unique,
- notnull=field.notnull,
- sortable=sortable,
- type=str(field_type),
- sql=ftype)
- if field.notnull and not field.default is None:
- # Caveat: sql_fields and sql_fields_aux
- # differ for default values.
- # sql_fields is used to trigger migrations and sql_fields_aux
- # is used for create tables.
- # The reason is that we do not want to trigger
- # a migration simply because a default value changes.
- not_null = self.NOT_NULL(field.default, field_type)
- ftype = ftype.replace('NOT NULL', not_null)
- sql_fields_aux[field_name] = dict(sql=ftype)
- # Postgres - PostGIS:
- # geometry fields are added after the table has been created, not now
- if not (self.dbengine == 'postgres' and \
- field_type.startswith('geom')):
- fields.append('%s %s' % (field_name, ftype))
- other = ';'
- # backend-specific extensions to fields
- if self.dbengine == 'mysql':
- if not hasattr(table, "_primarykey"):
- fields.append('PRIMARY KEY(%s)' % table._id.name)
- other = ' ENGINE=InnoDB CHARACTER SET utf8;'
- fields = ',\n '.join(fields)
- for rtablename in TFK:
- rfields = TFK[rtablename]
- pkeys = db[rtablename]._primarykey
- fkeys = [ rfields[k] for k in pkeys ]
- fields = fields + ',\n ' + \
- types['reference TFK'] % dict(
- table_name = tablename,
- field_name=', '.join(fkeys),
- foreign_table = rtablename,
- foreign_key = ', '.join(pkeys),
- on_delete_action = field.ondelete)
- if getattr(table,'_primarykey',None):
- query = "CREATE TABLE %s(\n %s,\n %s) %s" % \
- (tablename, fields,
- self.PRIMARY_KEY(', '.join(table._primarykey)),other)
- else:
- query = "CREATE TABLE %s(\n %s\n)%s" % \
- (tablename, fields, other)
- if self.uri.startswith('sqlite:///') \
- or self.uri.startswith('spatialite:///'):
- path_encoding = sys.getfilesystemencoding() \
- or locale.getdefaultlocale()[1] or 'utf8'
- dbpath = self.uri[9:self.uri.rfind('/')]\
- .decode('utf8').encode(path_encoding)
- else:
- dbpath = self.folder
- if not migrate:
- return query
- elif self.uri.startswith('sqlite:memory')\
- or self.uri.startswith('spatialite:memory'):
- table._dbt = None
- elif isinstance(migrate, str):
- table._dbt = pjoin(dbpath, migrate)
- else:
- table._dbt = pjoin(
- dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))
- if not table._dbt or not self.file_exists(table._dbt):
- if table._dbt:
- self.log('timestamp: %s\n%s\n'
- % (datetime.datetime.today().isoformat(),
- query), table)
- if not fake_migrate:
- self.create_sequence_and_triggers(query,table)
- table._db.commit()
- # Postgres geom fields are added now,
- # after the table has been created
- for query in postcreation_fields:
- self.execute(query)
- table._db.commit()
- if table._dbt:
- tfile = self.file_open(table._dbt, 'w')
- pickle.dump(sql_fields, tfile)
- self.file_close(tfile)
- if fake_migrate:
- self.log('faked!\n', table)
- else:
- self.log('success!\n', table)
- else:
- tfile = self.file_open(table._dbt, 'r')
- try:
- sql_fields_old = pickle.load(tfile)
- except EOFError:
- self.file_close(tfile)
- raise RuntimeError('File %s appears corrupted' % table._dbt)
- self.file_close(tfile)
- if sql_fields != sql_fields_old:
- self.migrate_table(table,
- sql_fields, sql_fields_old,
- sql_fields_aux, None,
- fake_migrate=fake_migrate)
- return query
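- # Editor's illustration (not part of the original file): for the table in
- # the module docstring, the generic adapter's `types` map above yields
- # roughly this DDL (details vary per adapter):
- #   CREATE TABLE person(
- #       id INTEGER PRIMARY KEY AUTOINCREMENT,
- #       name CHAR(512)
- #   );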
- def migrate_table(
- self,
- table,
- sql_fields,
- sql_fields_old,
- sql_fields_aux,
- logfile,
- fake_migrate=False,
- ):
- # logfile is deprecated (moved to adapter.log method)
- db = table._db
- db._migrated.append(table._tablename)
- tablename = table._tablename
- def fix(item):
- k,v=item
- if not isinstance(v,dict):
- v=dict(type='unknown',sql=v)
- return k.lower(),v
- # make sure all field names are lower case to avoid
- # migrations triggered merely by a change of case
- sql_fields = dict(map(fix,sql_fields.iteritems()))
- sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
- sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
- if db._debug:
- logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))
- keys = sql_fields.keys()
- for key in sql_fields_old:
- if not key in keys:
- keys.append(key)
- new_add = self.concat_add(tablename)
- metadata_change = False
- sql_fields_current = copy.copy(sql_fields_old)
- for key in keys:
- query = None
- if not key in sql_fields_old:
- sql_fields_current[key] = sql_fields[key]
- if self.dbengine in ('postgres',) and \
- sql_fields[key]['type'].startswith('geometry'):
- # 'sql' == ftype in sql
- query = [ sql_fields[key]['sql'] ]
- else:
- query = ['ALTER TABLE %s ADD %s %s;' % \
- (tablename, key,
- sql_fields_aux[key]['sql'].replace(', ', new_add))]
- metadata_change = True
- elif self.dbengine in ('sqlite', 'spatialite'):
- if key in sql_fields:
- sql_fields_current[key] = sql_fields[key]
- metadata_change = True
- elif not key in sql_fields:
- del sql_fields_current[key]
- ftype = sql_fields_old[key]['type']
- if self.dbengine in ('postgres',) and ftype.startswith('geometry'):
- geotype, parms = ftype[:-1].split('(')
- schema = parms.split(',')[0]
- query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" %
- dict(schema=schema, table=tablename, field=key,) ]
- elif self.dbengine in ('firebird',):
- query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
- else:
- query = ['ALTER TABLE %s DROP COLUMN %s;'
- % (tablename, key)]
- metadata_change = True
- elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
- and not (key in table.fields and
- isinstance(table[key].type, SQLCustomType)) \
- and not sql_fields[key]['type'].startswith('reference')\
- and not sql_fields[key]['type'].startswith('double')\
- and not sql_fields[key]['type'].startswith('id'):
- sql_fields_current[key] = sql_fields[key]
- t = tablename
- tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
- if self.dbengine in ('firebird',):
- drop_expr = 'ALTER TABLE %s DROP %s;'
- else:
- drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
- key_tmp = key + '__tmp'
- query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt),
- 'UPDATE %s SET %s=%s;' % (t, key_tmp, key),
- drop_expr % (t, key),
- 'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
- 'UPDATE %s SET %s=%s;' % (t, key, key_tmp),
- drop_expr % (t, key_tmp)]
- metadata_change = True
- elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
- sql_fields_current[key] = sql_fields[key]
- metadata_change = True
- if query:
- self.log('timestamp: %s\n'
- % datetime.datetime.today().isoformat(), table)
- db['_lastsql'] = '\n'.join(query)
- for sub_query in query:
- self.log(sub_query + '\n', table)
- if fake_migrate:
- if db._adapter.commit_on_alter_table:
- self.save_dbt(table,sql_fields_current)
- self.log('faked!\n', table)
- else:
- self.execute(sub_query)
- # Caveat: mysql, oracle and firebird do not allow multiple alter table
- # in one transaction so we must commit partial transactions and
- # update table._dbt after alter table.
- if db._adapter.commit_on_alter_table:
- db.commit()
- self.save_dbt(table,sql_fields_current)
- self.log('success!\n', table)
- elif metadata_change:
- self.save_dbt(table,sql_fields_current)
- if metadata_change and not (query and db._adapter.commit_on_alter_table):
- db.commit()
- self.save_dbt(table,sql_fields_current)
- self.log('success!\n', table)
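- # Editor's illustration (not part of the original file): a column type
- # change is applied with the six-statement temp-column shuffle built above;
- # for a column `age` of table `person` the queries are, in order:
- #   ALTER TABLE person ADD age__tmp <new type>;
- #   UPDATE person SET age__tmp=age;
- #   ALTER TABLE person DROP COLUMN age;   -- firebird: DROP without COLUMN
- #   ALTER TABLE person ADD age <new type>;
- #   UPDATE person SET age=age__tmp;
- #   ALTER TABLE person DROP COLUMN age__tmp;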
- def save_dbt(self,table, sql_fields_current):
- tfile = self.file_open(table._dbt, 'w')
- pickle.dump(sql_fields_current, tfile)
- self.file_close(tfile)
- def LOWER(self, first):
- return 'LOWER(%s)' % self.expand(first)
- def UPPER(self, first):
- return 'UPPER(%s)' % self.expand(first)
- def COUNT(self, first, distinct=None):
- return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \
- % self.expand(first)
- def EXTRACT(self, first, what):
- return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
- def EPOCH(self, first):
- return self.EXTRACT(first, 'epoch')
- def LENGTH(self, first):
- return "LENGTH(%s)" % self.expand(first)
- def AGGREGATE(self, first, what):
- return "%s(%s)" % (what, self.expand(first))
- def JOIN(self):
- return 'JOIN'
- def LEFT_JOIN(self):
- return 'LEFT JOIN'
- def RANDOM(self):
- return 'Random()'
- def NOT_NULL(self, default, field_type):
- return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
- def COALESCE(self, first, second):
- expressions = [self.expand(first)]+[self.expand(e) for e in second]
- return 'COALESCE(%s)' % ','.join(expressions)
- def COALESCE_ZERO(self, first):
- return 'COALESCE(%s,0)' % self.expand(first)
- def RAW(self, first):
- return first
- def ALLOW_NULL(self):
- return ''
- def SUBSTRING(self, field, parameters):
- return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
- def PRIMARY_KEY(self, key):
- return 'PRIMARY KEY(%s)' % key
- def _drop(self, table, mode):
- return ['DROP TABLE %s;' % table]
- def drop(self, table, mode=''):
- db = table._db
- queries = self._drop(table, mode)
- for query in queries:
- if table._dbt:
- self.log(query + '\n', table)
- self.execute(query)
- db.commit()
- del db[table._tablename]
- del db.tables[db.tables.index(table._tablename)]
- db._remove_references_to(table)
- if table._dbt:
- self.file_delete(table._dbt)
- self.log('success!\n', table)
- def _insert(self, table, fields):
- if fields:
- keys = ','.join(f.name for f, v in fields)
- values = ','.join(self.expand(v, f.type) for f, v in fields)
- return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
- else:
- return self._insert_empty(table)
- def _insert_empty(self, table):
- return 'INSERT INTO %s DEFAULT VALUES;' % table
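- # Editor's illustration (not part of the original file): _insert() receives
- # (Field, value) pairs and renders a plain INSERT, with each value expanded
- # according to its field type; for the docstring's person table:
- #   INSERT INTO person(name) VALUES ('James');
- # and _insert_empty() yields: INSERT INTO person DEFAULT VALUES;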
- def insert(self, table, fields):
- query = self._insert(table,fields)
- try:
- self.execute(query)
- except Exception:
- e = sys.exc_info()[1]
- if hasattr(table,'_on_insert_error'):
- return table._on_insert_error(table,fields,e)
- raise e
- if hasattr(table,'_primarykey'):
- return dict([(k[0].name, k[1]) for k in fields \
- if k[0].name in table._primarykey])
- id = self.lastrowid(table)
- if not isinstance(id,int):
- return id
- rid = Reference(id)
- (rid._table, rid._record) = (table, None)
- return rid
- def bulk_insert(self, table, items):
- return [self.insert(table,item) for item in items]
- def NOT(self, first):
- return '(NOT %s)' % self.expand(first)
- def AND(self, first, second):
- return '(%s AND %s)' % (self.expand(first), self.expand(second))
- def OR(self, first, second):
- return '(%s OR %s)' % (self.expand(first), self.expand(second))
- def BELONGS(self, first, second):
- if isinstance(second, str):
- return '(%s IN (%s))' % (self.expand(first), second[:-1])
- elif not second:
- return '(1=0)'
- items = ','.join(self.expand(item, first.type) for item in second)
- return '(%s IN (%s))' % (self.expand(first), items)
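- # Editor's illustration (not part of the original file):
- #   person.id.belongs([1,2,3]) compiles to (person.id IN (1,2,3));
- #   an empty sequence short-circuits to the always-false (1=0);
- #   a string is assumed to be a pre-built nested SELECT and is inlined
- #   with its trailing ';' stripped (hence second[:-1] above).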
- def REGEXP(self, first, second):
- "regular expression operator"
- raise NotImplementedError
- def LIKE(self, first, second):
- "case sensitive like operator"
- raise NotImplementedError
- def ILIKE(self, first, second):
- "case in-sensitive like operator"
- return '(%s LIKE %s)' % (self.expand(first),
- self.expand(second, 'string'))
- def STARTSWITH(self, first, second):
- return '(%s LIKE %s)' % (self.expand(first),
- self.expand(second+'%', 'string'))
- def ENDSWITH(self, first, second):
- return '(%s LIKE %s)' % (self.expand(first),
- self.expand('%'+second, 'string'))
- def CONTAINS(self,first,second,case_sensitive=False):
- if first.type in ('string','text', 'json'):
- if isinstance(second,Expression):
- second = Expression(None,self.CONCAT('%',Expression(
- None,self.REPLACE(second,('%','%%'))),'%'))
- else:
- second = '%'+str(second).replace('%','%%')+'%'
- elif first.type.startswith('list:'):
- if isinstance(second,Expression):
- second = Expression(None,self.CONCAT(
- '%|',Expression(None,self.REPLACE(
- Expression(None,self.REPLACE(
- second,('%','%%'))),('|','||'))),'|%'))
- else:
- second = '%|'+str(second).replace('%','%%')\
- .replace('|','||')+'|%'
- op = case_sensitive and self.LIKE or self.ILIKE
- return op(first,second)
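- # Editor's note (not part of the original file): list: fields are stored as
- # '|'-joined tokens (see REGEX_UNPACK above), so containment becomes a LIKE
- # against a delimited token, with '%' doubled and '|' escaped first;
- # searching for 'red' in a list:string field compiles roughly to
- #   (table.field LIKE '%|red|%')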
- def EQ(self, first, second=None):
- if second is None:
- return '(%s IS NULL)' % self.expand(first)
- return '(%s = %s)' % (self.expand(first),
- self.expand(second, first.type))
- def NE(self, first, second=None):
- if second is None:
- return '(%s IS NOT NULL)' % self.expand(first)
- return '(%s <> %s)' % (self.expand(first),
- self.expand(second, first.type))
- def LT(self,first,second=None):
- if second is None:
- raise RuntimeError("Cannot compare %s < None" % first)
- return '(%s < %s)' % (self.expand(first),
- self.expand(second,first.type))
- def LE(self,first,second=None):
- if second is None:
- raise RuntimeError("Cannot compare %s <= None" % first)
- return '(%s <= %s)' % (self.expand(first),
- self.expand(second,first.type))
- def GT(self,first,second=None):
- if second is None:
- raise RuntimeError("Cannot compare %s > None" % first)
- return '(%s > %s)' % (self.expand(first),
- self.expand(second,first.type))
- def GE(self,first,second=None):
- if second is None:
- raise RuntimeError("Cannot compare %s >= None" % first)
- return '(%s >= %s)' % (self.expand(first),
- self.expand(second,first.type))
- def is_numerical_type(self, ftype):
- return ftype in ('integer','boolean','double','bigint') or \
- ftype.startswith('decimal')
- def REPLACE(self, first, args):
- (second, third) = args  # tuple parameters are a syntax error on Python 3
- return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
- self.expand(second,'string'),
- self.expand(third,'string'))
- def CONCAT(self, *items):
- return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
- def ADD(self, first, second):
- if self.is_numerical_type(first.type):
- return '(%s + %s)' % (self.expand(first),
- self.expand(second, first.type))
- else:
- return self.CONCAT(first, second)
- def SUB(self, first, second):
- return '(%s - %s)' % (self.expand(first),
- self.expand(second, first.type))
- def MUL(self, first, second):
- return '(%s * %s)' % (self.expand(first),
- self.expand(second, first.type))
- def DIV(self, first, second):
- return '(%s / %s)' % (self.expand(first),
- self.expand(second, first.type))
- def MOD(self, first, second):
- return '(%s %% %s)' % (self.expand(first),
- self.expand(second, first.type))
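- # Editor's note (not part of the original file): ADD() above doubles as SQL
- # string concatenation for non-numerical field types, so for a string field
- # an expression like (person.name + '!') compiles roughly to
- #   (person.name || '!')
- # while for an integer field it stays arithmetic: (person.id + 1)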
- def AS(self, first,…