/sql.c
Possible License(s): GPL-2.0
/*
 * $Id$
 *
 * Copyright (c) 2002-2003, Darren Hiebert
 *
 * This source code is released for free distribution under the terms of the
 * GNU General Public License.
 *
 * This module contains functions for generating tags for PL/SQL language
 * files.
 */

/*
 * INCLUDE FILES
 */
#include "general.h"	/* must always come first */

#include <ctype.h>	/* to define isalpha () */
#include <setjmp.h>
#ifdef DEBUG
#include <stdio.h>
#endif

#include "debug.h"
#include "entry.h"
#include "keyword.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
#include "vstring.h"

/*
 * On-line "Oracle Database PL/SQL Language Reference":
 * http://download.oracle.com/docs/cd/B28359_01/appdev.111/b28370/toc.htm
 *
 * Sample PL/SQL code is available from:
 * http://www.orafaq.com/faqscrpt.htm#GENPLSQL
 *
 * On-line SQL Anywhere Documentation
 * http://www.ianywhere.com/developer/product_manuals/sqlanywhere/index.html
 */

/*
 * MACROS
 */
#define isType(token,t)		(boolean) ((token)->type == (t))
#define isKeyword(token,k)	(boolean) ((token)->keyword == (k))

/*
 * DATA DECLARATIONS
 */

typedef enum eException { ExceptionNone, ExceptionEOF } exception_t;

/*
 * Used to specify type of keyword.
 */
typedef enum eKeywordId {
	KEYWORD_NONE = -1,
	KEYWORD_is,
	KEYWORD_begin,
	KEYWORD_body,
	KEYWORD_cursor,
	KEYWORD_declare,
	KEYWORD_end,
	KEYWORD_function,
	KEYWORD_if,
	KEYWORD_else,
	KEYWORD_elseif,
	KEYWORD_endif,
	KEYWORD_loop,
	KEYWORD_while,
	KEYWORD_case,
	KEYWORD_for,
	KEYWORD_do,
	KEYWORD_call,
	KEYWORD_package,
	KEYWORD_pragma,
	KEYWORD_procedure,
	KEYWORD_record,
	KEYWORD_object,
	KEYWORD_ref,
	KEYWORD_rem,
	KEYWORD_return,
	KEYWORD_returns,
	KEYWORD_subtype,
	KEYWORD_table,
	KEYWORD_trigger,
	KEYWORD_type,
	KEYWORD_index,
	KEYWORD_event,
	KEYWORD_publication,
	KEYWORD_service,
	KEYWORD_domain,
	KEYWORD_datatype,
	KEYWORD_result,
	KEYWORD_url,
	KEYWORD_internal,
	KEYWORD_external,
	KEYWORD_when,
	KEYWORD_then,
	KEYWORD_variable,
	KEYWORD_exception,
	KEYWORD_at,
	KEYWORD_on,
	KEYWORD_primary,
	KEYWORD_references,
	KEYWORD_unique,
	KEYWORD_check,
	KEYWORD_constraint,
	KEYWORD_foreign,
	KEYWORD_ml_table,
	KEYWORD_ml_table_lang,
	KEYWORD_ml_table_dnet,
	KEYWORD_ml_table_java,
	KEYWORD_ml_table_chk,
	KEYWORD_ml_conn,
	KEYWORD_ml_conn_lang,
	KEYWORD_ml_conn_dnet,
	KEYWORD_ml_conn_java,
	KEYWORD_ml_conn_chk,
	KEYWORD_ml_prop,
	KEYWORD_local,
	KEYWORD_temporary,
	KEYWORD_drop,
	KEYWORD_view,
	KEYWORD_synonym,
	KEYWORD_handler,
	KEYWORD_comment,
	KEYWORD_create,
	KEYWORD_go
} keywordId;

/*
 * Used to determine whether keyword is valid for the token language and
 * what its ID is.
 */
typedef struct sKeywordDesc {
	const char *name;
	keywordId id;
} keywordDesc;

typedef enum eTokenType {
	TOKEN_UNDEFINED,
	TOKEN_BLOCK_LABEL_BEGIN,
	TOKEN_BLOCK_LABEL_END,
	TOKEN_CHARACTER,
	TOKEN_CLOSE_PAREN,
	TOKEN_COLON,
	TOKEN_SEMICOLON,
	TOKEN_COMMA,
	TOKEN_IDENTIFIER,
	TOKEN_KEYWORD,
	TOKEN_OPEN_PAREN,
	TOKEN_OPERATOR,
	TOKEN_OTHER,
	TOKEN_STRING,
	TOKEN_PERIOD,
	TOKEN_OPEN_CURLY,
	TOKEN_CLOSE_CURLY,
	TOKEN_OPEN_SQUARE,
	TOKEN_CLOSE_SQUARE,
	TOKEN_TILDE,
	TOKEN_FORWARD_SLASH,
	TOKEN_EQUAL
} tokenType;

typedef struct sTokenInfoSQL {
	tokenType type;
	keywordId keyword;
	vString *string;
	vString *scope;
	int begin_end_nest_lvl;
	unsigned long lineNumber;
	fpos_t filePosition;
} tokenInfo;

/*
 * DATA DEFINITIONS
 */

static langType Lang_sql;

static jmp_buf Exception;

typedef enum {
	SQLTAG_CURSOR,
	SQLTAG_PROTOTYPE,
	SQLTAG_FUNCTION,
	SQLTAG_FIELD,
	SQLTAG_LOCAL_VARIABLE,
	SQLTAG_BLOCK_LABEL,
	SQLTAG_PACKAGE,
	SQLTAG_PROCEDURE,
	SQLTAG_RECORD,
	SQLTAG_SUBTYPE,
	SQLTAG_TABLE,
	SQLTAG_TRIGGER,
	SQLTAG_VARIABLE,
	SQLTAG_INDEX,
	SQLTAG_EVENT,
	SQLTAG_PUBLICATION,
	SQLTAG_SERVICE,
	SQLTAG_DOMAIN,
	SQLTAG_VIEW,
	SQLTAG_SYNONYM,
	SQLTAG_MLTABLE,
	SQLTAG_MLCONN,
	SQLTAG_MLPROP,
	SQLTAG_COUNT
} sqlKind;

static kindOption SqlKinds [] = {
	{ TRUE,  'c', "cursor",      "cursors"                },
	{ FALSE, 'd', "prototype",   "prototypes"             },
	{ TRUE,  'f', "function",    "functions"              },
	{ TRUE,  'F', "field",       "record fields"          },
	{ FALSE, 'l', "local",       "local variables"        },
	{ TRUE,  'L', "label",       "block label"            },
	{ TRUE,  'P', "package",     "packages"               },
	{ TRUE,  'p', "procedure",   "procedures"             },
	{ FALSE, 'r', "record",      "records"                },
	{ TRUE,  's', "subtype",     "subtypes"               },
	{ TRUE,  't', "table",       "tables"                 },
	{ TRUE,  'T', "trigger",     "triggers"               },
	{ TRUE,  'v', "variable",    "variables"              },
	{ TRUE,  'i', "index",       "indexes"                },
	{ TRUE,  'e', "event",       "events"                 },
	{ TRUE,  'U', "publication", "publications"           },
	{ TRUE,  'R', "service",     "services"               },
	{ TRUE,  'D', "domain",      "domains"                },
	{ TRUE,  'V', "view",        "views"                  },
	{ TRUE,  'n', "synonym",     "synonyms"               },
	{ TRUE,  'x', "mltable",     "MobiLink Table Scripts" },
	{ TRUE,  'y', "mlconn",      "MobiLink Conn Scripts"  },
	{ TRUE,  'z', "mlprop",      "MobiLink Properties"    }
};
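
/*
 * As a rough illustration of how the kind letters above surface in a
 * generated tags file (hypothetical input file "demo.sql"; the exact
 * extension fields depend on the --fields/--format options in use), a
 * procedure definition produces an entry along the lines of:
 *
 *     my_proc<TAB>demo.sql<TAB>/^CREATE PROCEDURE my_proc$/;"<TAB>p
 *
 * where the trailing "p" is the SQLTAG_PROCEDURE letter from SqlKinds.
 */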
271 { "type", KEYWORD_type }, 272 { "index", KEYWORD_index }, 273 { "event", KEYWORD_event }, 274 { "publication", KEYWORD_publication }, 275 { "service", KEYWORD_service }, 276 { "domain", KEYWORD_domain }, 277 { "datatype", KEYWORD_datatype }, 278 { "result", KEYWORD_result }, 279 { "url", KEYWORD_url }, 280 { "internal", KEYWORD_internal }, 281 { "external", KEYWORD_external }, 282 { "when", KEYWORD_when }, 283 { "then", KEYWORD_then }, 284 { "variable", KEYWORD_variable }, 285 { "exception", KEYWORD_exception }, 286 { "at", KEYWORD_at }, 287 { "on", KEYWORD_on }, 288 { "primary", KEYWORD_primary }, 289 { "references", KEYWORD_references }, 290 { "unique", KEYWORD_unique }, 291 { "check", KEYWORD_check }, 292 { "constraint", KEYWORD_constraint }, 293 { "foreign", KEYWORD_foreign }, 294 { "ml_add_table_script", KEYWORD_ml_table }, 295 { "ml_add_lang_table_script", KEYWORD_ml_table_lang }, 296 { "ml_add_dnet_table_script", KEYWORD_ml_table_dnet }, 297 { "ml_add_java_table_script", KEYWORD_ml_table_java }, 298 { "ml_add_lang_table_script_chk", KEYWORD_ml_table_chk }, 299 { "ml_add_connection_script", KEYWORD_ml_conn }, 300 { "ml_add_lang_connection_script", KEYWORD_ml_conn_lang }, 301 { "ml_add_dnet_connection_script", KEYWORD_ml_conn_dnet }, 302 { "ml_add_java_connection_script", KEYWORD_ml_conn_java }, 303 { "ml_add_lang_conn_script_chk", KEYWORD_ml_conn_chk }, 304 { "ml_add_property", KEYWORD_ml_prop }, 305 { "local", KEYWORD_local }, 306 { "temporary", KEYWORD_temporary }, 307 { "drop", KEYWORD_drop }, 308 { "view", KEYWORD_view }, 309 { "synonym", KEYWORD_synonym }, 310 { "handler", KEYWORD_handler }, 311 { "comment", KEYWORD_comment }, 312 { "create", KEYWORD_create }, 313 { "go", KEYWORD_go } 314}; 315 316/* 317 * FUNCTION DECLARATIONS 318 */ 319 320/* Recursive calls */ 321static void parseBlock (tokenInfo *const token, const boolean local); 322static void parseDeclare (tokenInfo *const token, const boolean local); 323static void parseKeywords (tokenInfo *const token); 324static void parseSqlFile (tokenInfo *const token); 325 326/* 327 * FUNCTION DEFINITIONS 328 */ 329 330static boolean isIdentChar1 (const int c) 331{ 332 /* 333 * Other databases are less restrictive on the first character of 334 * an identifier. 335 * isIdentChar1 is used to identify the first character of an 336 * identifier, so we are removing some restrictions. 337 */ 338 return (boolean) 339 (isalpha (c) || c == '@' || c == '_' ); 340} 341 342static boolean isIdentChar (const int c) 343{ 344 return (boolean) 345 (isalpha (c) || isdigit (c) || c == '$' || 346 c == '@' || c == '_' || c == '#'); 347} 348 349static boolean isCmdTerm (tokenInfo *const token) 350{ 351 DebugStatement ( 352 debugPrintf (DEBUG_PARSE 353 , "\n isCmdTerm: token same tt:%d tk:%d\n" 354 , token->type 355 , token->keyword 356 ); 357 ); 358 359 /* 360 * Based on the various customer sites I have been at 361 * the most common command delimiters are 362 * ; 363 * ~ 364 * / 365 * go 366 * This routine will check for any of these, more 367 * can easily be added by modifying readToken and 368 * either adding the character to: 369 * enum eTokenType 370 * enum eTokenType 371 */ 372 return ( isType (token, TOKEN_SEMICOLON) || 373 isType (token, TOKEN_TILDE) || 374 isType (token, TOKEN_FORWARD_SLASH) || 375 isKeyword (token, KEYWORD_go) 376 ); 377} 378 379static boolean isMatchedEnd(tokenInfo *const token, int nest_lvl) 380{ 381 boolean terminated = FALSE; 382 /* 383 * Since different forms of SQL allow the use of 384 * BEGIN 385 * ... 
	 *     END
	 * blocks, some statements may not be terminated using
	 * the standard delimiters:
	 *     ;
	 *     ~
	 *     /
	 *     go
	 * This routine will check to see if we encounter an END
	 * for the matching nest level of BEGIN ... END statements.
	 * If we find one, then we can assume the statement was terminated
	 * since we have fallen through to the END statement of the BEGIN
	 * block.
	 */
	if ( nest_lvl > 0 && isKeyword (token, KEYWORD_end) )
	{
		if ( token->begin_end_nest_lvl == nest_lvl )
			terminated = TRUE;
	}

	return terminated;
}

static void buildSqlKeywordHash (void)
{
	const size_t count = sizeof (SqlKeywordTable) /
		sizeof (SqlKeywordTable [0]);
	size_t i;
	for (i = 0  ;  i < count  ;  ++i)
	{
		const keywordDesc* const p = &SqlKeywordTable [i];
		addKeyword (p->name, Lang_sql, (int) p->id);
	}
}

static tokenInfo *newToken (void)
{
	tokenInfo *const token = xMalloc (1, tokenInfo);

	token->type               = TOKEN_UNDEFINED;
	token->keyword            = KEYWORD_NONE;
	token->string             = vStringNew ();
	token->scope              = vStringNew ();
	token->begin_end_nest_lvl = 0;
	token->lineNumber         = getSourceLineNumber ();
	token->filePosition       = getInputFilePosition ();

	return token;
}

static void deleteToken (tokenInfo *const token)
{
	vStringDelete (token->string);
	vStringDelete (token->scope);
	eFree (token);
}

/*
 * Tag generation functions
 */

static void makeConstTag (tokenInfo *const token, const sqlKind kind)
{
	if (SqlKinds [kind].enabled)
	{
		const char *const name = vStringValue (token->string);
		tagEntryInfo e;
		initTagEntry (&e, name);

		e.lineNumber   = token->lineNumber;
		e.filePosition = token->filePosition;
		e.kindName     = SqlKinds [kind].name;
		e.kind         = SqlKinds [kind].letter;

		makeTagEntry (&e);
	}
}

static void makeSqlTag (tokenInfo *const token, const sqlKind kind)
{
	vString *	fulltag;

	if (SqlKinds [kind].enabled)
	{
		/*
		 * If a scope has been added to the token, change the token
		 * string to include the scope when making the tag.
		 */
		if ( vStringLength(token->scope) > 0 )
		{
			fulltag =  vStringNew ();
			vStringCopy(fulltag, token->scope);
			vStringCatS (fulltag, ".");
			vStringCatS (fulltag, vStringValue(token->string));
			vStringTerminate(fulltag);
			vStringCopy(token->string, fulltag);
			vStringDelete (fulltag);
		}
		makeConstTag (token, kind);
	}
}

/*
 * Parsing functions
 */

static void parseString (vString *const string, const int delimiter)
{
	boolean end = FALSE;
	while (! end)
	{
		int c = fileGetc ();
		if (c == EOF)
			end = TRUE;
		/*
		else if (c == '\\')
		{
			c = fileGetc(); // This may be a ' or ". //
			vStringPut(string, c);
		}
		*/
		else if (c == delimiter)
			end = TRUE;
		else
			vStringPut (string, c);
	}
	vStringTerminate (string);
}

/* Reads a C identifier beginning with "firstChar" and places it into "name".
 */
static void parseIdentifier (vString *const string, const int firstChar)
{
	int c = firstChar;
	Assert (isIdentChar1 (c));
	do
	{
		vStringPut (string, c);
		c = fileGetc ();
	} while (isIdentChar (c));
	vStringTerminate (string);
	if (!isspace (c))
		fileUngetc (c);		/* unget non-identifier character */
}

static void readToken (tokenInfo *const token)
{
	int c;

	token->type    = TOKEN_UNDEFINED;
	token->keyword = KEYWORD_NONE;
	vStringClear (token->string);

getNextChar:
	do
	{
		c = fileGetc ();
		token->lineNumber   = getSourceLineNumber ();
		token->filePosition = getInputFilePosition ();
		/*
		 * Added " to the list of ignores, not sure what this
		 * might break but it gets by this issue:
		 *	  create table "t1" (...)
		 *
		 * Darren, the code passes all my tests for both
		 * Oracle and SQL Anywhere, but maybe you can tell me
		 * what this may affect.
		 */
	}
	while (c == '\t'  ||  c == ' ' ||  c == '\n');

	switch (c)
	{
		case EOF: longjmp (Exception, (int)ExceptionEOF);	break;
		case '(': token->type = TOKEN_OPEN_PAREN;			break;
		case ')': token->type = TOKEN_CLOSE_PAREN;			break;
		case ':': token->type = TOKEN_COLON;				break;
		case ';': token->type = TOKEN_SEMICOLON;			break;
		case '.': token->type = TOKEN_PERIOD;				break;
		case ',': token->type = TOKEN_COMMA;				break;
		case '{': token->type = TOKEN_OPEN_CURLY;			break;
		case '}': token->type = TOKEN_CLOSE_CURLY;			break;
		case '~': token->type = TOKEN_TILDE;				break;
		case '[': token->type = TOKEN_OPEN_SQUARE;			break;
		case ']': token->type = TOKEN_CLOSE_SQUARE;			break;
		case '=': token->type = TOKEN_EQUAL;				break;

		case '\'':
		case '"':
			token->type = TOKEN_STRING;
			parseString (token->string, c);
			token->lineNumber = getSourceLineNumber ();
			token->filePosition = getInputFilePosition ();
			break;

		case '-':
			c = fileGetc ();
			if (c == '-')		/* -- is this the start of a comment? */
			{
				fileSkipToCharacter ('\n');
				goto getNextChar;
			}
			else
			{
				if (!isspace (c))
					fileUngetc (c);
				token->type = TOKEN_OPERATOR;
			}
			break;

		case '<':
		case '>':
		{
			const int initial = c;
			int d = fileGetc ();
			if (d == initial)
			{
				if (initial == '<')
					token->type = TOKEN_BLOCK_LABEL_BEGIN;
				else
					token->type = TOKEN_BLOCK_LABEL_END;
			}
			else
			{
				fileUngetc (d);
				token->type = TOKEN_UNDEFINED;
			}
			break;
		}

		case '\\':
			c = fileGetc ();
			if (c != '\\'  && c != '"'  &&  c != '\''  &&  !isspace (c))
				fileUngetc (c);
			token->type = TOKEN_CHARACTER;
			token->lineNumber = getSourceLineNumber ();
			token->filePosition = getInputFilePosition ();
			break;

		case '/':
		{
			int d = fileGetc ();
			if ( (d != '*') &&		/* is this the start of a comment? */
					(d != '/') )	/* is a one line comment? */
			{
				token->type = TOKEN_FORWARD_SLASH;
				fileUngetc (d);
			}
			else
			{
				if (d == '*')
				{
					do
					{
						fileSkipToCharacter ('*');
						c = fileGetc ();
						if (c == '/')
							break;
						else
							fileUngetc (c);
					} while (c != EOF && c != '\0');
					goto getNextChar;
				}
				else if (d == '/')	/* is this the start of a comment? */
				{
					fileSkipToCharacter ('\n');
					goto getNextChar;
				}
			}
			break;
		}

		default:
			if (! isIdentChar1 (c))
				token->type = TOKEN_UNDEFINED;
			else
			{
				parseIdentifier (token->string, c);
				token->lineNumber = getSourceLineNumber ();
				token->filePosition = getInputFilePosition ();
				token->keyword = analyzeToken (token->string, Lang_sql);
				if (isKeyword (token, KEYWORD_rem))
				{
					vStringClear (token->string);
					fileSkipToCharacter ('\n');
					goto getNextChar;
				}
				else if (isKeyword (token, KEYWORD_NONE))
					token->type = TOKEN_IDENTIFIER;
				else
					token->type = TOKEN_KEYWORD;
			}
			break;
	}
}
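
/*
 * A hand-written illustration (not from the original sources) of how
 * readToken above splits up a line of SQL.  Given the input
 *
 *     CREATE PROCEDURE p1( a INT );
 *
 * successive calls return roughly:
 *
 *     TOKEN_KEYWORD    (KEYWORD_create)
 *     TOKEN_KEYWORD    (KEYWORD_procedure)
 *     TOKEN_IDENTIFIER "p1"
 *     TOKEN_OPEN_PAREN
 *     TOKEN_IDENTIFIER "a"
 *     TOKEN_IDENTIFIER "INT"   (no entry in SqlKeywordTable, so it stays an identifier)
 *     TOKEN_CLOSE_PAREN
 *     TOKEN_SEMICOLON
 */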

/*
 * Token parsing functions
 */

/*
 * static void addContext (tokenInfo* const parent, const tokenInfo* const child)
 * {
 *	   if (vStringLength (parent->string) > 0)
 *	   {
 *		   vStringCatS (parent->string, ".");
 *	   }
 *	   vStringCatS (parent->string, vStringValue(child->string));
 *	   vStringTerminate(parent->string);
 * }
 */

static void addToScope (tokenInfo* const token, vString* const extra)
{
	if (vStringLength (token->scope) > 0)
	{
		vStringCatS (token->scope, ".");
	}
	vStringCatS (token->scope, vStringValue(extra));
	vStringTerminate(token->scope);
}

/*
 * Scanning functions
 */

static void findToken (tokenInfo *const token, const tokenType type)
{
	while (! isType (token, type))
	{
		readToken (token);
	}
}

static void findCmdTerm (tokenInfo *const token, const boolean check_first)
{
	int begin_end_nest_lvl = token->begin_end_nest_lvl;

	if ( check_first )
	{
		if ( isCmdTerm(token) )
			return;
	}
	do
	{
		readToken (token);
	} while ( !isCmdTerm(token) && !isMatchedEnd(token, begin_end_nest_lvl) );
}

static void skipToMatched(tokenInfo *const token)
{
	int nest_level = 0;
	tokenType open_token;
	tokenType close_token;

	switch (token->type)
	{
		case TOKEN_OPEN_PAREN:
			open_token  = TOKEN_OPEN_PAREN;
			close_token = TOKEN_CLOSE_PAREN;
			break;
		case TOKEN_OPEN_CURLY:
			open_token  = TOKEN_OPEN_CURLY;
			close_token = TOKEN_CLOSE_CURLY;
			break;
		case TOKEN_OPEN_SQUARE:
			open_token  = TOKEN_OPEN_SQUARE;
			close_token = TOKEN_CLOSE_SQUARE;
			break;
		default:
			return;
	}

	/*
	 * This routine will skip to a matching closing token.
	 * It will also handle nested tokens like the (, ) below.
	 *	 (	name varchar(30), text binary(10)  )
	 */

	if (isType (token, open_token))
	{
		nest_level++;
		while (! (isType (token, close_token) && (nest_level == 0)))
		{
			readToken (token);
			if (isType (token, open_token))
			{
				nest_level++;
			}
			if (isType (token, close_token))
			{
				if (nest_level > 0)
				{
					nest_level--;
				}
			}
		}
		readToken (token);
	}
}

static void copyToken (tokenInfo *const dest, tokenInfo *const src)
{
	dest->lineNumber   = src->lineNumber;
	dest->filePosition = src->filePosition;
	dest->type         = src->type;
	dest->keyword      = src->keyword;
	vStringCopy(dest->string, src->string);
	vStringCopy(dest->scope, src->scope);
}

static void skipArgumentList (tokenInfo *const token)
{
	/*
	 * Other databases can have arguments with fully declared
	 * datatypes:
	 *	 (	name varchar(30), text binary(10)  )
	 * So we must check for nested open and closing parentheses
	 */

	if (isType (token, TOKEN_OPEN_PAREN))	/* arguments?
											 */
	{
		skipToMatched (token);
	}
}

static void parseSubProgram (tokenInfo *const token)
{
	tokenInfo *const name  = newToken ();
	vString * saveScope = vStringNew ();

	/*
	 * This must handle both prototypes and the body of
	 * the procedures.
	 *
	 * Prototype:
	 *	   FUNCTION func_name RETURN integer;
	 *	   PROCEDURE proc_name( parameters );
	 * Procedure
	 *	   FUNCTION GET_ML_USERNAME RETURN VARCHAR2
	 *	   IS
	 *	   BEGIN
	 *		   RETURN v_sync_user_id;
	 *	   END GET_ML_USERNAME;
	 *
	 *	   PROCEDURE proc_name( parameters )
	 *		   IS
	 *	   BEGIN
	 *	   END;
	 *	   CREATE PROCEDURE proc_name( parameters )
	 *		   EXTERNAL NAME ... ;
	 *	   CREATE PROCEDURE proc_name( parameters )
	 *	   BEGIN
	 *	   END;
	 *
	 *	   CREATE FUNCTION f_GetClassName(
	 *		   IN @object VARCHAR(128)
	 *		  ,IN @code   VARCHAR(128)
	 *	   )
	 *	   RETURNS VARCHAR(200)
	 *	   DETERMINISTIC
	 *	   BEGIN
	 *
	 *		   IF( @object = 'user_state' ) THEN
	 *			   SET something = something;
	 *		   END IF;
	 *
	 *		   RETURN @name;
	 *	   END;
	 *
	 * Note, a Package adds scope to the items within.
	 *	   create or replace package demo_pkg is
	 *		   test_var number;
	 *		   function test_func return varchar2;
	 *		   function more.test_func2 return varchar2;
	 *	   end demo_pkg;
	 * So the tags generated here contain the package name:
	 *		   demo_pkg.test_var
	 *		   demo_pkg.test_func
	 *		   demo_pkg.more.test_func2
	 */
	const sqlKind kind = isKeyword (token, KEYWORD_function) ?
		SQLTAG_FUNCTION : SQLTAG_PROCEDURE;
	Assert (isKeyword (token, KEYWORD_function) ||
			isKeyword (token, KEYWORD_procedure));

	vStringCopy(saveScope, token->scope);
	readToken (token);
	copyToken (name, token);
	readToken (token);

	if (isType (token, TOKEN_PERIOD))
	{
		/*
		 * If this is an Oracle package, then the token->scope should
		 * already be set.  If this is the case, also add this value to the
		 * scope.
		 * If this is not an Oracle package, chances are the scope should be
		 * blank and the value just read is the OWNER or CREATOR of the
		 * function and should not be considered part of the scope.
		 */
		if ( vStringLength(saveScope) > 0 )
		{
			addToScope(token, name->string);
		}
		readToken (token);
		copyToken (name, token);
		readToken (token);
	}
	if (isType (token, TOKEN_OPEN_PAREN))
	{
		/* Reads to the next token after the TOKEN_CLOSE_PAREN */
		skipArgumentList(token);
	}

	if (kind == SQLTAG_FUNCTION)
	{
		if (isKeyword (token, KEYWORD_return) || isKeyword (token, KEYWORD_returns))
		{
			/* Read datatype */
			readToken (token);
			/*
			 * Read the next token, which could be the
			 * command terminator if a prototype
			 * or an open parenthesis
			 */
			readToken (token);
			if (isType (token, TOKEN_OPEN_PAREN))
			{
				/* Reads to the next token after the TOKEN_CLOSE_PAREN */
				skipArgumentList(token);
			}
		}
	}
	if( isCmdTerm (token) )
	{
		makeSqlTag (name, SQLTAG_PROTOTYPE);
	}
	else
	{
		while (!(isKeyword (token, KEYWORD_is) ||
					isKeyword (token, KEYWORD_begin) ||
					isKeyword (token, KEYWORD_at) ||
					isKeyword (token, KEYWORD_internal) ||
					isKeyword (token, KEYWORD_external) ||
					isKeyword (token, KEYWORD_url) ||
					isType (token, TOKEN_EQUAL) ||
					isCmdTerm (token)
				)
			  )
		{
			if ( isKeyword (token, KEYWORD_result) )
			{
				readToken (token);
				if (isType (token, TOKEN_OPEN_PAREN))
				{
					/* Reads to the next token after the TOKEN_CLOSE_PAREN */
					skipArgumentList(token);
				}
			} else {
				readToken (token);
			}
		}
		if (isKeyword (token, KEYWORD_at) ||
				isKeyword (token, KEYWORD_url) ||
				isKeyword (token, KEYWORD_internal) ||
				isKeyword (token, KEYWORD_external) )
		{
			addToScope(token, name->string);
			if (isType (name, TOKEN_IDENTIFIER) ||
					isType (name, TOKEN_STRING) ||
					!isKeyword (token, KEYWORD_NONE)
			   )
				makeSqlTag (name, kind);

			vStringClear (token->scope);
		}
		if ( isType (token, TOKEN_EQUAL) )
			readToken (token);

		if ( isKeyword (token, KEYWORD_declare) )
			parseDeclare (token, FALSE);

		if (isKeyword (token, KEYWORD_is) ||
				isKeyword (token, KEYWORD_begin) )
		{
			addToScope(token, name->string);
			if (isType (name, TOKEN_IDENTIFIER) ||
					isType (name, TOKEN_STRING) ||
					!isKeyword (token, KEYWORD_NONE)
			   )
				makeSqlTag (name, kind);

			parseBlock (token, TRUE);
			vStringClear (token->scope);
		}
	}
	vStringCopy(token->scope, saveScope);
	deleteToken (name);
	vStringDelete(saveScope);
}

static void parseRecord (tokenInfo *const token)
{
	/*
	 * Make it a bit forgiving, this is called from
	 * multiple functions, parseTable, parseType
	 */
	if (!isType (token, TOKEN_OPEN_PAREN))
		readToken (token);

	Assert (isType (token, TOKEN_OPEN_PAREN));
	do
	{
		if ( isType (token, TOKEN_COMMA) || isType (token, TOKEN_OPEN_PAREN) )
			readToken (token);

		/*
		 * Create table statements can end with various constraints
		 * which must be excluded from the SQLTAG_FIELD.
		 *	  create table t1 (
		 *		  c1 integer,
		 *		  c2 char(30),
		 *		  c3 numeric(10,5),
		 *		  c4 integer,
		 *		  constraint whatever,
		 *		  primary key(c1),
		 *		  foreign key (),
		 *		  check ()
		 *	  )
		 */
		if (!
				(isKeyword(token, KEYWORD_primary) ||
				 isKeyword(token, KEYWORD_references) ||
				 isKeyword(token, KEYWORD_unique) ||
				 isKeyword(token, KEYWORD_check) ||
				 isKeyword(token, KEYWORD_constraint) ||
				 isKeyword(token, KEYWORD_foreign) ) )
		{
			if (isType (token, TOKEN_IDENTIFIER) ||
					isType (token, TOKEN_STRING))
				makeSqlTag (token, SQLTAG_FIELD);
		}

		while (!(isType (token, TOKEN_COMMA) ||
					isType (token, TOKEN_CLOSE_PAREN) ||
					isType (token, TOKEN_OPEN_PAREN)
				))
		{
			readToken (token);
			/*
			 * A table structure can look like this:
			 *	  create table t1 (
			 *		  c1 integer,
			 *		  c2 char(30),
			 *		  c3 numeric(10,5),
			 *		  c4 integer
			 *	  )
			 * We can't just look for a COMMA or CLOSE_PAREN
			 * since that will not deal with the numeric(10,5)
			 * case.  So we need to skip the argument list
			 * when we find an open paren.
			 */
			if (isType (token, TOKEN_OPEN_PAREN))
			{
				/* Reads to the next token after the TOKEN_CLOSE_PAREN */
				skipArgumentList(token);
			}
		}
	} while (! isType (token, TOKEN_CLOSE_PAREN));
}

static void parseType (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();
	vString * saveScope = vStringNew ();

	vStringCopy(saveScope, token->scope);
	/* If a scope has been set, add it to the name */
	addToScope (name, token->scope);
	readToken (name);
	if (isType (name, TOKEN_IDENTIFIER))
	{
		readToken (token);
		if (isKeyword (token, KEYWORD_is))
		{
			readToken (token);
			addToScope (token, name->string);
			switch (token->keyword)
			{
				case KEYWORD_record:
				case KEYWORD_object:
					makeSqlTag (name, SQLTAG_RECORD);
					parseRecord (token);
					break;

				case KEYWORD_table:
					makeSqlTag (name, SQLTAG_TABLE);
					break;

				case KEYWORD_ref:
					readToken (token);
					if (isKeyword (token, KEYWORD_cursor))
						makeSqlTag (name, SQLTAG_CURSOR);
					break;

				default: break;
			}
			vStringClear (token->scope);
		}
	}
	vStringCopy(token->scope, saveScope);
	deleteToken (name);
	vStringDelete(saveScope);
}

static void parseSimple (tokenInfo *const token, const sqlKind kind)
{
	/* This will simply make the tagname from the first word found */
	readToken (token);
	if (isType (token, TOKEN_IDENTIFIER) ||
			isType (token, TOKEN_STRING))
		makeSqlTag (token, kind);
}

static void parseDeclare (tokenInfo *const token, const boolean local)
{
	/*
	 * PL/SQL declares are of this format:
	 *	  IS|AS
	 *	  [declare]
	 *		 CURSOR curname ...
	 *		 varname1 datatype;
	 *		 varname2 datatype;
	 *		 varname3 datatype;
	 *	  begin
	 */

	if (isKeyword (token, KEYWORD_declare))
		readToken (token);
	while (! isKeyword (token, KEYWORD_begin) && !
			isKeyword (token, KEYWORD_end))
	{
		switch (token->keyword)
		{
			case KEYWORD_cursor:	parseSimple (token, SQLTAG_CURSOR); break;
			case KEYWORD_function:	parseSubProgram (token); break;
			case KEYWORD_procedure: parseSubProgram (token); break;
			case KEYWORD_subtype:	parseSimple (token, SQLTAG_SUBTYPE); break;
			case KEYWORD_trigger:	parseSimple (token, SQLTAG_TRIGGER); break;
			case KEYWORD_type:		parseType (token); break;

			default:
				if (isType (token, TOKEN_IDENTIFIER))
				{
					if (local)
					{
						makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
					}
					else
					{
						makeSqlTag (token, SQLTAG_VARIABLE);
					}
				}
				break;
		}
		findToken (token, TOKEN_SEMICOLON);
		readToken (token);
	}
}

static void parseDeclareANSI (tokenInfo *const token, const boolean local)
{
	tokenInfo *const type = newToken ();
	/*
	 * ANSI declares are of this format:
	 *	 BEGIN
	 *		 DECLARE varname1 datatype;
	 *		 DECLARE varname2 datatype;
	 *		 ...
	 *
	 * This differs from PL/SQL, where DECLARE precedes the BEGIN block
	 * and the DECLARE keyword is not repeated.
	 */
	while (isKeyword (token, KEYWORD_declare))
	{
		readToken (token);
		readToken (type);

		if (isKeyword (type, KEYWORD_cursor))
			makeSqlTag (token, SQLTAG_CURSOR);
		else if (isKeyword (token, KEYWORD_local) &&
				isKeyword (type, KEYWORD_temporary))
		{
			/*
			 * DECLARE LOCAL TEMPORARY TABLE table_name (
			 *	  c1 int,
			 *	  c2 int
			 * );
			 */
			readToken (token);
			if (isKeyword (token, KEYWORD_table))
			{
				readToken (token);
				if (isType(token, TOKEN_IDENTIFIER) ||
						isType(token, TOKEN_STRING) )
				{
					makeSqlTag (token, SQLTAG_TABLE);
				}
			}
		}
		else if (isType (token, TOKEN_IDENTIFIER) ||
				isType (token, TOKEN_STRING))
		{
			if (local)
				makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
			else
				makeSqlTag (token, SQLTAG_VARIABLE);
		}
		findToken (token, TOKEN_SEMICOLON);
		readToken (token);
	}
	deleteToken (type);
}

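/*
 * A hand-written example (not from any test suite) of what the two
 * declaration parsers above pick up.  Oracle-style declarations are handled
 * by parseDeclare:
 *
 *     CREATE PROCEDURE p1 AS
 *         CURSOR c1 IS SELECT 1 FROM dual;    -- c1      -> SQLTAG_CURSOR
 *         v_count INTEGER;                    -- v_count -> a variable kind
 *     BEGIN ... END;
 *
 * ANSI-style declarations are handled by parseDeclareANSI:
 *
 *     CREATE PROCEDURE p2()
 *     BEGIN
 *         DECLARE v_total INTEGER;            -- v_total -> a variable kind
 *         ...
 *     END;
 *
 * Whether the variable kind is SQLTAG_VARIABLE or SQLTAG_LOCAL_VARIABLE
 * depends on the "local" flag passed in by the caller.
 */
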
static void parseLabel (tokenInfo *const token)
{
	/*
	 * A label has this format:
	 *	   <<tobacco_dependency>>
	 *	   DECLARE
	 *		  v_senator VARCHAR2(100) := 'THURMOND, JESSE';
	 *	   BEGIN
	 *		  IF total_contributions (v_senator, 'TOBACCO') > 25000
	 *		  THEN
	 *			  <<alochol_dependency>>
	 *			  DECLARE
	 *				  v_senator VARCHAR2(100) := 'WHATEVERIT, TAKES';
	 *			  BEGIN
	 *				  ...
	 */

	Assert (isType (token, TOKEN_BLOCK_LABEL_BEGIN));
	readToken (token);
	if (isType (token, TOKEN_IDENTIFIER))
	{
		makeSqlTag (token, SQLTAG_BLOCK_LABEL);
		readToken (token);		  /* read end of label */
	}
}

static void parseStatements (tokenInfo *const token, const boolean exit_on_endif )
{
	boolean isAnsi   = TRUE;
	boolean stmtTerm = FALSE;
	do
	{

		if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
			parseLabel (token);
		else
		{
			switch (token->keyword)
			{
				case KEYWORD_exception:
					/*
					 * EXCEPTION
					 *	 <exception handler>;
					 *
					 * Where an exception handler could be:
					 *	 BEGIN
					 *		WHEN OTHERS THEN
					 *			x := x + 3;
					 *	 END;
					 * In this case we need to skip this keyword and
					 * move on to the next token without reading until
					 * TOKEN_SEMICOLON;
					 */
					readToken (token);
					continue;

				case KEYWORD_when:
					/*
					 * WHEN statements can be used in exception clauses
					 * and CASE statements.  The CASE statement should skip
					 * these, given that below we skip over to an END statement.
					 * But for an exception clause, we can have:
					 *	   EXCEPTION
					 *		   WHEN OTHERS THEN
					 *		   BEGIN
					 *				  x := x + 3;
					 *		   END;
					 * If we skip to the TOKEN_SEMICOLON, we miss the begin
					 * of a nested BEGIN END block.  So read the next token
					 * after the THEN and restart the LOOP.
					 */
					while (! isKeyword (token, KEYWORD_then))
						readToken (token);

					readToken (token);
					continue;

				case KEYWORD_if:
					/*
					 * We do not want to look for a ; since for an empty
					 * IF block, it would skip over the END.
					 *	IF...THEN
					 *	END IF;
					 *
					 *	IF...THEN
					 *	ELSE
					 *	END IF;
					 *
					 *	IF...THEN
					 *	ELSEIF...THEN
					 *	ELSE
					 *	END IF;
					 *
					 *	or non-ANSI
					 *	IF ...
					 *	BEGIN
					 *	END
					 */
					while ( ! isKeyword (token, KEYWORD_then)  &&
							! isKeyword (token, KEYWORD_begin) )
					{
						readToken (token);
					}

					if( isKeyword (token, KEYWORD_begin ) )
					{
						isAnsi = FALSE;
						parseBlock(token, FALSE);

						/*
						 * Handle the non-Ansi IF blocks.
						 * parseBlock consumes the END, so if the next
						 * token is a command terminator (like GO)
						 * we know we are done with this statement.
						 */
						if ( isCmdTerm (token) )
							stmtTerm = TRUE;
					}
					else
					{
						readToken (token);

						while( ! (isKeyword (token, KEYWORD_end ) ||
								  isKeyword (token, KEYWORD_endif ) )
							 )
						{
							if ( isKeyword (token, KEYWORD_else) ||
									isKeyword (token, KEYWORD_elseif) )
								readToken (token);

							parseStatements (token, TRUE);

							if ( isCmdTerm(token) )
								readToken (token);

						}

						/*
						 * parseStatements returns when it finds an END, an IF
						 * should follow the END for ANSI anyway.
						 *	IF...THEN
						 *	END IF;
						 */
						if( isKeyword (token, KEYWORD_end ) )
							readToken (token);

						if( isKeyword (token, KEYWORD_if ) || isKeyword (token, KEYWORD_endif ) )
						{
							readToken (token);
							if ( isCmdTerm(token) )
								stmtTerm = TRUE;
						}
						else
						{
							/*
							 * Well we need to do something here.
							 * There are lots of different END statements
							 *	   END;
							 *	   END CASE;
							 *	   ENDIF;
							 *	   ENDCASE;
							 */
						}
					}
					break;

				case KEYWORD_loop:
				case KEYWORD_case:
				case KEYWORD_for:
					/*
					 *	LOOP...
					 *	END LOOP;
					 *
					 *	CASE
					 *	WHEN '1' THEN
					 *	END CASE;
					 *
					 *	FOR loop_name AS cursor_name CURSOR FOR ...
					 *	DO
					 *	END FOR;
					 */
					if( isKeyword (token, KEYWORD_for ) )
					{
						/* loop name */
						readToken (token);
						/* AS */
						readToken (token);

						while ( ! isKeyword (token, KEYWORD_is) )
						{
							/*
							 * If this is not an AS keyword this is
							 * not a proper FOR statement and should
							 * simply be ignored
							 */
							return;
						}

						while ( ! isKeyword (token, KEYWORD_do) )
							readToken (token);
					}


					readToken (token);
					while( ! isKeyword (token, KEYWORD_end ) )
					{
						/*
						if ( isKeyword (token, KEYWORD_else) ||
								isKeyword (token, KEYWORD_elseif) )
							readToken (token);
						 */

						parseStatements (token, FALSE);

						if ( isCmdTerm(token) )
							readToken (token);
					}


					if( isKeyword (token, KEYWORD_end ) )
						readToken (token);

					/*
					 * Typically ended with
					 *	  END LOOP [loop name];
					 *	  END CASE
					 *	  END FOR [loop name];
					 */
					if ( isKeyword (token, KEYWORD_loop) ||
							isKeyword (token, KEYWORD_case) ||
							isKeyword (token, KEYWORD_for) )
						readToken (token);

					if ( isCmdTerm(token) )
						stmtTerm = TRUE;

					break;

				case KEYWORD_create:
					readToken (token);
					parseKeywords(token);
					break;

				case KEYWORD_declare:
				case KEYWORD_begin:
					parseBlock (token, TRUE);
					break;

				case KEYWORD_end:
					break;

				default:
					readToken (token);
					break;
			}
			/*
			 * Not all statements must end in a semi-colon
			 *	   begin
			 *		   if current publisher <> 'publish' then
			 *			  signal UE_FailStatement
			 *		   end if
			 *	   end;
			 * The last statement prior to an end ("signal" above) does
			 * not need a semi-colon, nor does the end if, since it is
			 * also the last statement prior to the end of the block.
			 *
			 * So we must read to the first semi-colon or an END block
			 */
			while ( ! stmtTerm &&
					! (   isKeyword (token, KEYWORD_end) ||
						 (isCmdTerm(token))              )
				  )
			{
				if ( isKeyword (token, KEYWORD_endif) &&
						exit_on_endif )
					return;

				if (isType (token, TOKEN_COLON) )
				{
					/*
					 * A : can signal a loop name
					 *	  myloop:
					 *	  LOOP
					 *		  LEAVE myloop;
					 *	  END LOOP;
					 * Unfortunately, labels do not have a
					 * cmd terminator, therefore we have to check
					 * if the next token is a keyword and process
					 * it accordingly.
					 */
					readToken (token);
					if ( isKeyword (token, KEYWORD_loop) ||
							isKeyword (token, KEYWORD_while) ||
							isKeyword (token, KEYWORD_for) )
						/* parseStatements (token); */
						return;
				}

				readToken (token);

				if (isType (token, TOKEN_OPEN_PAREN)  ||
				    isType (token, TOKEN_OPEN_CURLY)  ||
				    isType (token, TOKEN_OPEN_SQUARE) )
					skipToMatched (token);

				/*
				 * Since we know how to parse various statements
				 * if we detect them, parse them to completion
				 */
				if (isType (token, TOKEN_BLOCK_LABEL_BEGIN) ||
						isKeyword (token, KEYWORD_exception) ||
						isKeyword (token, KEYWORD_loop) ||
						isKeyword (token, KEYWORD_case) ||
						isKeyword (token, KEYWORD_for) ||
						isKeyword (token, KEYWORD_begin) )
					parseStatements (token, FALSE);
				else if (isKeyword (token, KEYWORD_if))
					parseStatements (token, TRUE);

			}
		}
		/*
		 * We assumed earlier all statements ended with a command terminator.
		 * See comment above, now, only read if the current token
		 * is not a command terminator.
		 */
		if ( isCmdTerm(token) && ! stmtTerm )
			stmtTerm = TRUE;

	} while (! isKeyword (token, KEYWORD_end) &&
			 ! (exit_on_endif && isKeyword (token, KEYWORD_endif) ) &&
			 ! stmtTerm );
}

static void parseBlock (tokenInfo *const token, const boolean local)
{
	if (isType (token, TOKEN_BLOCK_LABEL_BEGIN))
	{
		parseLabel (token);
		readToken (token);
	}
	if (! isKeyword (token, KEYWORD_begin))
	{
		readToken (token);
		/*
		 * These are Oracle style declares which generally come
		 * between an IS/AS and BEGIN block.
		 */
		parseDeclare (token, local);
	}
	if (isKeyword (token, KEYWORD_begin))
	{
		readToken (token);
		/*
		 * Check for ANSI declarations which always follow
		 * a BEGIN statement.  This routine will not advance
		 * the token if none are found.
		 */
		parseDeclareANSI (token, local);
		token->begin_end_nest_lvl++;
		while (! isKeyword (token, KEYWORD_end))
		{
			parseStatements (token, FALSE);

			if ( isCmdTerm(token) )
				readToken (token);
		}
		token->begin_end_nest_lvl--;

		/*
		 * Read the next token (we will assume
		 * it is the command delimiter)
		 */
		readToken (token);

		/*
		 * Check if the END block is terminated
		 */
		if ( !isCmdTerm (token) )
		{
			/*
			 * Not sure what to do here at the moment.
			 * I think the routine that calls parseBlock
			 * must expect the next token has already
			 * been read since it is possible this
			 * token is not a command delimiter.
			 */
			/* findCmdTerm (token, FALSE); */
		}
	}
}
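
/*
 * Taken together, the routines above form a small recursive-descent parser:
 * parseBlock handles an optional <<label>>, Oracle-style declarations via
 * parseDeclare (between IS/AS and BEGIN), ANSI declarations via
 * parseDeclareANSI (after BEGIN), and then loops on parseStatements until
 * the matching END.  parseStatements in turn recurses into parseBlock for
 * nested DECLARE/BEGIN blocks, into itself for IF, LOOP, CASE and FOR
 * bodies, and into parseKeywords after a CREATE so that nested CREATE
 * statements are also tagged.
 */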

static void parsePackage (tokenInfo *const token)
{
	/*
	 * Packages can be specified in a number of ways:
	 *		CREATE OR REPLACE PACKAGE pkg_name AS
	 * or
	 *		CREATE OR REPLACE PACKAGE owner.pkg_name AS
	 * or by specifying a package body
	 *	   CREATE OR REPLACE PACKAGE BODY pkg_name AS
	 *	   CREATE OR REPLACE PACKAGE BODY owner.pkg_name AS
	 */
	tokenInfo *const name = newToken ();
	readToken (name);
	if (isKeyword (name, KEYWORD_body))
	{
		/*
		 * Ignore the BODY tag since we will process
		 * the body or prototypes in the same manner
		 */
		readToken (name);
	}
	/* Check for owner.pkg_name */
	while (! isKeyword (token, KEYWORD_is))
	{
		readToken (token);
		if ( isType(token, TOKEN_PERIOD) )
		{
			readToken (name);
		}
	}
	if (isKeyword (token, KEYWORD_is))
	{
		if (isType (name, TOKEN_IDENTIFIER) ||
				isType (name, TOKEN_STRING))
			makeSqlTag (name, SQLTAG_PACKAGE);
		addToScope (token, name->string);
		parseBlock (token, FALSE);
		vStringClear (token->scope);
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
}

static void parseTable (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats:
	 *	   create table t1 (c1 int);
	 *	   create global temporary table t2 (c1 int);
	 *	   create table "t3" (c1 int);
	 *	   create table bob.t4 (c1 int);
	 *	   create table bob."t5" (c1 int);
	 *	   create table "bob"."t6" (c1 int);
	 *	   create table bob."t7" (c1 int);
	 * Proxy tables use this format:
	 *	   create existing table bob."t7" AT '...';
	 * SQL Server and Sybase formats
	 *	   create table OnlyTable (
	 *	   create table dbo.HasOwner (
	 *	   create table [dbo].[HasOwnerSquare] (
	 *	   create table master.dbo.HasDb (
	 *	   create table master..HasDbNoOwner (
	 *	   create table [master].dbo.[HasDbAndOwnerSquare] (
	 *	   create table [master]..[HasDbNoOwnerSquare] (
	 */

	/* This could be a database, owner or table name */
	readToken (name);
	if (isType (name, TOKEN_OPEN_SQUARE))
	{
		readToken (name);
		/* Read close square */
		readToken (token);
	}
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		/*
		 * This could be a owner or table name.
		 * But this is also a special case since the table can be
		 * referenced with a blank owner:
		 *	   dbname..tablename
		 */
		readToken (name);
		if (isType (name, TOKEN_OPEN_SQUARE))
		{
			readToken (name);
			/* Read close square */
			readToken (token);
		}
		/* Check if a blank name was provided */
		if (isType (name, TOKEN_PERIOD))
		{
			readToken (name);
			if (isType (name, TOKEN_OPEN_SQUARE))
			{
				readToken (name);
				/* Read close square */
				readToken (token);
			}
		}
		readToken (token);
		if (isType (token, TOKEN_PERIOD))
		{
			/* This can only be the table name */
			readToken (name);
			if (isType (name, TOKEN_OPEN_SQUARE))
			{
				readToken (name);
				/* Read close square */
				readToken (token);
			}
			readToken (token);
		}
	}
	if (isType (token, TOKEN_OPEN_PAREN))
	{
		if (isType (name, TOKEN_IDENTIFIER) ||
				isType (name, TOKEN_STRING))
		{
			makeSqlTag (name, SQLTAG_TABLE);
			vStringCopy(token->scope, name->string);
			parseRecord (token);
			vStringClear (token->scope);
		}
	}
	else if (isKeyword (token, KEYWORD_at))
	{
		if (isType (name, TOKEN_IDENTIFIER))
		{
			makeSqlTag (name, SQLTAG_TABLE);
		}
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
}

static void parseIndex (tokenInfo *const token)
{
	tokenInfo *const name  = newToken ();
	tokenInfo *const owner = newToken ();

	/*
	 * This deals with these formats
	 *	   create index i1 on t1(c1) create index "i2" on t1(c1)
	 *	   create virtual unique clustered index "i3" on t1(c1)
	 *	   create unique clustered index "i4" on t1(c1)
	 *	   create clustered index "i5" on t1(c1)
	 *	   create bitmap index "i6" on t1(c1)
	 */

	readToken (name);
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		readToken (name);
		readToken (token);
	}
	if ( isKeyword (token, KEYWORD_on) &&
			(isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING) ) )
	{
		readToken (owner);
		readToken (token);
		if (isType (token, TOKEN_PERIOD))
		{
			readToken (owner);
			readToken (token);
		}
		addToScope(name, owner->string);
		makeSqlTag (name, SQLTAG_INDEX);
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
	deleteToken (owner);
}

static void parseEvent (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   create event e1 handler begin end;
	 *	   create event "e2" handler begin end;
	 *	   create event dba."e3" handler begin end;
	 *	   create event "dba"."e4" handler begin end;
	 */

	readToken (name);
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		readToken (name);
	}
	while (! (isKeyword (token, KEYWORD_handler) ||
				(isType (token, TOKEN_SEMICOLON)))	)
	{
		readToken (token);
	}

	if ( isKeyword (token, KEYWORD_handler) ||
			isType (token, TOKEN_SEMICOLON)	  )
	{
		makeSqlTag (name, SQLTAG_EVENT);
	}

	if (isKeyword (token, KEYWORD_handler))
	{
		readToken (token);
		if ( isKeyword (token, KEYWORD_begin) )
		{
			parseBlock (token, TRUE);
		}
		findCmdTerm (token, TRUE);
	}
	deleteToken (name);
}

static void parseTrigger (tokenInfo *const token)
{
	tokenInfo *const name  = newToken ();
	tokenInfo *const table = newToken ();

	/*
	 * This deals with these formats
	 *	   create or replace trigger tr1 begin end;
	 *	   create trigger "tr2" begin end;
	 *	   drop trigger "droptr1";
	 *	   create trigger "tr3" CALL sp_something();
	 *	   create trigger "owner"."tr4" begin end;
	 *	   create trigger "tr5" not valid;
	 *	   create trigger "tr6" begin end;
	 */

	readToken (name);
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		readToken (name);
		readToken (token);
	}

	while ( !isKeyword (token, KEYWORD_on) &&
			!isCmdTerm (token)	)
	{
		readToken (token);
	}

	/*if (! isType (token, TOKEN_SEMICOLON) ) */
	if (! isCmdTerm (token) )
	{
		readToken (table);
		readToken (token);
		if (isType (token, TOKEN_PERIOD))
		{
			readToken (table);
			readToken (token);
		}

		while (!
				(isKeyword (token, KEYWORD_begin) ||
				 (isKeyword (token, KEYWORD_call)) ||
				 ( isCmdTerm (token)))	)
		{
			if ( isKeyword (token, KEYWORD_declare) )
			{
				addToScope(token, name->string);
				parseDeclare(token, TRUE);
				vStringClear(token->scope);
			}
			else
				readToken (token);
		}

		if ( isKeyword (token, KEYWORD_begin) ||
				isKeyword (token, KEYWORD_call) )
		{
			addToScope(name, table->string);
			makeSqlTag (name, SQLTAG_TRIGGER);
			addToScope(token, table->string);
			if ( isKeyword (token, KEYWORD_begin) )
			{
				parseBlock (token, TRUE);
			}
			vStringClear(token->scope);
		}
	}

	findCmdTerm (token, TRUE);
	deleteToken (name);
	deleteToken (table);
}

static void parsePublication (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   create or replace publication pu1 ()
	 *	   create publication "pu2" ()
	 *	   create publication dba."pu3" ()
	 *	   create publication "dba"."pu4" ()
	 */

	readToken (name);
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		readToken (name);
		readToken (token);
	}
	if (isType (token, TOKEN_OPEN_PAREN))
	{
		if (isType (name, TOKEN_IDENTIFIER) ||
				isType (name, TOKEN_STRING))
		{
			makeSqlTag (name, SQLTAG_PUBLICATION);
		}
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
}

static void parseService (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   CREATE SERVICE s1 TYPE 'HTML'
	 *		   AUTHORIZATION OFF USER DBA AS
	 *		   SELECT *
	 *			 FROM SYS.SYSTABLE;
	 *	   CREATE SERVICE "s2" TYPE 'HTML'
	 *		   AUTHORIZATION OFF USER DBA AS
	 *		   CALL sp_Something();
	 */

	readToken (name);
	readToken (token);
	if (isKeyword (token, KEYWORD_type))
	{
		if (isType (name, TOKEN_IDENTIFIER) ||
				isType (name, TOKEN_STRING))
		{
			makeSqlTag (name, SQLTAG_SERVICE);
		}
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
}

static void parseDomain (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   CREATE DOMAIN|DATATYPE [AS] your_name ...;
	 */

	readToken (name);
	if (isKeyword (name, KEYWORD_is))
	{
		readToken (name);
	}
	readToken (token);
	if (isType (name, TOKEN_IDENTIFIER) ||
			isType (name, TOKEN_STRING))
	{
		makeSqlTag (name, SQLTAG_DOMAIN);
	}
	findCmdTerm (token, FALSE);
	deleteToken (name);
}

static void parseDrop (tokenInfo *const token)
{
	/*
	 * This deals with these formats
	 *	   DROP TABLE|PROCEDURE|DOMAIN|DATATYPE name;
	 *
	 * Just simply skip over these statements.
	 * They are often confused with PROCEDURE prototypes
	 * since the syntax is similar, this effectively deals with
	 * the issue for all types.
	 */

	findCmdTerm (token, FALSE);
}

static void parseVariable (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   create variable varname1 integer;
	 *	   create variable @varname2 integer;
	 *	   create variable "varname3" integer;
	 *	   drop variable @varname3;
	 */

	readToken (name);
	readToken (token);
	if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING))
			&& !isType (token, TOKEN_SEMICOLON) )
	{
		makeSqlTag (name, SQLTAG_VARIABLE);
	}
	findCmdTerm (token, TRUE);

	deleteToken (name);
}

static void parseSynonym (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   create synonym s1 for t1;
	 *	   create synonym "s2" for dba.t1;
	 */

	readToken (name);
	readToken (token);
	if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING))
			&& isKeyword (token, KEYWORD_for) )
	{
		makeSqlTag (name, SQLTAG_SYNONYM);
	}
	findCmdTerm (token, TRUE);

	deleteToken (name);
}

static void parseView (tokenInfo *const token)
{
	tokenInfo *const name = newToken ();

	/*
	 * This deals with these formats
	 *	   create view v1 as select * from t1;
	 *	   create view dba."v2" (c1, c2) as select c1, c2 from t1;
	 */

	readToken (name);
	readToken (token);
	if (isType (token, TOKEN_PERIOD))
	{
		readToken (name);
		readToken (token);
	}
	if ( isType (token, TOKEN_OPEN_PAREN) )
	{
		skipArgumentList(token);

	}

	while (!(isKeyword (token, KEYWORD_is) ||
				isType (token, TOKEN_SEMICOLON)
			))
	{
		readToken (token);
	}

	if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING))
			&& isKeyword (token, KEYWORD_is) )
	{
		makeSqlTag (name, SQLTAG_VIEW);
	}

	findCmdTerm (token, TRUE);

	deleteToken (name);
}

static void parseMLTable (tokenInfo *const token)
{
	tokenInfo *const version = newToken ();
	tokenInfo *const table = newToken …
[File truncated: the remainder of sql.c (2,371 lines in total) is not shown.]