
/amanda/tags/3_1_2_sol01/device-src/s3.c

  1. /*
  2. * Copyright (c) 2008, 2009, 2010 Zmanda, Inc. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License version 2 as published
  6. * by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  10. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  11. * for more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along
  14. * with this program; if not, write to the Free Software Foundation, Inc.,
  15. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  16. *
  17. * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
  18. * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
  19. */
  20. /* TODO
  21. * - collect speed statistics
  22. * - debugging mode
  23. */
  24. #ifdef HAVE_CONFIG_H
  25. /* use a relative path here to avoid conflicting with Perl's config.h. */
  26. #include "../config/config.h"
  27. #endif
  28. #include <string.h>
  29. #include "s3.h"
  30. #include "s3-util.h"
  31. #ifdef HAVE_REGEX_H
  32. #include <regex.h>
  33. #endif
  34. #ifdef HAVE_SYS_TYPES_H
  35. #include <sys/types.h>
  36. #endif
  37. #ifdef HAVE_SYS_STAT_H
  38. #include <sys/stat.h>
  39. #endif
  40. #ifdef HAVE_UNISTD_H
  41. #include <unistd.h>
  42. #endif
  43. #ifdef HAVE_DIRENT_H
  44. #include <dirent.h>
  45. #endif
  46. #ifdef HAVE_TIME_H
  47. #include <time.h>
  48. #endif
  49. #ifdef HAVE_UTIL_H
  50. #include "util.h"
  51. #endif
  52. #ifdef HAVE_AMANDA_H
  53. #include "amanda.h"
  54. #endif
  55. #include <curl/curl.h>
  56. /* Constant renamed after version 7.10.7 */
  57. #ifndef CURLINFO_RESPONSE_CODE
  58. #define CURLINFO_RESPONSE_CODE CURLINFO_HTTP_CODE
  59. #endif
  60. /* We don't need OpenSSL's kerberos support, and it's broken in
  61. * RHEL 3 anyway. */
  62. #define OPENSSL_NO_KRB5
  63. #ifdef HAVE_OPENSSL_HMAC_H
  64. # include <openssl/hmac.h>
  65. #else
  66. # ifdef HAVE_CRYPTO_HMAC_H
  67. # include <crypto/hmac.h>
  68. # else
  69. # ifdef HAVE_HMAC_H
  70. # include <hmac.h>
  71. # endif
  72. # endif
  73. #endif
  74. #include <openssl/err.h>
  75. #include <openssl/ssl.h>
  76. #include <openssl/md5.h>
  77. /* Maximum key length as specified in the S3 documentation
  78. * (*excluding* null terminator) */
  79. #define S3_MAX_KEY_LENGTH 1024
  80. #define AMAZON_SECURITY_HEADER "x-amz-security-token"
  81. #define AMAZON_BUCKET_CONF_TEMPLATE "\
  82. <CreateBucketConfiguration>\n\
  83. <LocationConstraint>%s</LocationConstraint>\n\
  84. </CreateBucketConfiguration>"
  85. #define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
  86. #define AMAZON_WILDCARD_LOCATION "*"
  87. /* parameters for exponential backoff in the face of retriable errors */
  88. /* start at 0.01s */
  89. #define EXPONENTIAL_BACKOFF_START_USEC G_USEC_PER_SEC/100
  90. /* double at each retry */
  91. #define EXPONENTIAL_BACKOFF_BASE 2
  92. /* retry 14 times (for a total of about 3 minutes spent waiting) */
  93. #define EXPONENTIAL_BACKOFF_MAX_RETRIES 14
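/* (Worked out from the constants above: the retry loop in perform_request()
 * sleeps 0.01s, 0.02s, ..., 0.01s * 2^13 before giving up, i.e. roughly
 * 0.01 * (2^14 - 1) ~= 164 seconds in total, which is the "about 3 minutes"
 * noted above.) */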
  94. /* general "reasonable size" parameters */
  95. #define MAX_ERROR_RESPONSE_LEN (100*1024)
  96. /* Results which should always be retried */
  97. #define RESULT_HANDLING_ALWAYS_RETRY \
  98. { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
  99. { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
  100. { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
  101. { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
  102. { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
  103. { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
  104. { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
  105. { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
  106. { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
  107. { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
  108. { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
  109. { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
  110. /*
  111. * Data structures and associated functions
  112. */
  113. struct S3Handle {
  114. /* (all strings in this struct are freed by s3_free()) */
  115. char *access_key;
  116. char *secret_key;
  117. char *user_token;
  118. /* attributes for new objects */
  119. char *bucket_location;
  120. char *storage_class;
  121. char *ca_info;
  122. CURL *curl;
  123. gboolean verbose;
  124. gboolean use_ssl;
  125. guint64 max_send_speed;
  126. guint64 max_recv_speed;
  127. /* information from the last request */
  128. char *last_message;
  129. guint last_response_code;
  130. s3_error_code_t last_s3_error_code;
  131. CURLcode last_curl_code;
  132. guint last_num_retries;
  133. void *last_response_body;
  134. guint last_response_body_size;
  135. };
  136. typedef struct {
  137. CurlBuffer resp_buf;
  138. s3_write_func write_func;
  139. s3_reset_func reset_func;
  140. gpointer write_data;
  141. gboolean headers_done;
  142. gboolean int_write_done;
  143. char *etag;
  144. } S3InternalData;
  145. /* Callback function to examine headers one-at-a-time
  146. *
  147. * @note this is the same as CURLOPT_HEADERFUNCTION
  148. *
  149. * @param data: The pointer to read data from
  150. * @param size: The size of each "element" of the data buffer in bytes
  151. * @param nmemb: The number of elements in the data buffer.
  152. * So, the buffer's size is size*nmemb bytes.
  153. * @param stream: the header_data (an opaque pointer)
  154. *
  155. * @return The number of bytes written to the buffer or
  156. * CURL_WRITEFUNC_PAUSE to pause.
  157. * If it's the number of bytes written, it should match the buffer size
  158. */
  159. typedef size_t (*s3_header_func)(void *data, size_t size, size_t nmemb, void *stream);
  160. /*
  161. * S3 errors */
  162. /* (see preprocessor magic in s3.h) */
  163. static char * s3_error_code_names[] = {
  164. #define S3_ERROR(NAME) #NAME
  165. S3_ERROR_LIST
  166. #undef S3_ERROR
  167. };
  168. /* Convert an s3 error name to an error code. This function
  169. * matches strings case-insensitively, and is appropriate for use
  170. * on data from the network.
  171. *
  172. * @param s3_error_name: the error name
  173. * @returns: the error code (see constants in s3.h)
  174. */
  175. static s3_error_code_t
  176. s3_error_code_from_name(char *s3_error_name);
  177. /* Convert an s3 error code to a string
  178. *
  179. * @param s3_error_code: the error code to convert
  180. * @returns: statically allocated string
  181. */
  182. static const char *
  183. s3_error_name_from_code(s3_error_code_t s3_error_code);
  184. /*
  185. * result handling */
  186. /* result handling is specified by a static array of result_handling structs,
  187. * which match based on the HTTP response_code, the S3 error code, and the CURL
  188. * code. The result given for the first match is used. 0 acts as a wildcard for
  189. * each of response_code, s3_error_code, and curl_code. The list is terminated
  190. * with a struct containing 0 for all three; the result for that struct is the
  191. * default result.
  192. *
  193. * See RESULT_HANDLING_ALWAYS_RETRY for an example.
  194. */
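/* For illustration, a typical table (mirroring the one used by s3_upload()
 * later in this file) looks like:
 *
 *     static result_handling_t result_handling[] = {
 *         { 200, 0, 0, S3_RESULT_OK },
 *         RESULT_HANDLING_ALWAYS_RETRY,
 *         { 0,   0, 0, S3_RESULT_FAIL }    (terminating/default entry)
 *     };
 */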
  195. typedef enum {
  196. S3_RESULT_RETRY = -1,
  197. S3_RESULT_FAIL = 0,
  198. S3_RESULT_OK = 1
  199. } s3_result_t;
  200. typedef struct result_handling {
  201. guint response_code;
  202. s3_error_code_t s3_error_code;
  203. CURLcode curl_code;
  204. s3_result_t result;
  205. } result_handling_t;
  206. /* Lookup a result in C{result_handling}.
  207. *
  208. * @param result_handling: array of handling specifications
  209. * @param response_code: response code from operation
  210. * @param s3_error_code: s3 error code from operation, if any
  211. * @param curl_code: the CURL error, if any
  212. * @returns: the matching result
  213. */
  214. static s3_result_t
  215. lookup_result(const result_handling_t *result_handling,
  216. guint response_code,
  217. s3_error_code_t s3_error_code,
  218. CURLcode curl_code);
  219. /*
  220. * Precompiled regular expressions */
  221. static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
  222. location_con_regex;
  223. /*
  224. * Utility functions
  225. */
  226. /* Check if a string is non-empty
  227. *
  228. * @param str: string to check
  229. * @returns: true iff str is non-NULL and not "\0"
  230. */
  231. static gboolean is_non_empty_string(const char *str);
  232. /* Construct the URL for an Amazon S3 REST request.
  233. *
  234. * A new string is allocated and returned; it is the responsibility of the caller to free it.
  235. *
  236. * @param bucket: the bucket being accessed, or NULL for none
  237. * @param key: the key being accessed, or NULL for none
  238. * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
  239. * @param query: the query string to append (without the leading '?'), or NULL for none
  240. * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
  241. * @param use_ssl: if TRUE, the "https" scheme is used
  242. */
  243. static char *
  244. build_url(const char *bucket,
  245. const char *key,
  246. const char *subresource,
  247. const char *query,
  248. gboolean use_subdomain,
  249. gboolean use_ssl);
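/* For illustration (derived from the logic in build_url() below), the
 * resulting URL looks like
 *     https://<bucket>.s3.amazonaws.com/<key>?acl      with use_subdomain
 *     https://s3.amazonaws.com/<bucket>/<key>?acl      without it
 * and uses the "http" scheme when use_ssl is FALSE. */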
  250. /* Create proper authorization headers for an Amazon S3 REST
  251. * request to C{headers}.
  252. *
  253. * @note: C{X-Amz} headers (in C{headers}) must
  254. * - be in lower-case
  255. * - be in alphabetical order
  256. * - have no spaces around the colon
  257. * (don't yell at me -- see the Amazon Developer Guide)
  258. *
  259. * @param hdl: the S3Handle object
  260. * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
  261. * @param bucket: the bucket being accessed, or NULL for none
  262. * @param key: the key being accessed, or NULL for none
  263. * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
  264. * @param md5_hash: the MD5 hash of the request body, or NULL for none
  265. * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
  266. */
  267. static struct curl_slist *
  268. authenticate_request(S3Handle *hdl,
  269. const char *verb,
  270. const char *bucket,
  271. const char *key,
  272. const char *subresource,
  273. const char *md5_hash,
  274. gboolean use_subdomain);
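/* For illustration (a sketch based on the code in authenticate_request()),
 * the string signed for a simple PUT with no DevPay token or storage class
 * looks like
 *
 *     PUT\n
 *     <base64 Content-MD5>\n
 *     \n                                   (Content-Type is empty)
 *     Sun, 17 Jan 2010 19:36:42 GMT\n
 *     /<bucket>/<key>
 *
 * with any x-amz-* headers inserted just before the CanonicalizedResource. */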
  275. /* Interpret the response to an S3 operation, assuming CURL completed its request
  276. * successfully. This function fills in the relevant C{hdl->last*} members.
  277. *
  278. * @param hdl: The S3Handle object
  279. * @param body: the response body
  280. * @param body_len: the length of the response body
  281. * @param etag: The response's ETag header
  282. * @param content_md5: The hex-encoded MD5 hash of the request body,
  283. * which will be checked against the response's ETag header.
  284. * If NULL, the header is not checked.
  285. * If non-NULL, then the body should have the response headers at its beginning.
  286. * @returns: TRUE if the response should be retried (e.g., network error)
  287. */
  288. static gboolean
  289. interpret_response(S3Handle *hdl,
  290. CURLcode curl_code,
  291. char *curl_error_buffer,
  292. gchar *body,
  293. guint body_len,
  294. const char *etag,
  295. const char *content_md5);
  296. /* Perform an S3 operation. This function handles all of the details
  297. * of retrying requests and so on.
  298. *
  299. * The concepts of bucket and keys are defined by the Amazon S3 API.
  300. * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
  301. *
  302. * Individual sub-resources are defined in several places. In the REST API,
  303. * they are represented by a "flag" in the "query string".
  304. * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
  305. *
  306. * @param hdl: the S3Handle object
  307. * @param verb: the HTTP request method
  308. * @param bucket: the bucket to access, or NULL for none
  309. * @param key: the key to access, or NULL for none
  310. * @param subresource: the "sub-resource" to request (e.g. "acl") or NULL for none
  311. * @param query: the query string to send (not including the initial '?'),
  312. * or NULL for none
  313. * @param read_func: the callback for reading data
  314. * Will use s3_empty_read_func if NULL is passed in.
  315. * @param read_reset_func: the callback to reset the read data source
  316. * @param size_func: the callback to get the number of bytes to upload
  317. * @param md5_func: the callback to get the MD5 hash of the data to upload
  318. * @param read_data: pointer to pass to the above functions
  319. * @param write_func: the callback for writing data.
  320. * Will use s3_counter_write_func if NULL is passed in.
  321. * @param write_reset_func: the callback to reset the write data destination
  322. * @param write_data: pointer to pass to C{write_func}
  323. * @param progress_func: the callback for progress information
  324. * @param progress_data: pointer to pass to C{progress_func}
  325. * @param result_handling: instructions for handling the results; see above.
  326. * @returns: the result specified by result_handling; details of the response
  327. * are then available in C{hdl->last*}
  328. */
  329. static s3_result_t
  330. perform_request(S3Handle *hdl,
  331. const char *verb,
  332. const char *bucket,
  333. const char *key,
  334. const char *subresource,
  335. const char *query,
  336. s3_read_func read_func,
  337. s3_reset_func read_reset_func,
  338. s3_size_func size_func,
  339. s3_md5_func md5_func,
  340. gpointer read_data,
  341. s3_write_func write_func,
  342. s3_reset_func write_reset_func,
  343. gpointer write_data,
  344. s3_progress_func progress_func,
  345. gpointer progress_data,
  346. const result_handling_t *result_handling);
  347. /*
  348. * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
  349. * call an external function if one was provided.
  350. */
  351. static size_t
  352. s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream);
  353. /*
  354. * a function to reset our internal buffer
  355. */
  356. static void
  357. s3_internal_reset_func(void * stream);
  358. /*
  359. * a CURLOPT_HEADERFUNCTION to save the ETag header only.
  360. */
  361. static size_t
  362. s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream);
  363. static gboolean
  364. compile_regexes(void);
  365. /*
  366. * Static function implementations
  367. */
  368. static s3_error_code_t
  369. s3_error_code_from_name(char *s3_error_name)
  370. {
  371. int i;
  372. if (!s3_error_name) return S3_ERROR_Unknown;
  373. /* do a brute-force search through the list, since it's not sorted */
  374. for (i = 0; i < S3_ERROR_END; i++) {
  375. if (g_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
  376. return i;
  377. }
  378. return S3_ERROR_Unknown;
  379. }
  380. static const char *
  381. s3_error_name_from_code(s3_error_code_t s3_error_code)
  382. {
  383. if (s3_error_code >= S3_ERROR_END)
  384. s3_error_code = S3_ERROR_Unknown;
  385. return s3_error_code_names[s3_error_code];
  386. }
  387. gboolean
  388. s3_curl_supports_ssl(void)
  389. {
  390. static int supported = -1;
  391. if (supported == -1) {
  392. #if defined(CURL_VERSION_SSL)
  393. curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
  394. if (info->features & CURL_VERSION_SSL)
  395. supported = 1;
  396. else
  397. supported = 0;
  398. #else
  399. supported = 0;
  400. #endif
  401. }
  402. return supported;
  403. }
  404. static gboolean
  405. s3_curl_throttling_compat(void)
  406. {
  407. /* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
  408. #if LIBCURL_VERSION_NUM >= 0x070f05
  409. curl_version_info_data *info;
  410. /* check the runtime version too */
  411. info = curl_version_info(CURLVERSION_NOW);
  412. return info->version_num >= 0x070f05;
  413. #else
  414. return FALSE;
  415. #endif
  416. }
  417. static s3_result_t
  418. lookup_result(const result_handling_t *result_handling,
  419. guint response_code,
  420. s3_error_code_t s3_error_code,
  421. CURLcode curl_code)
  422. {
  423. while (result_handling->response_code
  424. || result_handling->s3_error_code
  425. || result_handling->curl_code) {
  426. if ((result_handling->response_code && result_handling->response_code != response_code)
  427. || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
  428. || (result_handling->curl_code && result_handling->curl_code != curl_code)) {
  429. result_handling++;
  430. continue;
  431. }
  432. return result_handling->result;
  433. }
  434. /* return the result for the terminator, as the default */
  435. return result_handling->result;
  436. }
  437. static gboolean
  438. is_non_empty_string(const char *str)
  439. {
  440. return str && str[0] != '\0';
  441. }
  442. static char *
  443. build_url(const char *bucket,
  444. const char *key,
  445. const char *subresource,
  446. const char *query,
  447. gboolean use_subdomain,
  448. gboolean use_ssl)
  449. {
  450. GString *url = NULL;
  451. char *esc_bucket = NULL, *esc_key = NULL;
  452. /* scheme */
  453. url = g_string_new("http");
  454. if (use_ssl)
  455. g_string_append(url, "s");
  456. g_string_append(url, "://");
  457. /* domain */
  458. if (use_subdomain && bucket)
  459. g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket);
  460. else
  461. g_string_append(url, "s3.amazonaws.com/");
  462. /* path */
  463. if (!use_subdomain && bucket) {
  464. esc_bucket = curl_escape(bucket, 0);
  465. if (!esc_bucket) goto cleanup;
  466. g_string_append_printf(url, "%s", esc_bucket);
  467. if (key)
  468. g_string_append(url, "/");
  469. }
  470. if (key) {
  471. esc_key = curl_escape(key, 0);
  472. if (!esc_key) goto cleanup;
  473. g_string_append_printf(url, "%s", esc_key);
  474. }
  475. /* query string */
  476. if (subresource || query)
  477. g_string_append(url, "?");
  478. if (subresource)
  479. g_string_append(url, subresource);
  480. if (subresource && query)
  481. g_string_append(url, "&");
  482. if (query)
  483. g_string_append(url, query);
  484. cleanup:
  485. if (esc_bucket) curl_free(esc_bucket);
  486. if (esc_key) curl_free(esc_key);
  487. return g_string_free(url, FALSE);
  488. }
  489. static struct curl_slist *
  490. authenticate_request(S3Handle *hdl,
  491. const char *verb,
  492. const char *bucket,
  493. const char *key,
  494. const char *subresource,
  495. const char *md5_hash,
  496. gboolean use_subdomain)
  497. {
  498. time_t t;
  499. struct tm tmp;
  500. char *date = NULL;
  501. char *buf = NULL;
  502. HMAC_CTX ctx;
  503. GByteArray *md = NULL;
  504. char *auth_base64 = NULL;
  505. struct curl_slist *headers = NULL;
  506. char *esc_bucket = NULL, *esc_key = NULL;
  507. GString *auth_string = NULL;
  508. /* From RFC 2616 */
  509. static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
  510. static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
  511. "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
  512. /* Build the string to sign, per the S3 spec.
  513. * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
  514. */
  515. /* verb */
  516. auth_string = g_string_new(verb);
  517. g_string_append(auth_string, "\n");
  518. /* Content-MD5 header */
  519. if (md5_hash)
  520. g_string_append(auth_string, md5_hash);
  521. g_string_append(auth_string, "\n");
  522. /* Content-Type is empty*/
  523. g_string_append(auth_string, "\n");
  524. /* calculate the date */
  525. t = time(NULL);
  526. #ifdef _WIN32
  527. if (gmtime_s(&tmp, &t)) g_debug("gmtime_s error");
  528. #else
  529. if (!gmtime_r(&t, &tmp)) perror("gmtime_r");
  530. #endif
  531. date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
  532. wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
  533. tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
  534. g_string_append(auth_string, date);
  535. g_string_append(auth_string, "\n");
  536. /* CanonicalizedAmzHeaders, sorted lexicographically */
  537. if (is_non_empty_string(hdl->user_token)) {
  538. g_string_append(auth_string, AMAZON_SECURITY_HEADER);
  539. g_string_append(auth_string, ":");
  540. g_string_append(auth_string, hdl->user_token);
  541. g_string_append(auth_string, ",");
  542. g_string_append(auth_string, STS_PRODUCT_TOKEN);
  543. g_string_append(auth_string, "\n");
  544. }
  545. if (is_non_empty_string(hdl->storage_class)) {
  546. g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
  547. g_string_append(auth_string, ":");
  548. g_string_append(auth_string, hdl->storage_class);
  549. g_string_append(auth_string, "\n");
  550. }
  551. /* CanonicalizedResource */
  552. g_string_append(auth_string, "/");
  553. if (bucket) {
  554. if (use_subdomain)
  555. g_string_append(auth_string, bucket);
  556. else {
  557. esc_bucket = curl_escape(bucket, 0);
  558. if (!esc_bucket) goto cleanup;
  559. g_string_append(auth_string, esc_bucket);
  560. }
  561. }
  562. if (bucket && (use_subdomain || key))
  563. g_string_append(auth_string, "/");
  564. if (key) {
  565. esc_key = curl_escape(key, 0);
  566. if (!esc_key) goto cleanup;
  567. g_string_append(auth_string, esc_key);
  568. }
  569. if (subresource) {
  570. g_string_append(auth_string, "?");
  571. g_string_append(auth_string, subresource);
  572. }
  573. /* run HMAC-SHA1 on the canonicalized string */
  574. md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
  575. HMAC_CTX_init(&ctx);
  576. HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), EVP_sha1(), NULL);
  577. HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
  578. HMAC_Final(&ctx, md->data, &md->len);
  579. HMAC_CTX_cleanup(&ctx);
  580. auth_base64 = s3_base64_encode(md);
  581. /* append the new headers */
  582. if (is_non_empty_string(hdl->user_token)) {
  583. /* Devpay headers are included in hash. */
  584. buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token);
  585. headers = curl_slist_append(headers, buf);
  586. g_free(buf);
  587. buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", STS_PRODUCT_TOKEN);
  588. headers = curl_slist_append(headers, buf);
  589. g_free(buf);
  590. }
  591. if (is_non_empty_string(hdl->storage_class)) {
  592. buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s", hdl->storage_class);
  593. headers = curl_slist_append(headers, buf);
  594. g_free(buf);
  595. }
  596. buf = g_strdup_printf("Authorization: AWS %s:%s",
  597. hdl->access_key, auth_base64);
  598. headers = curl_slist_append(headers, buf);
  599. g_free(buf);
  600. if (md5_hash && '\0' != md5_hash[0]) {
  601. buf = g_strdup_printf("Content-MD5: %s", md5_hash);
  602. headers = curl_slist_append(headers, buf);
  603. g_free(buf);
  604. }
  605. buf = g_strdup_printf("Date: %s", date);
  606. headers = curl_slist_append(headers, buf);
  607. g_free(buf);
  608. cleanup:
  609. g_free(date);
  610. g_free(esc_bucket);
  611. g_free(esc_key);
  612. g_byte_array_free(md, TRUE);
  613. g_free(auth_base64);
  614. g_string_free(auth_string, TRUE);
  615. return headers;
  616. }
  617. static gboolean
  618. interpret_response(S3Handle *hdl,
  619. CURLcode curl_code,
  620. char *curl_error_buffer,
  621. gchar *body,
  622. guint body_len,
  623. const char *etag,
  624. const char *content_md5)
  625. {
  626. long response_code = 0;
  627. regmatch_t pmatch[2];
  628. char *error_name = NULL, *message = NULL;
  629. char *body_copy = NULL;
  630. gboolean ret = TRUE;
  631. if (!hdl) return FALSE;
  632. if (hdl->last_message) g_free(hdl->last_message);
  633. hdl->last_message = NULL;
  634. /* bail out from a CURL error */
  635. if (curl_code != CURLE_OK) {
  636. hdl->last_curl_code = curl_code;
  637. hdl->last_message = g_strdup_printf("CURL error: %s", curl_error_buffer);
  638. return FALSE;
  639. }
  640. /* CURL seems to think things were OK, so get its response code */
  641. curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
  642. hdl->last_response_code = response_code;
  643. /* check ETag, if present */
  644. if (etag && content_md5 && 200 == response_code) {
  645. if (etag && g_strcasecmp(etag, content_md5))
  646. hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
  647. else
  648. ret = FALSE;
  649. return ret;
  650. }
  651. if (200 <= response_code && response_code < 400) {
  652. /* 2xx and 3xx codes won't have a response body we care about */
  653. hdl->last_s3_error_code = S3_ERROR_None;
  654. return FALSE;
  655. }
  656. /* Now look at the body to try to get the actual Amazon error message. Rather
  657. * than parse out the XML, just use some regexes. */
  658. /* impose a reasonable limit on body size */
  659. if (body_len > MAX_ERROR_RESPONSE_LEN) {
  660. hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
  661. return FALSE;
  662. } else if (!body || body_len == 0) {
  663. hdl->last_message = g_strdup("S3 Error: Unknown (empty response body)");
  664. return TRUE; /* perhaps a network error; retry the request */
  665. }
  666. /* use strndup to get a zero-terminated string */
  667. body_copy = g_strndup(body, body_len);
  668. if (!body_copy) goto cleanup;
  669. if (!s3_regexec_wrap(&error_name_regex, body_copy, 2, pmatch, 0))
  670. error_name = find_regex_substring(body_copy, pmatch[1]);
  671. if (!s3_regexec_wrap(&message_regex, body_copy, 2, pmatch, 0))
  672. message = find_regex_substring(body_copy, pmatch[1]);
  673. if (error_name) {
  674. hdl->last_s3_error_code = s3_error_code_from_name(error_name);
  675. }
  676. if (message) {
  677. hdl->last_message = message;
  678. message = NULL; /* steal the reference to the string */
  679. }
  680. cleanup:
  681. g_free(body_copy);
  682. g_free(message);
  683. g_free(error_name);
  684. return FALSE;
  685. }
  686. /* a CURLOPT_READFUNCTION to read data from a buffer. */
  687. size_t
  688. s3_buffer_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
  689. {
  690. CurlBuffer *data = stream;
  691. guint bytes_desired = (guint) size * nmemb;
  692. /* check the number of bytes remaining, just to be safe */
  693. if (bytes_desired > data->buffer_len - data->buffer_pos)
  694. bytes_desired = data->buffer_len - data->buffer_pos;
  695. memcpy((char *)ptr, data->buffer + data->buffer_pos, bytes_desired);
  696. data->buffer_pos += bytes_desired;
  697. return bytes_desired;
  698. }
  699. size_t
  700. s3_buffer_size_func(void *stream)
  701. {
  702. CurlBuffer *data = stream;
  703. return data->buffer_len;
  704. }
  705. GByteArray*
  706. s3_buffer_md5_func(void *stream)
  707. {
  708. CurlBuffer *data = stream;
  709. GByteArray req_body_gba = {(guint8 *)data->buffer, data->buffer_len};
  710. return s3_compute_md5_hash(&req_body_gba);
  711. }
  712. void
  713. s3_buffer_reset_func(void *stream)
  714. {
  715. CurlBuffer *data = stream;
  716. data->buffer_pos = 0;
  717. }
  718. /* a CURLOPT_WRITEFUNCTION to write data to a buffer. */
  719. size_t
  720. s3_buffer_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
  721. {
  722. CurlBuffer * data = stream;
  723. guint new_bytes = (guint) size * nmemb;
  724. guint bytes_needed = data->buffer_pos + new_bytes;
  725. /* error out if the new size is greater than the maximum allowed */
  726. if (data->max_buffer_size && bytes_needed > data->max_buffer_size)
  727. return 0;
  728. /* reallocate if necessary. We use exponential sizing to make this
  729. * happen less often. */
  730. if (bytes_needed > data->buffer_len) {
  731. guint new_size = MAX(bytes_needed, data->buffer_len * 2);
  732. if (data->max_buffer_size) {
  733. new_size = MIN(new_size, data->max_buffer_size);
  734. }
  735. data->buffer = g_realloc(data->buffer, new_size);
  736. data->buffer_len = new_size;
  737. }
  738. if (!data->buffer)
  739. return 0; /* returning zero signals an error to libcurl */
  740. /* actually copy the data to the buffer */
  741. memcpy(data->buffer + data->buffer_pos, ptr, new_bytes);
  742. data->buffer_pos += new_bytes;
  743. /* signal success to curl */
  744. return new_bytes;
  745. }
  746. /* a CURLOPT_READFUNCTION that supplies no data. */
  747. size_t
  748. s3_empty_read_func(G_GNUC_UNUSED void *ptr, G_GNUC_UNUSED size_t size, G_GNUC_UNUSED size_t nmemb, G_GNUC_UNUSED void * stream)
  749. {
  750. return 0;
  751. }
  752. size_t
  753. s3_empty_size_func(G_GNUC_UNUSED void *stream)
  754. {
  755. return 0;
  756. }
  757. GByteArray*
  758. s3_empty_md5_func(G_GNUC_UNUSED void *stream)
  759. {
  760. static const GByteArray empty = {(guint8 *) "", 0};
  761. return s3_compute_md5_hash(&empty);
  762. }
  763. /* a CURLOPT_WRITEFUNCTION that discards the data and just counts the bytes.
  764. * The write_data pointer should be NULL or a pointer to a gint64.
  765. */
  766. size_t
  767. s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
  768. {
  769. gint64 *count = (gint64*) stream, inc = nmemb*size;
  770. if (count) *count += inc;
  771. return inc;
  772. }
  773. void
  774. s3_counter_reset_func(void *stream)
  775. {
  776. gint64 *count = (gint64*) stream;
  777. if (count) *count = 0;
  778. }
  779. #ifdef _WIN32
  780. /* a CURLOPT_READFUNCTION to read data from a file. */
  781. size_t
  782. s3_file_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
  783. {
  784. HANDLE *hFile = (HANDLE *) stream;
  785. DWORD bytes_read;
  786. ReadFile(hFile, ptr, (DWORD) size*nmemb, &bytes_read, NULL);
  787. return bytes_read;
  788. }
  789. size_t
  790. s3_file_size_func(void *stream)
  791. {
  792. HANDLE *hFile = (HANDLE *) stream;
  793. DWORD size = GetFileSize(hFile, NULL);
  794. if (INVALID_FILE_SIZE == size) {
  795. return -1;
  796. } else {
  797. return size;
  798. }
  799. }
  800. GByteArray*
  801. s3_file_md5_func(void *stream)
  802. {
  803. #define S3_MD5_BUF_SIZE (10*1024)
  804. HANDLE *hFile = (HANDLE *) stream;
  805. guint8 buf[S3_MD5_BUF_SIZE];
  806. DWORD bytes_read;
  807. MD5_CTX md5_ctx;
  808. GByteArray *ret = NULL;
  809. g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
  810. ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
  811. g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
  812. MD5_Init(&md5_ctx);
  813. while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
  814. MD5_Update(&md5_ctx, buf, bytes_read);
  815. }
  816. MD5_Final(ret->data, &md5_ctx);
  817. g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
  818. return ret;
  819. #undef S3_MD5_BUF_SIZE
  820. }
  821. GByteArray*
  822. s3_file_reset_func(void *stream)
  823. {
  824. HANDLE *hFile = (HANDLE *) stream;
  825. g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
  826. return NULL;
  827. }
  826. /* a CURLOPT_WRITEFUNCTION to write data to a file. */
  827. size_t
  828. s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
  829. {
  830. HANDLE *hFile = (HANDLE *) stream;
  831. DWORD bytes_written;
  832. WriteFile(hFile, ptr, (DWORD) size*nmemb, &bytes_written, NULL);
  833. return bytes_written;
  834. }
  835. #endif
  836. static int
  837. curl_debug_message(CURL *curl G_GNUC_UNUSED,
  838. curl_infotype type,
  839. char *s,
  840. size_t len,
  841. void *unused G_GNUC_UNUSED)
  842. {
  843. char *lineprefix;
  844. char *message;
  845. char **lines, **line;
  846. switch (type) {
  847. case CURLINFO_TEXT:
  848. lineprefix="";
  849. break;
  850. case CURLINFO_HEADER_IN:
  851. lineprefix="Hdr In: ";
  852. break;
  853. case CURLINFO_HEADER_OUT:
  854. lineprefix="Hdr Out: ";
  855. break;
  856. default:
  857. /* ignore data in/out -- nobody wants to see that in the
  858. * debug logs! */
  859. return 0;
  860. }
  861. /* split the input into lines */
  862. message = g_strndup(s, (gsize) len);
  863. lines = g_strsplit(message, "\n", -1);
  864. g_free(message);
  865. for (line = lines; *line; line++) {
  866. if (**line == '\0') continue; /* skip blank lines */
  867. g_debug("%s%s", lineprefix, *line);
  868. }
  869. g_strfreev(lines);
  870. return 0;
  871. }
  872. static s3_result_t
  873. perform_request(S3Handle *hdl,
  874. const char *verb,
  875. const char *bucket,
  876. const char *key,
  877. const char *subresource,
  878. const char *query,
  879. s3_read_func read_func,
  880. s3_reset_func read_reset_func,
  881. s3_size_func size_func,
  882. s3_md5_func md5_func,
  883. gpointer read_data,
  884. s3_write_func write_func,
  885. s3_reset_func write_reset_func,
  886. gpointer write_data,
  887. s3_progress_func progress_func,
  888. gpointer progress_data,
  889. const result_handling_t *result_handling)
  890. {
  891. gboolean use_subdomain;
  892. char *url = NULL;
  893. s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
  894. CURLcode curl_code = CURLE_OK;
  895. char curl_error_buffer[CURL_ERROR_SIZE] = "";
  896. struct curl_slist *headers = NULL;
  897. S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL};
  898. gboolean should_retry;
  899. guint retries = 0;
  900. gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
  901. /* corresponds to PUT, HEAD, GET, and POST */
  902. int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
  903. /* do we want to examine the headers */
  904. const char *curlopt_customrequest = NULL;
  905. /* for MD5 calculation */
  906. GByteArray *md5_hash = NULL;
  907. gchar *md5_hash_hex = NULL, *md5_hash_b64 = NULL;
  908. size_t request_body_size = 0;
  909. g_assert(hdl != NULL && hdl->curl != NULL);
  910. s3_reset(hdl);
  911. use_subdomain = is_non_empty_string(hdl->bucket_location);
  912. url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl);
  913. if (!url) goto cleanup;
  914. /* libcurl may behave strangely if these are not set correctly */
  915. if (!strncmp(verb, "PUT", 4)) {
  916. curlopt_upload = 1;
  917. } else if (!strncmp(verb, "GET", 4)) {
  918. curlopt_httpget = 1;
  919. } else if (!strncmp(verb, "POST", 5)) {
  920. curlopt_post = 1;
  921. } else if (!strncmp(verb, "HEAD", 5)) {
  922. curlopt_nobody = 1;
  923. } else {
  924. curlopt_customrequest = verb;
  925. }
  926. if (size_func) {
  927. request_body_size = size_func(read_data);
  928. }
  929. if (md5_func) {
  930. md5_hash = md5_func(read_data);
  931. if (md5_hash) {
  932. md5_hash_b64 = s3_base64_encode(md5_hash);
  933. md5_hash_hex = s3_hex_encode(md5_hash);
  934. g_byte_array_free(md5_hash, TRUE);
  935. }
  936. }
  937. if (!read_func) {
  938. /* Curl will use fread() otherwise */
  939. read_func = s3_empty_read_func;
  940. }
  941. if (write_func) {
  942. int_writedata.write_func = write_func;
  943. int_writedata.reset_func = write_reset_func;
  944. int_writedata.write_data = write_data;
  945. } else {
  946. /* Curl will use fwrite() otherwise */
  947. int_writedata.write_func = s3_counter_write_func;
  948. int_writedata.reset_func = s3_counter_reset_func;
  949. int_writedata.write_data = NULL;
  950. }
  951. while (1) {
  952. /* reset things */
  953. if (headers) {
  954. curl_slist_free_all(headers);
  955. }
  956. curl_error_buffer[0] = '\0';
  957. if (read_reset_func) {
  958. read_reset_func(read_data);
  959. }
  960. /* calls write_reset_func */
  961. s3_internal_reset_func(&int_writedata);
  962. /* set up the request */
  963. headers = authenticate_request(hdl, verb, bucket, key, subresource,
  964. md5_hash_b64, is_non_empty_string(hdl->bucket_location));
  965. if (hdl->use_ssl && hdl->ca_info) {
  966. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
  967. goto curl_error;
  968. }
  969. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
  970. goto curl_error;
  971. if (hdl->verbose) {
  972. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
  973. curl_debug_message)))
  974. goto curl_error;
  975. }
  976. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_ERRORBUFFER,
  977. curl_error_buffer)))
  978. goto curl_error;
  979. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS, 1)))
  980. goto curl_error;
  981. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1)))
  982. goto curl_error;
  983. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_URL, url)))
  984. goto curl_error;
  985. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
  986. headers)))
  987. goto curl_error;
  988. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
  989. goto curl_error;
  990. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
  991. goto curl_error;
  992. /* Note: we always have to set this apparently, for consistent "end of header" detection */
  993. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
  994. goto curl_error;
  995. /* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
  996. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
  997. goto curl_error;
  998. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
  999. goto curl_error;
  1000. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
  1001. goto curl_error;
  1002. /* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
  1003. #if LIBCURL_VERSION_NUM >= 0x070b00
  1004. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
  1005. goto curl_error;
  1006. #else
  1007. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
  1008. goto curl_error;
  1009. #endif
  1010. /* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
  1011. #if LIBCURL_VERSION_NUM >= 0x070f05
  1012. if (s3_curl_throttling_compat()) {
  1013. if (hdl->max_send_speed)
  1014. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
  1015. goto curl_error;
  1016. if (hdl->max_recv_speed)
  1017. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
  1018. goto curl_error;
  1019. }
  1020. #endif
  1021. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
  1022. goto curl_error;
  1023. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_UPLOAD, curlopt_upload)))
  1024. goto curl_error;
  1025. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POST, curlopt_post)))
  1026. goto curl_error;
  1027. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOBODY, curlopt_nobody)))
  1028. goto curl_error;
  1029. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CUSTOMREQUEST,
  1030. curlopt_customrequest)))
  1031. goto curl_error;
  1032. if (curlopt_upload) {
  1033. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
  1034. goto curl_error;
  1035. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
  1036. goto curl_error;
  1037. } else {
  1038. /* Clear request_body options. */
  1039. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
  1040. NULL)))
  1041. goto curl_error;
  1042. if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
  1043. NULL)))
  1044. goto curl_error;
  1045. }
  1046. /* Perform the request */
  1047. curl_code = curl_easy_perform(hdl->curl);
  1048. /* interpret the response into hdl->last* */
  1049. curl_error: /* (label for short-circuiting the curl_easy_perform call) */
  1050. should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
  1051. int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);
  1052. /* and, unless we know we need to retry, see what we're to do now */
  1053. if (!should_retry) {
  1054. result = lookup_result(result_handling, hdl->last_response_code,
  1055. hdl->last_s3_error_code, hdl->last_curl_code);
  1056. /* break out of the while(1) unless we're retrying */
  1057. if (result != S3_RESULT_RETRY)
  1058. break;
  1059. }
  1060. if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
  1061. /* we're out of retries, so annotate hdl->last_message appropriately and bail
  1062. * out. */
  1063. char *m = g_strdup_printf("Too many retries; last message was '%s'", hdl->last_message);
  1064. if (hdl->last_message) g_free(hdl->last_message);
  1065. hdl->last_message = m;
  1066. result = S3_RESULT_FAIL;
  1067. break;
  1068. }
  1069. g_usleep(backoff);
  1070. retries++;
  1071. backoff *= EXPONENTIAL_BACKOFF_BASE;
  1072. }
  1073. if (result != S3_RESULT_OK) {
  1074. g_debug(_("%s %s failed with %d/%s"), verb, url,
  1075. hdl->last_response_code,
  1076. s3_error_name_from_code(hdl->last_s3_error_code));
  1077. }
  1078. cleanup:
  1079. g_free(url);
  1080. if (headers) curl_slist_free_all(headers);
  1081. g_free(md5_hash_b64);
  1082. g_free(md5_hash_hex);
  1083. /* we don't deallocate the response body -- we keep it for later */
  1084. hdl->last_response_body = int_writedata.resp_buf.buffer;
  1085. hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
  1086. hdl->last_num_retries = retries;
  1087. return result;
  1088. }
  1089. static size_t
  1090. s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream)
  1091. {
  1092. S3InternalData *data = (S3InternalData *) stream;
  1093. size_t bytes_saved;
  1094. if (!data->headers_done)
  1095. return size*nmemb;
  1096. /* call write on internal buffer (if not full) */
  1097. if (data->int_write_done) {
  1098. bytes_saved = 0;
  1099. } else {
  1100. bytes_saved = s3_buffer_write_func(ptr, size, nmemb, &data->resp_buf);
  1101. if (!bytes_saved) {
  1102. data->int_write_done = TRUE;
  1103. }
  1104. }
  1105. /* call write on user buffer */
  1106. if (data->write_func) {
  1107. return data->write_func(ptr, size, nmemb, data->write_data);
  1108. } else {
  1109. return bytes_saved;
  1110. }
  1111. }
  1112. static void
  1113. s3_internal_reset_func(void * stream)
  1114. {
  1115. S3InternalData *data = (S3InternalData *) stream;
  1116. s3_buffer_reset_func(&data->resp_buf);
  1117. data->headers_done = FALSE;
  1118. data->int_write_done = FALSE;
  1119. data->etag = NULL;
  1120. if (data->reset_func) {
  1121. data->reset_func(data->write_data);
  1122. }
  1123. }
  1124. static size_t
  1125. s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
  1126. {
  1127. static const char *final_header = "\r\n";
  1128. char *header;
  1129. regmatch_t pmatch[2];
  1130. S3InternalData *data = (S3InternalData *) stream;
  1131. header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
  1132. if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
  1133. data->etag = find_regex_substring(header, pmatch[1]);
  1134. if (!strcmp(final_header, header))
  1135. data->headers_done = TRUE;
  1136. return size*nmemb;
  1137. }
  1138. static gboolean
  1139. compile_regexes(void)
  1140. {
  1141. #ifdef HAVE_REGEX_H
  1142. /* using POSIX regular expressions */
  1143. struct {const char * str; int flags; regex_t *regex;} regexes[] = {
  1144. {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
  1145. {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
  1146. {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
  1147. {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
  1148. {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
  1149. {NULL, 0, NULL}
  1150. };
  1151. char regmessage[1024];
  1152. int size, i;
  1153. int reg_result;
  1154. for (i = 0; regexes[i].str; i++) {
  1155. reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
  1156. if (reg_result != 0) {
  1157. size = regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
  1158. g_error(_("Regex error: %s"), regmessage);
  1159. return FALSE;
  1160. }
  1161. }
  1162. #else /* ! HAVE_REGEX_H */
  1163. /* using PCRE via GLib */
  1164. struct {const char * str; int flags; regex_t *regex;} regexes[] = {
  1165. {"<Code>\\s*([^<]*)\\s*</Code>",
  1166. G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
  1167. &error_name_regex},
  1168. {"^ETag:\\s*\"([^\"]+)\"\\s*$",
  1169. G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
  1170. &etag_regex},
  1171. {"<Message>\\s*([^<]*)\\s*</Message>",
  1172. G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
  1173. &message_regex},
  1174. {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$",
  1175. G_REGEX_OPTIMIZE | G_REGEX_NO_AUTO_CAPTURE,
  1176. &subdomain_regex},
  1177. {"(/>)|(>([^<]*)</LocationConstraint>)",
  1178. G_REGEX_CASELESS,
  1179. &location_con_regex},
  1180. {NULL, 0, NULL}
  1181. };
  1182. int i;
  1183. GError *err = NULL;
  1184. for (i = 0; regexes[i].str; i++) {
  1185. *(regexes[i].regex) = g_regex_new(regexes[i].str, regexes[i].flags, 0, &err);
  1186. if (err) {
  1187. g_error(_("Regex error: %s"), err->message);
  1188. g_error_free(err);
  1189. return FALSE;
  1190. }
  1191. }
  1192. #endif
  1193. return TRUE;
  1194. }
  1195. /*
  1196. * Public function implementations
  1197. */
  1198. gboolean s3_init(void)
  1199. {
  1200. static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
  1201. static gboolean init = FALSE, ret;
  1202. /* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
  1203. g_static_mutex_lock (&mutex);
  1204. if (!init) {
  1205. ret = compile_regexes();
  1206. init = TRUE;
  1207. }
  1208. g_static_mutex_unlock(&mutex);
  1209. return ret;
  1210. }
  1211. gboolean
  1212. s3_curl_location_compat(void)
  1213. {
  1214. curl_version_info_data *info;
  1215. info = curl_version_info(CURLVERSION_NOW);
  1216. return info->version_num > 0x070a02;
  1217. }
  1218. gboolean
  1219. s3_bucket_location_compat(const char *bucket)
  1220. {
  1221. return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
  1222. }
  1223. S3Handle *
  1224. s3_open(const char *access_key,
  1225. const char *secret_key,
  1226. const char *user_token,
  1227. const char *bucket_location,
  1228. const char *storage_class,
  1229. const char *ca_info
  1230. ) {
  1231. S3Handle *hdl;
  1232. hdl = g_new0(S3Handle, 1);
  1233. if (!hdl) goto error;
  1234. hdl->verbose = FALSE;
  1235. hdl->use_ssl = s3_curl_supports_ssl();
  1236. g_assert(access_key);
  1237. hdl->access_key = g_strdup(access_key);
  1238. g_assert(secret_key);
  1239. hdl->secret_key = g_strdup(secret_key);
  1240. /* NULL is okay */
  1241. hdl->user_token = g_strdup(user_token);
  1242. /* NULL is okay */
  1243. hdl->bucket_location = g_strdup(bucket_location);
  1244. /* NULL is ok */
  1245. hdl->storage_class = g_strdup(storage_class);
  1246. /* NULL is okay */
  1247. hdl->ca_info = g_strdup(ca_info);
  1248. hdl->curl = curl_easy_init();
  1249. if (!hdl->curl) goto error;
  1250. return hdl;
  1251. error:
  1252. s3_free(hdl);
  1253. return NULL;
  1254. }
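/* For illustration only (a hypothetical caller, not part of this file):
 *
 *     s3_init();
 *     S3Handle *hdl = s3_open(access_key, secret_key, NULL, NULL, NULL, NULL);
 *     if (!hdl) { handle the failure }
 *
 * (NULL is acceptable for user_token, bucket_location, storage_class, and
 * ca_info, as noted in s3_open() above.) */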
  1255. void
  1256. s3_free(S3Handle *hdl)
  1257. {
  1258. s3_reset(hdl);
  1259. if (hdl) {
  1260. g_free(hdl->access_key);
  1261. g_free(hdl->secret_key);
  1262. if (hdl->user_token) g_free(hdl->user_token);
  1263. if (hdl->bucket_location) g_free(hdl->bucket_location);
  1264. if (hdl->storage_class) g_free(hdl->storage_class);
  1265. if (hdl->ca_info) g_free(hdl->ca_info);
  1266. if (hdl->curl) curl_easy_cleanup(hdl->curl);
  1266. g_free(hdl);
  1267. }
  1268. }
  1269. void
  1270. s3_reset(S3Handle *hdl)
  1271. {
  1272. if (hdl) {
  1273. /* We don't call curl_easy_reset here, because doing that in curl
  1274. * < 7.16 blanks the default CA certificate path, and there's no way
  1275. * to get it back. */
  1276. if (hdl->last_message) {
  1277. g_free(hdl->last_message);
  1278. hdl->last_message = NULL;
  1279. }
  1280. hdl->last_response_code = 0;
  1281. hdl->last_curl_code = 0;
  1282. hdl->last_s3_error_code = 0;
  1283. hdl->last_num_retries = 0;
  1284. if (hdl->last_response_body) {
  1285. g_free(hdl->last_response_body);
  1286. hdl->last_response_body = NULL;
  1287. }
  1288. hdl->last_response_body_size = 0;
  1289. }
  1290. }
  1291. void
  1292. s3_error(S3Handle *hdl,
  1293. const char **message,
  1294. guint *response_code,
  1295. s3_error_code_t *s3_error_code,
  1296. const char **s3_error_name,
  1297. CURLcode *curl_code,
  1298. guint *num_retries)
  1299. {
  1300. if (hdl) {
  1301. if (message) *message = hdl->last_message;
  1302. if (response_code) *response_code = hdl->last_response_code;
  1303. if (s3_error_code) *s3_error_code = hdl->last_s3_error_code;
  1304. if (s3_error_name) *s3_error_name = s3_error_name_from_code(hdl->last_s3_error_code);
  1305. if (curl_code) *curl_code = hdl->last_curl_code;
  1306. if (num_retries) *num_retries = hdl->last_num_retries;
  1307. } else {
  1308. /* no hdl? return something coherent, anyway */
  1309. if (message) *message = "NULL S3Handle";
  1310. if (response_code) *response_code = 0;
  1311. if (s3_error_code) *s3_error_code = 0;
  1312. if (s3_error_name) *s3_error_name = NULL;
  1313. if (curl_code) *curl_code = 0;
  1314. if (num_retries) *num_retries = 0;
  1315. }
  1316. }
  1317. void
  1318. s3_verbose(S3Handle *hdl, gboolean verbose)
  1319. {
  1320. hdl->verbose = verbose;
  1321. }
  1322. gboolean
  1323. s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
  1324. {
  1325. if (!s3_curl_throttling_compat())
  1326. return FALSE;
  1327. hdl->max_send_speed = max_send_speed;
  1328. return TRUE;
  1329. }
  1330. gboolean
  1331. s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
  1332. {
  1333. if (!s3_curl_throttling_compat())
  1334. return FALSE;
  1335. hdl->max_recv_speed = max_recv_speed;
  1336. return TRUE;
  1337. }
  1338. gboolean
  1339. s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
  1340. {
  1341. gboolean ret = TRUE;
  1342. if (use_ssl && !s3_curl_supports_ssl()) {
  1343. ret = FALSE;
  1344. } else {
  1345. hdl->use_ssl = use_ssl;
  1346. }
  1347. return ret;
  1348. }
  1349. char *
  1350. s3_strerror(S3Handle *hdl)
  1351. {
  1352. const char *message;
  1353. guint response_code;
  1354. const char *s3_error_name;
  1355. CURLcode curl_code;
  1356. guint num_retries;
  1357. char s3_info[256] = "";
  1358. char response_info[16] = "";
  1359. char curl_info[32] = "";
  1360. char retries_info[32] = "";
  1361. s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);
  1362. if (!message)
  1363. message = "Unknown S3 error";
  1364. if (s3_error_name)
  1365. g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
  1366. if (response_code)
  1367. g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
  1368. if (curl_code)
  1369. g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
  1370. if (num_retries)
  1371. g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);
  1372. return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
  1373. }
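/* For illustration only (a hypothetical caller, not part of this file):
 *
 *     if (!s3_upload(hdl, bucket, key, read_func, reset_func, size_func,
 *                    md5_func, read_data, NULL, NULL)) {
 *         char *msg = s3_strerror(hdl);
 *         g_warning("S3 upload failed: %s", msg);
 *         g_free(msg);     (s3_strerror() returns a newly allocated string)
 *     }
 */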
  1374. /* Perform an upload. When this function returns, the key and the data
  1375. * source (read_data) remain the responsibility of the caller.
  1376. *
  1377. * @param hdl: the S3Handle object
  1378. * @param bucket: the bucket to which the upload should be made
  1379. * @param key: the key to which the upload should be made
  1380. * @param read_func, reset_func, size_func, md5_func: callbacks supplying the data to upload
  1381. * @param read_data: pointer to pass to the above callbacks
  1382. * @returns: FALSE if an error occurred
  1383. */
  1384. gboolean
  1385. s3_upload(S3Handle *hdl,
  1386. const char *bucket,
  1387. const char *key,
  1388. s3_read_func read_func,
  1389. s3_reset_func reset_func,
  1390. s3_size_func size_func,
  1391. s3_md5_func md5_func,
  1392. gpointer read_data,
  1393. s3_progress_func progress_func,
  1394. gpointer progress_data)
  1395. {
  1396. s3_result_t result = S3_RESULT_FAIL;
  1397. static result_handling_t result_handling[] = {
  1398. { 200, 0, 0, S3_RESULT_OK },
  1399. RESULT_HANDLING_ALWAYS_RETRY,
  1400. { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
  1401. };
  1402. g_assert(hdl != NULL);
  1403. result = perform_request(hdl, "PUT", bucket, key, NULL, NULL,
  1404. read_func, reset_func, size_func, md5_func, read_data,
  1405. NULL, NULL, NULL, progress_func, progress_data,
  1406. result_handling);
  1407. return result == S3_RESULT_OK;
  1408. }
  1409. /* Private structure for our "thunk", which tracks where the user is in the list
  1410. * of keys. */
  1411. struct list_keys_thunk {
  1412. GSList *filename_list; /* all pending filenames */
  1413. gboolean in_contents; /* look for "key" entities in here */
  1414. gboolean in_common_prefixes; /* look for "prefix" entities in here */
  1415. gboolean is_truncated;
  1416. gchar *next_marker;
  1417. gboolean want_text;
  1418. gchar *text;
  1419. gsize text_len;
  1420. };
  1421. /* Functions for a SAX parser to parse the XML from Amazon */
  1422. static void
  1423. list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
  1424. const gchar *element_name,
  1425. const gchar **attribute_names G_GNUC_UNUSED,
  1426. const gchar **attribute_values G_GNUC_UNUSED,
  1427. gpointer user_data,
  1428. GError **error G_GNUC_UNUSED)
  1429. {
  1430. struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
  1431. thunk->want_text = 0;
  1432. if (g_strcasecmp(element_name, "contents") == 0) {
  1433. thunk->in_contents = 1;
  1434. } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
  1435. thunk->in_common_prefixes = 1;
  1436. } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
  1437. thunk->want_text = 1;
  1438. } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
  1439. thunk->want_text = 1;
  1440. } else if (g_strcasecmp(element_name, "istruncated") == 0) {
  1441. thunk->want_text = 1;
  1442. } else if (g_strcasecmp(element_name, "nextmarker") == 0) {
  1443. thunk->want_text = 1;
  1444. }
  1445. }
  1446. static void
  1447. list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
  1448. const gchar *element_name,
  1449. gpointer user_data,
  1450. GError **error G_GNUC_UNUSED)
  1451. {
  1452. struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
  1453. if (g_strcasecmp(element_name, "contents") == 0) {
  1454. thunk->in_contents = 0;
  1455. } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
  1456. thunk->in_common_prefixes = 0;
  1457. } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
  1458. thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
  1459. thunk->text = NULL;
  1460. } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
  1461. thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
  1462. thunk->text = NULL;
  1463. } else if (g_strcasecmp(element_name, "istruncated") == 0) {
  1464. if (thunk->text && g_strncasecmp(thunk->text, "false", 5) != 0)
  1465. thunk->is_truncated = TRUE;
  1466. } else if (g_strcasecmp(element_name, "nextmarker") == 0) {
  1467. if (thunk->next_marker) g_free(thunk->next_marker);
  1468. thunk->next_marker = thunk->text;
  1469. thunk->text = NULL;
  1470. }
  1471. }
  1472. static void
  1473. list_text(GMarkupParseContext *context G_GNUC_UNUSED,
  1474. const gchar *text,
  1475. gsize text_len,
  1476. gpointer user_data,
  1477. GError **error G_GNUC_UNUSED)
  1478. {
  1479. struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
  1480. if (thunk->want_text) {
  1481. if (thunk->text) g_free(thunk->text);
  1482. thunk->text = g_strndup(text, text_len);
  1483. }
  1484. }
  1485. /* Perform a fetch from S3; several fetches may be involved in a
  1486. * single listing operation */
  1487. static s3_result_t
  1488. list_fetch(S3Handle *hdl,
  1489. const char *bucket,
  1490. const char *prefix,
  1491. const char *delimiter,
  1492. const char *marker,
  1493. const char *max_keys,
  1494. CurlBuffer *buf)
  1495. {
  1496. s3_result_t result = S3_RESULT_FAIL;
  1497. static result_handling_t result_handling[] = {
  1498. { 200, 0, 0, S3_RESULT_OK },
  1499. RESULT_HANDLING_ALWAYS_RETRY,
  1500. { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
  1501. };
  1502. const char* pos_parts[][2] = {
  1503. {"prefix", prefix},
  1504. {"delimiter", delimiter},
  1505. {"marker", marker},
  1506. {"max-keys", max_keys},
  1507. {NULL, NULL}
  1508. };
  1509. char *esc_value;
  1510. GString *query;
  1511. guint i;
  1512. gboolean have_prev_part = FALSE;
  1513. /* loop over possible parts to build query string */
  1514. query = g_string_new("");
  1515. for (i = 0; pos_parts[i][0]; i++) {
  1516. if (pos_parts[i][1]) {
  1517. if (have_prev_part)
  1518. g_string_append(query, "&");
  1519. else
  1520. have_prev_part = TRUE;
  1521. esc_value = curl_escape(pos_parts[i][1], 0);
  1522. g_string_append_printf(query, "%s=%s", pos_parts[i][0], esc_value);
  1523. curl_free(esc_value);
  1524. }
  1525. }
  1526. /* and perform the request on that URI */
  1527. result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str,
  1528. NULL, NULL, NULL, NULL, NULL,
  1529. S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,
  1530. result_handling);
  1531. if (query) g_string_free(query, TRUE);
  1532. return result;
  1533. }
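/* For example (a sketch; the URL itself is assembled by perform_request()),
 *
 *   list_fetch(hdl, "mybucket", "backup/", "/", NULL, "1000", &buf);
 *
 * issues a GET on the bucket with the query string
 *
 *   prefix=backup%2F&delimiter=%2F&max-keys=1000
 *
 * and accumulates the XML response body into *buf via S3_BUFFER_WRITE_FUNCS.
 */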
  1534. gboolean
  1535. s3_list_keys(S3Handle *hdl,
  1536. const char *bucket,
  1537. const char *prefix,
  1538. const char *delimiter,
  1539. GSList **list)
  1540. {
  1541. /*
  1542. * max len of XML variables:
  1543. * bucket: 255 bytes (p12 API Version 2006-03-01)
  1544. * key: 1024 bytes (p15 API Version 2006-03-01)
1545. * size per key: 5 GB (p6 API Version 2006-03-01)
1546. * size of size: 10 bytes (i.e. 10 decimal digits)
  1547. * etag: 44 (observed+assumed)
  1548. * owner ID: 64 (observed+assumed)
  1549. * owner DisplayName: 255 (assumed)
  1550. * StorageClass: const (p18 API Version 2006-03-01)
  1551. */
  1552. static const guint MAX_RESPONSE_LEN = 1000*2000;
  1553. static const char *MAX_KEYS = "1000";
  1554. struct list_keys_thunk thunk;
  1555. GMarkupParseContext *ctxt = NULL;
  1556. static GMarkupParser parser = { list_start_element, list_end_element, list_text, NULL, NULL };
  1557. GError *err = NULL;
  1558. s3_result_t result = S3_RESULT_FAIL;
  1559. CurlBuffer buf = {NULL, 0, 0, MAX_RESPONSE_LEN};
  1560. g_assert(list);
  1561. *list = NULL;
  1562. thunk.filename_list = NULL;
  1563. thunk.text = NULL;
  1564. thunk.next_marker = NULL;
  1565. /* Loop until S3 has given us the entire picture */
  1566. do {
  1567. s3_buffer_reset_func(&buf);
  1568. /* get some data from S3 */
  1569. result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
  1570. if (result != S3_RESULT_OK) goto cleanup;
  1571. /* run the parser over it */
  1572. thunk.in_contents = FALSE;
  1573. thunk.in_common_prefixes = FALSE;
  1574. thunk.is_truncated = FALSE;
  1575. thunk.want_text = FALSE;
  1576. ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
  1577. if (!g_markup_parse_context_parse(ctxt, buf.buffer, buf.buffer_pos, &err)) {
  1578. if (hdl->last_message) g_free(hdl->last_message);
  1579. hdl->last_message = g_strdup(err->message);
  1580. result = S3_RESULT_FAIL;
  1581. goto cleanup;
  1582. }
  1583. if (!g_markup_parse_context_end_parse(ctxt, &err)) {
  1584. if (hdl->last_message) g_free(hdl->last_message);
  1585. hdl->last_message = g_strdup(err->message);
  1586. result = S3_RESULT_FAIL;
  1587. goto cleanup;
  1588. }
  1589. g_markup_parse_context_free(ctxt);
  1590. ctxt = NULL;
  1591. } while (thunk.next_marker);
  1592. cleanup:
  1593. if (err) g_error_free(err);
  1594. if (thunk.text) g_free(thunk.text);
  1595. if (thunk.next_marker) g_free(thunk.next_marker);
  1596. if (ctxt) g_markup_parse_context_free(ctxt);
  1597. if (buf.buffer) g_free(buf.buffer);
  1598. if (result != S3_RESULT_OK) {
  1599. g_slist_free(thunk.filename_list);
  1600. return FALSE;
  1601. } else {
  1602. *list = thunk.filename_list;
  1603. return TRUE;
  1604. }
  1605. }
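/* Usage sketch (assumes an already-constructed S3Handle *hdl; on failure the
 * error detail is left in the handle as usual):
 *
 *   GSList *keys = NULL, *iter;
 *   if (s3_list_keys(hdl, "mybucket", "backup/", NULL, &keys)) {
 *       for (iter = keys; iter; iter = iter->next)
 *           g_debug("found key: %s", (char *)iter->data);
 *       g_slist_foreach(keys, (GFunc)g_free, NULL);
 *       g_slist_free(keys);
 *   }
 */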
  1606. gboolean
  1607. s3_read(S3Handle *hdl,
  1608. const char *bucket,
  1609. const char *key,
  1610. s3_write_func write_func,
  1611. s3_reset_func reset_func,
  1612. gpointer write_data,
  1613. s3_progress_func progress_func,
  1614. gpointer progress_data)
  1615. {
  1616. s3_result_t result = S3_RESULT_FAIL;
  1617. static result_handling_t result_handling[] = {
  1618. { 200, 0, 0, S3_RESULT_OK },
  1619. RESULT_HANDLING_ALWAYS_RETRY,
  1620. { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
  1621. };
  1622. g_assert(hdl != NULL);
  1623. g_assert(write_func != NULL);
  1624. result = perform_request(hdl, "GET", bucket, key, NULL, NULL,
  1625. NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
  1626. progress_func, progress_data, result_handling);
  1627. return result == S3_RESULT_OK;
  1628. }
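/* Usage sketch: reading a whole object into memory with the CurlBuffer write
 * helpers used elsewhere in this file (a caller can equally pass its own
 * write_func/reset_func pair that streams to a file descriptor):
 *
 *   CurlBuffer data = { NULL, 0, 0, 0 };
 *   if (s3_read(hdl, "mybucket", "backup/some-key", s3_buffer_write_func,
 *               s3_buffer_reset_func, &data, NULL, NULL)) {
 *       ... data.buffer holds data.buffer_pos bytes of object data ...
 *   }
 *   if (data.buffer) g_free(data.buffer);
 */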
  1629. gboolean
  1630. s3_delete(S3Handle *hdl,
  1631. const char *bucket,
  1632. const char *key)
  1633. {
  1634. s3_result_t result = S3_RESULT_FAIL;
  1635. static result_handling_t result_handling[] = {
  1636. { 204, 0, 0, S3_RESULT_OK },
  1637. { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
  1638. RESULT_HANDLING_ALWAYS_RETRY,
  1639. { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
  1640. };
  1641. g_assert(hdl != NULL);
  1642. result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL,
  1643. NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  1644. result_handling);
  1645. return result == S3_RESULT_OK;
  1646. }
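/* Note that a missing bucket (404 / NoSuchBucket) is treated as success:
 * either way the key no longer exists. Deletion is one call per key, e.g.
 *
 *   if (!s3_delete(hdl, "mybucket", "backup/some-key"))
 *       g_warning("deleting key failed");
 *
 * with the error detail left in the handle for the caller to inspect.
 */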
  1647. gboolean
  1648. s3_make_bucket(S3Handle *hdl,
  1649. const char *bucket)
  1650. {
  1651. char *body = NULL;
  1652. s3_result_t result = S3_RESULT_FAIL;
  1653. static result_handling_t result_handling[] = {
  1654. { 200, 0, 0, S3_RESULT_OK },
  1655. { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
  1656. RESULT_HANDLING_ALWAYS_RETRY,
  1657. { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
  1658. };
  1659. regmatch_t pmatch[4];
  1660. char *loc_end_open, *loc_content;
  1661. CurlBuffer buf = {NULL, 0, 0, 0}, *ptr = NULL;
  1662. s3_read_func read_func = NULL;
  1663. s3_reset_func reset_func = NULL;
  1664. s3_md5_func md5_func = NULL;
  1665. s3_size_func size_func = NULL;
  1666. g_assert(hdl != NULL);
  1667. if (is_non_empty_string(hdl->bucket_location) &&
  1668. 0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
  1669. if (s3_bucket_location_compat(bucket)) {
  1670. ptr = &buf;
  1671. buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location);
  1672. buf.buffer_len = (guint) strlen(buf.buffer);
  1673. buf.buffer_pos = 0;
  1674. buf.max_buffer_size = buf.buffer_len;
  1675. read_func = s3_buffer_read_func;
  1676. reset_func = s3_buffer_reset_func;
  1677. size_func = s3_buffer_size_func;
  1678. md5_func = s3_buffer_md5_func;
  1679. } else {
  1680. hdl->last_message = g_strdup_printf(_(
  1681. "Location constraint given for Amazon S3 bucket, "
  1682. "but the bucket name (%s) is not usable as a subdomain."), bucket);
  1683. return FALSE;
  1684. }
  1685. }
  1686. result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL,
  1687. read_func, reset_func, size_func, md5_func, ptr,
  1688. NULL, NULL, NULL, NULL, NULL, result_handling);
  1689. if (result == S3_RESULT_OK ||
  1690. (is_non_empty_string(hdl->bucket_location) && result != S3_RESULT_OK
  1691. && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
1692. /* verify that the location constraint on the existing bucket matches
  1693. * the one that's configured.
  1694. */
  1695. result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
  1696. NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  1697. NULL, NULL, result_handling);
  1698. /* note that we can check only one of the three AND conditions above
  1699. * and infer that the others are true
  1700. */
  1701. if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
  1702. /* return to the default state of failure */
  1703. result = S3_RESULT_FAIL;
  1704. if (body) g_free(body);
  1705. /* use strndup to get a null-terminated string */
  1706. body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
  1707. if (!body) {
  1708. hdl->last_message = g_strdup(_("No body received for location request"));
  1709. goto cleanup;
  1710. } else if ('\0' == body[0]) {
  1711. hdl->last_message = g_strdup(_("Empty body received for location request"));
  1712. goto cleanup;
  1713. }
  1714. if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
  1715. loc_end_open = find_regex_substring(body, pmatch[1]);
  1716. loc_content = find_regex_substring(body, pmatch[3]);
  1717. /* The case of an empty string is special because XML allows
  1718. * "self-closing" tags
  1719. */
  1720. if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
  1721. '/' != loc_end_open[0])
  1722. hdl->last_message = g_strdup(_("A wildcard location constraint is "
  1723. "configured, but the bucket has a non-empty location constraint"));
  1724. else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
  1725. strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
  1726. ('\0' != loc_content[0]))
  1727. hdl->last_message = g_strdup(_("The location constraint configured "
  1728. "does not match the constraint currently on the bucket"));
  1729. else
  1730. result = S3_RESULT_OK;
  1731. } else {
  1732. hdl->last_message = g_strdup(_("Unexpected location response from Amazon S3"));
  1733. }
  1734. }
  1735. }
  1736. cleanup:
  1737. if (body) g_free(body);
  1738. return result == S3_RESULT_OK;
  1739. }
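/* Usage sketch: with hdl->bucket_location set at handle-creation time (for
 * example to "EU"), the PUT above carries an AMAZON_BUCKET_CONF_TEMPLATE body,
 * and for a bucket we already own the follow-up "GET ?location" verifies that
 * the existing constraint matches the configured one:
 *
 *   if (!s3_make_bucket(hdl, "mybucket"))
 *       g_warning("bucket creation/verification failed");
 */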
  1740. gboolean
  1741. s3_delete_bucket(S3Handle *hdl,
  1742. const char *bucket)
  1743. {
  1744. return s3_delete(hdl, bucket, NULL);
  1745. }