-
Notifications
You must be signed in to change notification settings - Fork 1.4k
Expand file tree
/
Copy pathsqlcipher.c
More file actions
3889 lines (3406 loc) · 159 KB
/
sqlcipher.c
File metadata and controls
3889 lines (3406 loc) · 159 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
** SQLCipher
** http://zetetic.net
**
** Copyright (c) 2008-2024, ZETETIC LLC
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** * Neither the name of the ZETETIC LLC nor the
** names of its contributors may be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY ZETETIC LLC ''AS IS'' AND ANY
** EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL ZETETIC LLC BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**
*/
/* BEGIN SQLCIPHER */
#ifdef SQLITE_HAS_CODEC
#if !defined(SQLCIPHER_OMIT_LOG_DEVICE)
#if defined(__ANDROID__)
#include <android/log.h>
#elif defined(__APPLE__)
#include <TargetConditionals.h>
#include <os/log.h>
#endif
#endif
#include <time.h>
#if defined(_WIN32) || defined(SQLITE_OS_WINRT)
#include <windows.h> /* amalgamator: dontcache */
#else
#include <sys/time.h> /* amalgamator: dontcache */
#endif
#ifndef OMIT_MEMLOCK
#if defined(__unix__) || defined(__APPLE__) || defined(_AIX)
#include <errno.h> /* amalgamator: dontcache */
#include <unistd.h> /* amalgamator: dontcache */
#include <sys/resource.h> /* amalgamator: dontcache */
#include <sys/mman.h> /* amalgamator: dontcache */
#endif
#endif
#include <assert.h>
#include "sqlcipher.h"
#include "btreeInt.h"
#include "pager.h"
#include "vdbeInt.h"
#if !defined(SQLITE_EXTRA_INIT) || !defined(SQLITE_EXTRA_SHUTDOWN)
#error "SQLCipher must be compiled with -DSQLITE_EXTRA_INIT=sqlcipher_extra_init -DSQLITE_EXTRA_SHUTDOWN=sqlcipher_extra_shutdown"
#endif
#if !defined(SQLITE_THREADSAFE) || !(SQLITE_THREADSAFE == 1 || SQLITE_THREADSAFE == 2)
#error "SQLCipher must be compiled with -DSQLITE_THREADSAFE=<1 or 2>"
#endif
#if !defined(SQLITE_TEMP_STORE) || SQLITE_TEMP_STORE == 0 || SQLITE_TEMP_STORE == 1
#error "SQLCipher must be compiled with -DSQLITE_TEMP_STORE=<2 or 3>"
#endif
/* extensions defined in pager.c */
void *sqlcipherPagerGetCodec(Pager*);
void sqlcipherPagerSetCodec(Pager*, void *(*)(void*,void*,Pgno,int), void (*)(void*,int,int), void (*)(void*), void *);
int sqlite3pager_is_sj_pgno(Pager*, Pgno);
void sqlite3pager_error(Pager*, int);
void sqlite3pager_reset(Pager *pPager);
/* end extensions defined in pager.c */
#if !defined (SQLCIPHER_CRYPTO_CC) \
&& !defined (SQLCIPHER_CRYPTO_OPENSSL) \
&& !defined (SQLCIPHER_CRYPTO_CUSTOM)
#define SQLCIPHER_CRYPTO_OPENSSL
#endif
#define FILE_HEADER_SZ 16
#define CIPHER_XSTR(s) CIPHER_STR(s)
#define CIPHER_STR(s) #s
#ifndef CIPHER_VERSION_NUMBER
#define CIPHER_VERSION_NUMBER 4.13.0
#endif
#ifndef CIPHER_VERSION_BUILD
#define CIPHER_VERSION_BUILD community
#endif
#define CIPHER_READ_CTX 0
#define CIPHER_WRITE_CTX 1
#define CIPHER_READWRITE_CTX 2
#ifndef PBKDF2_ITER
#define PBKDF2_ITER 256000
#endif
#define SQLCIPHER_FLAG_GET(FLAG,BIT) ((FLAG & BIT) != 0)
#define SQLCIPHER_FLAG_SET(FLAG,BIT) FLAG |= BIT
#define SQLCIPHER_FLAG_UNSET(FLAG,BIT) FLAG &= ~BIT
/* possible flags for codec_ctx->flags */
#define CIPHER_FLAG_HMAC (1 << 0)
#define CIPHER_FLAG_LE_PGNO (1 << 1)
#define CIPHER_FLAG_BE_PGNO (1 << 2)
#define CIPHER_FLAG_KEY_USED (1 << 3)
#define CIPHER_FLAG_HAS_KDF_SALT (1 << 4)
#ifndef DEFAULT_CIPHER_FLAGS
#define DEFAULT_CIPHER_FLAGS CIPHER_FLAG_HMAC | CIPHER_FLAG_LE_PGNO
#endif
/* by default, sqlcipher will use a reduced number of iterations to generate
the HMAC key / or transform a raw cipher key
*/
#ifndef FAST_PBKDF2_ITER
#define FAST_PBKDF2_ITER 2
#endif
/* this is a fixed random array that will be xor'd with the database salt to ensure that the
   salt passed to the HMAC key derivation function is not the same as that used to derive
   the encryption key. This can be overridden at compile time but it will make the resulting
   binary incompatible with the default builds when using HMAC. A future version of SQLCipher
   will likely allow this to be defined at runtime via pragma */
#ifndef HMAC_SALT_MASK
#define HMAC_SALT_MASK 0x3a
#endif
#ifndef CIPHER_MAX_IV_SZ
#define CIPHER_MAX_IV_SZ 16
#endif
#ifndef CIPHER_MAX_KEY_SZ
#define CIPHER_MAX_KEY_SZ 64
#endif
/* the default implementation of SQLCipher uses a cipher_ctx
to keep track of read / write state separately. The following
struct and associated functions are defined here */
/* per-direction key state. The default SQLCipher implementation keeps
   separate read and write contexts (see CIPHER_READ_CTX / CIPHER_WRITE_CTX).
   Field notes below are inferred from names — confirm against usage. */
typedef struct {
  int derive_key;           /* presumably non-zero while keys still need to be derived from pass */
  int pass_sz;              /* length of pass in bytes */
  unsigned char *key;       /* derived encryption key material */
  unsigned char *hmac_key;  /* derived HMAC key material */
  unsigned char *pass;      /* user-supplied passphrase / raw key material */
} cipher_ctx;
/* per-connection codec state: KDF / cipher configuration, scratch buffers,
   and the read/write cipher contexts. Field notes are inferred from names
   and the nearby macros (PBKDF2_ITER, FAST_PBKDF2_ITER, HMAC_SALT_MASK,
   CIPHER_FLAG_*) — confirm against usage elsewhere in the file. */
typedef struct {
  int store_pass;                /* presumably: retain user passphrase for later use (e.g. rekey) */
  int kdf_iter;                  /* KDF iteration count (default PBKDF2_ITER) */
  int fast_kdf_iter;             /* reduced iteration count for HMAC-key / raw-key transforms (see FAST_PBKDF2_ITER) */
  int kdf_salt_sz;               /* size in bytes of kdf_salt */
  int key_sz;                    /* cipher key size in bytes (<= CIPHER_MAX_KEY_SZ) */
  int iv_sz;                     /* IV size in bytes (<= CIPHER_MAX_IV_SZ) */
  int block_sz;                  /* cipher block size in bytes */
  int page_sz;                   /* database page size */
  int reserve_sz;                /* bytes reserved at the end of each page */
  int hmac_sz;                   /* per-page HMAC size in bytes */
  int plaintext_header_sz;       /* bytes at the start of the file left unencrypted */
  int hmac_algorithm;            /* SQLCIPHER_HMAC_* selector */
  int kdf_algorithm;             /* SQLCIPHER_PBKDF2_* selector */
  int error;                     /* sticky codec error state */
  unsigned int flags;            /* CIPHER_FLAG_* bitmask */
  unsigned char *kdf_salt;       /* salt used for key derivation */
  unsigned char *hmac_kdf_salt;  /* salt variant for HMAC key derivation (see HMAC_SALT_MASK note above) */
  unsigned char *buffer;         /* scratch page buffer */
  Btree *pBt;                    /* owning btree */
  cipher_ctx *read_ctx;          /* key state used for reads/decryption */
  cipher_ctx *write_ctx;         /* key state used for writes/encryption */
  sqlcipher_provider *provider;  /* active crypto provider vtable */
  void *provider_ctx;            /* provider-specific context handle */
} codec_ctx ;
/* header for one block in SQLCipher's private-heap allocator: blocks form a
   singly linked list carved out of the private_heap arena (the head block is
   initialized in sqlcipher_extra_init and walked in sqlcipher_extra_shutdown) */
typedef struct private_block private_block;
struct private_block {
  private_block *next;  /* next block in the arena, or NULL for the last block */
  u32 size;             /* usable bytes following this header */
  u32 is_used;          /* non-zero while the block is allocated */
};
/* implementation of a simple, fast PRNG using xoshiro256++ (XOR/shift/rotate)
 * https://prng.di.unimi.it/ under the public domain via https://prng.di.unimi.it/xoshiro256plusplus.c
 * xoshiro is NEVER used for any cryptographic functions as a CSPRNG. It is solely used for
 * generating random data for testing, debugging, and forensic purposes (overwriting memory segments) */
static volatile uint64_t xoshiro_s[4];
/* Rotate a 64-bit value left by k bit positions. Callers in this file pass
 * k in (0, 64), which keeps the complementary right shift well-defined. */
static inline uint64_t xoshiro_rotl(const uint64_t value, int k) {
  uint64_t high = value << k;
  uint64_t low = value >> (64 - k);
  return high | low;
}
/* Advance the xoshiro256++ state and return the next 64-bit value.
 * Non-cryptographic — used only for wiping/test data (see note above).
 * The exact order of the XOR/shift/rotate state updates below is the
 * xoshiro256++ algorithm and must not be reordered.
 * NOTE(review): updates to the shared xoshiro_s state are not serialized by a
 * mutex; concurrent callers may interleave — presumably acceptable for this
 * non-crypto use, but confirm. */
uint64_t xoshiro_next(void) {
  /* output function: rotl(s[0] + s[3], 23) + s[0] */
  volatile uint64_t result = xoshiro_rotl(xoshiro_s[0] + xoshiro_s[3], 23) + xoshiro_s[0];
  volatile uint64_t t = xoshiro_s[1] << 17;
  xoshiro_s[2] ^= xoshiro_s[0];
  xoshiro_s[3] ^= xoshiro_s[1];
  xoshiro_s[1] ^= xoshiro_s[2];
  xoshiro_s[0] ^= xoshiro_s[3];
  xoshiro_s[2] ^= t;
  xoshiro_s[3] = xoshiro_rotl(xoshiro_s[3], 45);
  return result;
}
/* Fill ptr[0..sz) with non-cryptographic pseudo-random bytes from the
 * xoshiro256++ generator, 8 bytes at a time. Never used for key material —
 * only for wiping memory and test/forensic data. A non-positive sz is a no-op.
 * Fix: the size comparison previously mixed a signed int (sz) with size_t
 * (sizeof), an implicit signed/unsigned conversion; both sides are now int. */
static void xoshiro_randomness(unsigned char *ptr, int sz) {
  volatile uint64_t val;
  volatile int to_copy;
  while (sz > 0) {
    val = xoshiro_next();
    to_copy = (sz >= (int) sizeof(val)) ? (int) sizeof(val) : sz;
    memcpy(ptr, (void *) &val, (size_t) to_copy);
    ptr += to_copy;
    sz -= to_copy;
  }
}
#ifdef SQLCIPHER_TEST
/* possible flags for simulating specific test conditions */
#define TEST_FAIL_ENCRYPT 0x01
#define TEST_FAIL_DECRYPT 0x02
#define TEST_FAIL_MIGRATE 0x04
static volatile unsigned int cipher_test_flags = 0;
static volatile int cipher_test_rand = 0;
/* Decide whether a simulated test failure should be injected. Returns 1
 * (always fail) when cipher_test_rand is zero; otherwise fails with
 * probability roughly 1/cipher_test_rand using the non-crypto PRNG.
 * Fix: declared with (void) — an empty parameter list in C means
 * "unspecified arguments", not "no arguments". */
static int sqlcipher_get_test_fail(void) {
  int x;
  /* if cipher_test_rand is not set to a non-zero value always fail (return true) */
  if (cipher_test_rand == 0) return 1;
  xoshiro_randomness((unsigned char *) &x, sizeof(x));
  return ((x % cipher_test_rand) == 0);
}
#endif
static volatile unsigned int default_flags = DEFAULT_CIPHER_FLAGS;
static volatile unsigned char hmac_salt_mask = HMAC_SALT_MASK;
static volatile int default_kdf_iter = PBKDF2_ITER;
static volatile int default_page_size = 4096;
static volatile int default_plaintext_header_size = 0;
static volatile int default_hmac_algorithm = SQLCIPHER_HMAC_SHA512;
static volatile int default_kdf_algorithm = SQLCIPHER_PBKDF2_HMAC_SHA512;
static volatile int sqlcipher_mem_security_on = 0;
static volatile int sqlcipher_mem_executed = 0;
static volatile int sqlcipher_mem_initialized = 0;
static volatile sqlite3_mem_methods default_mem_methods;
static sqlcipher_provider *default_provider = NULL;
static sqlite3_mutex* sqlcipher_static_mutex[SQLCIPHER_MUTEX_COUNT];
#ifndef SQLCIPHER_LOG_LEVEL_DEFAULT
#define SQLCIPHER_LOG_LEVEL_DEFAULT SQLCIPHER_LOG_WARN
#endif
static FILE* sqlcipher_log_file = NULL;
static volatile int sqlcipher_log_device = 0;
static volatile unsigned int sqlcipher_log_level = SQLCIPHER_LOG_NONE;
static volatile unsigned int sqlcipher_log_source = SQLCIPHER_LOG_ANY;
static volatile int sqlcipher_log_set = 0;
static size_t sqlcipher_shield_mask_sz = 32;
static u8* sqlcipher_shield_mask = NULL;
/* Establish the default size of the private heap. This can be overridden
 * at compile time by setting -DSQLCIPHER_PRIVATE_HEAP_SIZE_DEFAULT=X */
#ifndef SQLCIPHER_PRIVATE_HEAP_SIZE_DEFAULT
/* On android, the maximum amount of memory that can be memlocked is 64k. This also
 * seems to be a popular ulimit on linux distributions, containers, etc. Therefore
 * the default heap size is chosen as 48K, which is either 4 (with 4k page size)
 * or 1 (with 16k page size) page less than the max. We choose to allocate slightly
 * less than the max just in case the app has locked some other page(s). This
 * initial allocation should be enough to support at least 10 concurrent
 * sqlcipher-enabled database connections at the same time without requiring any
 * overflow allocations */
#define SQLCIPHER_PRIVATE_HEAP_SIZE_DEFAULT 49152
#endif
/* if the default allocation fails, we'll reduce the size by this amount
 * and try again. This is also the minimum size of the private heap. The minimum
 * size will be 4 4K pages or 1 16K page (possible with latest android) */
#define SQLCIPHER_PRIVATE_HEAP_SIZE_STEP 16384
static volatile size_t private_heap_sz = SQLCIPHER_PRIVATE_HEAP_SIZE_DEFAULT;
static u8* private_heap = NULL;
static volatile size_t private_heap_used = 0; /* bytes currently used on private heap */
static volatile size_t private_heap_hwm = 0; /* largest number of bytes used on the private heap at one time */
static volatile size_t private_heap_alloc = 0; /* total bytes allocated on private heap over time */
static volatile u32 private_heap_allocs = 0; /* total number of allocations on private heap over time */
static volatile size_t private_heap_overflow = 0; /* total bytes overflowing private heap over time */
static volatile u32 private_heap_overflows = 0; /* number of overflow allocations over time */
/* to prevent excessive fragmentation blocks will
 * only be split if there are at least this many
 * bytes available after the split. This should allow for at
 * least two additional small allocations */
#define SQLCIPHER_PRIVATE_HEAP_MIN_SPLIT_SIZE 32
/* requested sizes will be rounded up to the nearest 8 bytes for alignment */
#define SQLCIPHER_PRIVATE_HEAP_ALIGNMENT 8
#define SQLCIPHER_PRIVATE_HEAP_ROUNDUP(x) ((x % SQLCIPHER_PRIVATE_HEAP_ALIGNMENT) ? \
((x / SQLCIPHER_PRIVATE_HEAP_ALIGNMENT) + 1) * SQLCIPHER_PRIVATE_HEAP_ALIGNMENT : x)
static volatile int sqlcipher_init = 0;
static volatile int sqlcipher_shutdown = 0;
static volatile int sqlcipher_cleanup = 0;
static int sqlcipher_init_error = SQLITE_ERROR;
static void sqlcipher_internal_free(void *, sqlite_uint64);
static void *sqlcipher_internal_malloc(sqlite_uint64);
/*
** Simple shared routines for converting hex char strings to binary data
*/
/* Decode a single hex digit; any non-hex character decodes as zero. */
static int cipher_hex2int(char c) {
  if(c >= '0' && c <= '9') return c - '0';
  if(c >= 'A' && c <= 'F') return c - 'A' + 10;
  if(c >= 'a' && c <= 'f') return c - 'a' + 10;
  return 0;
}

/* Decode sz hex characters from hex into sz/2 bytes at out, pairing a high
 * and a low nibble per output byte. */
static void cipher_hex2bin(const unsigned char *hex, int sz, unsigned char *out){
  int pos;
  for(pos = 0; pos < sz; pos += 2){
    int hi = cipher_hex2int(hex[pos]);
    int lo = cipher_hex2int(hex[pos + 1]);
    out[pos / 2] = (unsigned char)((hi << 4) | lo);
  }
}
/* Render sz bytes from in as lowercase hex into out. out must provide room
 * for sz*2 characters plus a terminating NUL. The format string contains a
 * trailing space, but the size limit of 3 makes sqlite3_snprintf truncate it:
 * each call emits exactly two hex digits plus a NUL, and the NUL is
 * overwritten by the next pair, leaving a contiguous hex string. */
static void cipher_bin2hex(const unsigned char* in, int sz, char *out) {
  int i;
  for(i=0; i < sz; i++) {
    sqlite3_snprintf(3, out + (i*2), "%02x ", in[i]);
  }
}
/* Return 1 when all sz characters of hex are valid hexadecimal digits
 * (0-9, A-F, a-f); return 0 at the first invalid character. An empty
 * buffer (sz == 0) is considered valid. */
static int cipher_isHex(const unsigned char *hex, int sz){
  int pos;
  for(pos = 0; pos < sz; pos++) {
    unsigned char ch = hex[pos];
    int is_digit = (ch >= '0' && ch <= '9');
    int is_upper = (ch >= 'A' && ch <= 'F');
    int is_lower = (ch >= 'a' && ch <= 'f');
    if(!(is_digit || is_upper || is_lower)) {
      return 0;
    }
  }
  return 1;
}
/* Return the static SQLCipher mutex for the given SQLCIPHER_MUTEX_* index,
 * or NULL when the index is out of range. The mutexes themselves are
 * allocated in sqlcipher_extra_init(). */
sqlite3_mutex* sqlcipher_mutex(int mutex) {
  if(mutex < 0 || mutex >= SQLCIPHER_MUTEX_COUNT) return NULL;
  return sqlcipher_static_mutex[mutex];
}
/* atexit() handler registered by sqlcipher_extra_init; runs the full
 * shutdown in case the application never calls sqlite3_shutdown(). */
static void sqlcipher_atexit(void) {
  sqlcipher_log(SQLCIPHER_LOG_DEBUG, SQLCIPHER_LOG_CORE, "%s: calling sqlcipher_extra_shutdown()", __func__);
  sqlcipher_extra_shutdown();
}
/* Shared-library finalizer body, registered via the platform-specific
 * terminator mechanisms below (Mach-O mod_term_func / .fini_array). Same
 * behavior as sqlcipher_atexit but for library unload rather than exit. */
static void sqlcipher_fini(void) {
  sqlcipher_log(SQLCIPHER_LOG_DEBUG, SQLCIPHER_LOG_CORE, "%s: calling sqlcipher_extra_shutdown()", __func__);
  sqlcipher_extra_shutdown();
}
/* Per-platform hooks that run shutdown when the library is unloaded. */
#if defined(_WIN32)
#ifndef SQLCIPHER_OMIT_DLLMAIN
/* On Windows, run shutdown from DllMain when the process detaches. */
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
  switch (fdwReason) {
    case DLL_PROCESS_DETACH:
      sqlcipher_log(SQLCIPHER_LOG_DEBUG, SQLCIPHER_LOG_CORE, "%s: calling sqlcipher_extra_shutdown()", __func__);
      sqlcipher_extra_shutdown();
      break;
    default:
      break;
  }
  return TRUE;
}
#endif
#elif defined(__APPLE__)
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
/* under AddressSanitizer use a destructor attribute instead of the raw
 * Mach-O terminator section — presumably to keep ASan's teardown ordering
 * happy; confirm against the commit that introduced this. */
static void sqlcipher_cleanup_destructor(void) __attribute__((destructor));
static void sqlcipher_cleanup_destructor(void) { sqlcipher_fini(); }
#else
/* register sqlcipher_fini in the Mach-O module-terminator section */
static void (*const sqlcipher_fini_func)(void) __attribute__((used, section("__DATA,__mod_term_func"))) = sqlcipher_fini;
#endif
#else
static void (*const sqlcipher_fini_func)(void) __attribute__((used, section("__DATA,__mod_term_func"))) = sqlcipher_fini;
#endif
#else
/* ELF platforms: register sqlcipher_fini in .fini_array */
static void (*const sqlcipher_fini_func)(void) __attribute__((used, section(".fini_array"))) = sqlcipher_fini;
#endif
static void sqlcipher_exportFunc(sqlite3_context*, int, sqlite3_value**);
/* Auto-extension entry point (registered via sqlite3_auto_extension in
 * sqlcipher_extra_init) that adds the sqlcipher_export() SQL function to each
 * new database connection. errmsg and api are unused; the signature matches
 * the auto-extension callback contract. */
static int sqlcipher_export_init(sqlite3* db, const char** errmsg, const struct sqlite3_api_routines* api) {
  sqlite3_create_function_v2(db, "sqlcipher_export", -1, SQLITE_UTF8, 0, sqlcipher_exportFunc, 0, 0, 0);
  return SQLITE_OK;
}
/* The extra_init function is called automatically during sqlite3 initialization by virtue of
 * being defined with SQLITE_EXTRA_INIT. This function sets up
 * static mutexes used internally by SQLCipher and initializes
 * the internal private heap */
int sqlcipher_extra_init(const char* arg) {
  int rc = SQLITE_OK, i = 0;
  void* provider_ctx = NULL;

  sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER));

  if(sqlcipher_init) {
    /* if this init routine already completed successfully return immediately */
    sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER));
    return SQLITE_OK;
  }

  /* only register cleanup handlers once per process */
  if(!sqlcipher_cleanup) {
    atexit(sqlcipher_atexit);
    sqlcipher_cleanup = 1;
  }

#ifndef SQLCIPHER_OMIT_DEFAULT_LOGGING
  /* when sqlcipher is first activated, set a default log target and level of WARN if the
     logging settings have not yet been initialized. Use the "device log" for
     android (logcat) or apple (console). Use stderr on all other platforms. */
  if(!sqlcipher_log_set) {
    /* set log level if it is different than the uninitialized default value of NONE */
    if(sqlcipher_log_level == SQLCIPHER_LOG_NONE) {
      sqlcipher_log_level = SQLCIPHER_LOG_LEVEL_DEFAULT;
    }
    /* set the default file or device if neither is already set */
    if(sqlcipher_log_device == 0 && sqlcipher_log_file == NULL) {
#if defined(__ANDROID__) || defined(__APPLE__)
      sqlcipher_log_device = 1;
#else
      sqlcipher_log_file = stderr;
#endif
    }
    sqlcipher_log_set = 1;
  }
#endif

  /* allocate static mutexes, and return an error if any fail to allocate */
  for(i = 0; i < SQLCIPHER_MUTEX_COUNT; i++) {
    if(sqlcipher_static_mutex[i] == NULL) {
      if((sqlcipher_static_mutex[i] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST)) == NULL) {
        sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to allocate static mutex %d", __func__, i);
        rc = SQLITE_NOMEM;
        goto error;
      }
    }
  }

  /* initialize the private heap for use in internal SQLCipher memory allocations */
  if(private_heap == NULL) {
    while(private_heap_sz >= SQLCIPHER_PRIVATE_HEAP_SIZE_STEP) {
      /* attempt to allocate the private heap. If allocation fails, reduce the size and try again */
      if((private_heap = sqlcipher_internal_malloc(private_heap_sz))) {
        xoshiro_randomness(private_heap, (int) private_heap_sz);
        /* initialize the head block of the linked list at the start of the heap */
        private_block *head = (private_block *) private_heap;
        head->is_used = 0;
        head->size = (u32) private_heap_sz - sizeof(private_block);
        head->next = NULL;
        break;
      }
      /* allocation failed, reduce the requested size of the heap */
      private_heap_sz -= SQLCIPHER_PRIVATE_HEAP_SIZE_STEP;
    }
  }
  if(!private_heap) {
    sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to allocate private heap", __func__);
    rc = SQLITE_NOMEM;
    goto error;
  }

  /* check to see if there is a provider registered at this point
     if there is no provider registered at this point, register the
     default provider */
  if(sqlcipher_get_provider() == NULL) {
    sqlcipher_provider *p = sqlcipher_malloc(sizeof(sqlcipher_provider));
#if defined (SQLCIPHER_CRYPTO_CC)
    extern int sqlcipher_cc_setup(sqlcipher_provider *p);
    sqlcipher_cc_setup(p);
#elif defined (SQLCIPHER_CRYPTO_OPENSSL)
    extern int sqlcipher_openssl_setup(sqlcipher_provider *p);
    sqlcipher_openssl_setup(p);
#elif defined (SQLCIPHER_CRYPTO_OSSL3)
    extern int sqlcipher_ossl3_setup(sqlcipher_provider *p);
    sqlcipher_ossl3_setup(p);
#elif defined (SQLCIPHER_CRYPTO_CUSTOM)
    extern int SQLCIPHER_CRYPTO_CUSTOM(sqlcipher_provider *p);
    SQLCIPHER_CRYPTO_CUSTOM(p);
#else
#error "NO DEFAULT SQLCIPHER CRYPTO PROVIDER DEFINED"
#endif
    if((rc = sqlcipher_register_provider(p)) != SQLITE_OK) {
      sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_PROVIDER, "%s: failed to register provider %p %d", __func__, p, rc);
      goto error;
    }
  }

  /* seed the internal PRNG and the shield mask with random data from the provider */
  if((rc = default_provider->ctx_init(&provider_ctx)) != SQLITE_OK) {
    sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to initialize provider context %d", __func__, rc);
    goto error;
  }
  if((rc = default_provider->random(provider_ctx, (void *)xoshiro_s, sizeof(xoshiro_s))) != SQLITE_OK) {
    sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to generate xoshiro seed %d", __func__, rc);
    goto error;
  }
  if(!sqlcipher_shield_mask) {
    if(!(sqlcipher_shield_mask = sqlcipher_internal_malloc(sqlcipher_shield_mask_sz))) {
      sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to allocate shield mask", __func__);
      /* fix: this path previously jumped to error with rc still SQLITE_OK,
       * so the function tore everything down yet reported success */
      rc = SQLITE_NOMEM;
      goto error;
    }
    if((rc = default_provider->random(provider_ctx, sqlcipher_shield_mask, (int) sqlcipher_shield_mask_sz)) != SQLITE_OK) {
      sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY, "%s: failed to generate requisite random mask data %d", __func__, rc);
      goto error;
    }
  }
  default_provider->ctx_free(&provider_ctx);
  provider_ctx = NULL;

  sqlcipher_init = 1;
  sqlcipher_shutdown = 0;

  /* leave the master mutex so we can proceed with auto extension registration */
  sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER));

  /* finally, extension registration occurs outside of the mutex because it
   * uses SQLITE_MUTEX_STATIC_MASTER itself */
  sqlite3_auto_extension((void (*)(void))sqlcipher_export_init);
  return SQLITE_OK;

error:
  /* if an error occurs during initialization, tear down everything that was set up */
  if(provider_ctx && default_provider) {
    /* fix: release the provider context on the error path so it does not leak */
    default_provider->ctx_free(&provider_ctx);
  }
  if(private_heap) {
    sqlcipher_internal_free(private_heap, private_heap_sz);
    private_heap = NULL;
  }
  if(sqlcipher_shield_mask) {
    sqlcipher_internal_free(sqlcipher_shield_mask, sqlcipher_shield_mask_sz);
    sqlcipher_shield_mask = NULL;
  }
  for(i = 0; i < SQLCIPHER_MUTEX_COUNT; i++) {
    if(sqlcipher_static_mutex[i]) {
      sqlite3_mutex_free(sqlcipher_static_mutex[i]);
      sqlcipher_static_mutex[i] = NULL;
    }
  }
  /* post cleanup return the error code back up to sqlite3_init() */
  sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER));
  sqlcipher_init_error = rc;
  return rc;
}
/* The extra_shutdown function is called by sqlite3_shutdown()
* because it is defined with SQLITE_EXTRA_SHUTDOWN. In addition it will
* be called via atexit(), finalizer, and DllMain. The function will
* cleanup resources allocated by SQLCipher including mutexes,
* the private heap, and default provider. */
void sqlcipher_extra_shutdown(void) {
  int i = 0;
  sqlcipher_provider *provider = NULL;
  /* if sqlcipher hasn't been initialized or the shutdown already completed exit early */
  if(!sqlcipher_init || sqlcipher_shutdown) {
    goto cleanup;
  }
  /* wipe and release the random shield mask allocated during init */
  if(sqlcipher_shield_mask) {
    sqlcipher_internal_free(sqlcipher_shield_mask, sqlcipher_shield_mask_sz);
    sqlcipher_shield_mask = NULL;
  }
  /* free the provider list. start at the default provider and move through the list
   * freeing each one. If a provider has a shutdown function, call it before freeing.
   * finally NULL out the default_provider */
  provider = default_provider;
  while(provider) {
    sqlcipher_provider *next = provider->next;
    if(provider->shutdown) {
      provider->shutdown();
    }
    sqlcipher_free(provider, sizeof(sqlcipher_provider));
    provider = next;
  }
  default_provider = NULL;
  /* free private heap. If SQLCipher is compiled in test mode, it will deliberately
     not free the heap (leaking it) if the heap is not empty. This will allow tooling
     to detect memory issues like unfreed private heap memory */
  if(private_heap) {
#ifdef SQLCIPHER_TEST
    size_t used = 0;
    private_block *block = NULL;
    /* walk the allocator's block list totalling bytes still marked in-use;
     * i doubles as the count of leaked allocations for the log below */
    block = (private_block *) private_heap;
    while (block != NULL) {
      if(block->is_used) {
        used+= block->size;
        i++;
      }
      block = block->next;
    }
    if(used > 0) {
      /* don't free the heap so that sqlite treats this as unfreed memory */
      sqlcipher_log(SQLCIPHER_LOG_ERROR, SQLCIPHER_LOG_MEMORY,
        "%s: SQLCipher private heap unfreed memory %u bytes in %d allocations", __func__, used, i);
    } else {
      sqlcipher_internal_free(private_heap, private_heap_sz);
      private_heap = NULL;
    }
#else
    sqlcipher_internal_free(private_heap, private_heap_sz);
    private_heap = NULL;
#endif
    /* NOTE(review): the %u specifiers below receive size_t arguments; on LP64
     * platforms that is a printf format/argument mismatch — confirm against
     * sqlcipher_log's implementation */
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY,
      "%s: SQLCipher private heap stats: size=%u, hwm=%u, alloc=%u, allocs=%u, overflow=%u, overflows=%u", __func__,
      private_heap_sz, private_heap_hwm, private_heap_alloc, private_heap_allocs, private_heap_overflow, private_heap_overflows
    );
  }
  /* free all of sqlcipher's static mutexes */
  for(i = 0; i < SQLCIPHER_MUTEX_COUNT; i++) {
    if(sqlcipher_static_mutex[i]) {
      sqlite3_mutex_free(sqlcipher_static_mutex[i]);
      sqlcipher_static_mutex[i] = NULL;
    }
  }
cleanup:
  /* reset state flags so a later sqlcipher_extra_init() can run again */
  sqlcipher_init = 0;
  sqlcipher_init_error = SQLITE_ERROR;
  sqlcipher_shutdown = 1;
}
/* XOR the buffer with the process-wide random shield mask, repeating the mask
 * every sqlcipher_shield_mask_sz bytes. Because XOR is its own inverse,
 * applying this twice restores the original bytes — the same routine both
 * shields and unshields. Requires sqlcipher_extra_init to have allocated the
 * mask. */
static void sqlcipher_shield(unsigned char *in, int sz) {
  int i = 0;
  for(i = 0; i < sz; i++) {
    in[i] ^= sqlcipher_shield_mask[i % sqlcipher_shield_mask_sz];
  }
}
/* constant time memset using volatile to avoid having the memset
   optimized out by the compiler.
   Note: As suggested by Joachim Schipper (joachim.schipper@fox-it.com)
*/
/* Set len bytes at v to value through a volatile pointer so the write cannot
 * be elided by the optimizer (used for wiping sensitive data). Returns v;
 * NULL input is a no-op.
 * Fix: the trace log previously passed the 64-bit len with %u, a printf
 * format/argument mismatch on LP64 platforms; it is now cast and logged
 * with %llu. */
void* sqlcipher_memset(void *v, unsigned char value, sqlite_uint64 len) {
  volatile sqlite_uint64 i = 0;
  volatile unsigned char *a = v;
  if (v == NULL) return v;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_memset: setting %p[0-%llu]=%d)", a, (unsigned long long) len, value);
  for(i = 0; i < len; i++) {
    a[i] = value;
  }
  return v;
}
/* constant time memory check tests every position of a memory segment
   matches a single value (i.e. the memory is all zeros)
   returns 0 if match, 1 if no match */
/* Constant-time check that every byte of v equals value: all bytes are
 * examined regardless of mismatches so timing does not leak the position of
 * a difference. Returns 0 on a full match, 1 otherwise. */
int sqlcipher_ismemset(const void *v, unsigned char value, sqlite_uint64 len) {
  const volatile unsigned char *bytes = v;
  volatile sqlite_uint64 idx = 0, diff = 0;
  for(idx = 0; idx < len; idx++) {
    diff |= bytes[idx] ^ value;
  }
  return (diff != 0);
}
/* constant time memory comparison routine.
returns 0 if match, 1 if no match */
int sqlcipher_memcmp(const void *v0, const void *v1, int len) {
const volatile unsigned char *a0 = v0, *a1 = v1;
volatile int i = 0, result = 0;
for(i = 0; i < len; i++) {
result |= a0[i] ^ a1[i];
}
return (result != 0);
}
/* Best-effort lock of the pages backing [ptr, ptr+sz) into physical memory so
 * sensitive data cannot be written to swap. Failures are logged but never
 * fatal. Compiled out entirely when OMIT_MEMLOCK is defined. */
static void sqlcipher_mlock(void *ptr, sqlite_uint64 sz) {
#ifndef OMIT_MEMLOCK
#if defined(__unix__) || defined(__APPLE__)
  int rc;
  unsigned long pagesize = sysconf(_SC_PAGESIZE);
  /* round the start address down to a page boundary and grow the length to
   * compensate — mlock operates on whole pages.
   * NOTE(review): arithmetic on a void* (ptr - offset) is a GCC/Clang
   * extension, not standard C. */
  unsigned long offset = (unsigned long) ptr % pagesize;
  if(ptr == NULL || sz == 0) return;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: calling mlock(%p,%lu); _SC_PAGESIZE=%lu", ptr - offset, sz + offset, pagesize);
  rc = mlock(ptr - offset, sz + offset);
  if(rc!=0) {
    sqlcipher_log(SQLCIPHER_LOG_WARN, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: mlock() returned %d errno=%d", rc, errno);
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: mlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno);
  }
#elif defined(_WIN32)
#if !(defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP || WINAPI_FAMILY == WINAPI_FAMILY_PC_APP))
  /* NOTE(review): unlike the unix branch (and sqlcipher_munlock's win32
   * branch) there is no NULL/zero-size guard here — presumably VirtualLock
   * just fails and is logged; confirm this is intentional. */
  int rc;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: calling VirtualLock(%p,%d)", ptr, sz);
  rc = VirtualLock(ptr, sz);
  if(rc==0) {
    sqlcipher_log(SQLCIPHER_LOG_WARN, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: VirtualLock() returned %d LastError=%d", rc, GetLastError());
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY, "sqlcipher_mlock: VirtualLock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError());
  }
#endif
#endif
#endif
}
/* Best-effort unlock of the pages backing [ptr, ptr+sz) — the inverse of
 * sqlcipher_mlock. Failures are logged at INFO level only. Compiled out
 * entirely when OMIT_MEMLOCK is defined. */
static void sqlcipher_munlock(void *ptr, sqlite_uint64 sz) {
#ifndef OMIT_MEMLOCK
#if defined(__unix__) || defined(__APPLE__)
  int rc;
  unsigned long pagesize = sysconf(_SC_PAGESIZE);
  /* round down to a page boundary to mirror the corresponding mlock() call.
   * NOTE(review): pointer arithmetic on void* is a GCC/Clang extension. */
  unsigned long offset = (unsigned long) ptr % pagesize;
  if(ptr == NULL || sz == 0) return;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_munlock: calling munlock(%p,%lu)", ptr - offset, sz + offset);
  rc = munlock(ptr - offset, sz + offset);
  if(rc!=0) {
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY, "sqlcipher_munlock: munlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno);
  }
#elif defined(_WIN32)
#if !(defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP || WINAPI_FAMILY == WINAPI_FAMILY_PC_APP))
  int rc;
  if(ptr == NULL || sz == 0) return;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_munlock: calling VirtualUnlock(%p,%d)", ptr, sz);
  rc = VirtualUnlock(ptr, sz);
  /* because memory allocations may be made from the same individual page, it is possible for VirtualUnlock to be called
   * multiple times for the same page. Subsequent calls will return an error, but this can be safely ignored (i.e. because
   * the previous call for that page unlocked the memory already). Log an info level event only in that case. */
  if(!rc) {
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY, "sqlcipher_munlock: VirtualUnlock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError());
  }
#endif
#endif
#endif
}
/** sqlcipher wraps the default memory subsystem so it can optionally provide the
 * memory security feature which will lock and sanitize ALL memory used by
 * the sqlite library internally. The memory security feature is disabled by default,
 * but the wrapper is used regardless; it just forwards to the default
 * memory management implementation when disabled
 */
/* sqlite3_mem_methods.xInit hook: forward straight to the saved default allocator. */
static int sqlcipher_mem_init(void *pAppData) {
  return default_mem_methods.xInit(pAppData);
}
/* sqlite3_mem_methods.xShutdown hook: forward to the saved default allocator. */
static void sqlcipher_mem_shutdown(void *pAppData) {
  default_mem_methods.xShutdown(pAppData);
}
/* sqlite3_mem_methods.xMalloc hook: allocate n bytes via the default
 * allocator; when memory security is enabled, additionally lock the pages
 * backing the allocation so they cannot be swapped out (sqlcipher_mlock
 * tolerates a NULL result from the allocator). */
static void *sqlcipher_mem_malloc(int n) {
  void *ptr = default_mem_methods.xMalloc(n);
  /* record that the wrapper has handled at least one allocation */
  if(!sqlcipher_mem_executed) sqlcipher_mem_executed = 1;
  if(sqlcipher_mem_security_on) {
    sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "sqlcipher_mem_malloc: calling sqlcipher_mlock(%p,%d)", ptr, n);
    sqlcipher_mlock(ptr, n);
  }
  return ptr;
}
/* sqlite3_mem_methods.xSize hook: report the allocation size via the default allocator. */
static int sqlcipher_mem_size(void *p) {
  return default_mem_methods.xSize(p);
}
/* sqlite3_mem_methods.xFree hook: free p through the default allocator. When
 * memory security is enabled the allocation is first overwritten with PRNG
 * data and its pages are unlocked.
 * NOTE(review): p is queried with xSize before any NULL check — presumably
 * safe because sqlite3_free() filters NULL before invoking xFree; confirm if
 * this is ever called directly. */
static void sqlcipher_mem_free(void *p) {
  int sz;
  if(!sqlcipher_mem_executed) sqlcipher_mem_executed = 1;
  if(sqlcipher_mem_security_on) {
    sz = sqlcipher_mem_size(p);
    sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "%s: calling xoshiro_randomness(%p,%d) and sqlcipher_munlock(%p, %d)", __func__, p, sz, p, sz);
    xoshiro_randomness(p, sz);
    sqlcipher_munlock(p, sz);
  }
  default_mem_methods.xFree(p);
}
/* xRealloc wrapper.
 *
 * With memory security disabled this forwards to the default allocator's
 * xRealloc. With it enabled, realloc is emulated via sqlcipher_mem_malloc /
 * memcpy / sqlcipher_mem_free so the old block can be wiped and its pages
 * unlocked:
 *   - n == 0:            frees p (if any) and returns NULL
 *   - p == NULL:         behaves as malloc(n)
 *   - n <= current size: returns p unchanged (blocks are never shrunk)
 *   - otherwise:         allocates a new locked block, copies the old
 *                        contents, wipes and frees the old block. On
 *                        allocation failure returns NULL and leaves p
 *                        valid, matching standard realloc semantics.
 *
 * Fix: the original queried sqlcipher_mem_size(p) before checking p for
 * NULL; the default allocator's xSize must not be passed a NULL pointer
 * (it reads the allocation header), so the size lookup now happens only
 * after the p == NULL cases are handled. */
static void *sqlcipher_mem_realloc(void *p, int n) {
  void *new = NULL;
  int orig_sz = 0;
  if(sqlcipher_mem_security_on) {
    if(n == 0) {
      /* realloc-to-zero is a free; skip the call entirely for a NULL p so
         sqlcipher_mem_free never probes xSize on a NULL pointer */
      if(p) sqlcipher_mem_free(p);
      return NULL;
    }
    if(!p) {
      return sqlcipher_mem_malloc(n);
    }
    orig_sz = sqlcipher_mem_size(p); /* safe: p is known non-NULL here */
    if(n <= orig_sz) {
      return p;
    }
    new = sqlcipher_mem_malloc(n);
    if(new) {
      memcpy(new, p, orig_sz);
      sqlcipher_mem_free(p);
    }
    return new;
  } else {
    return default_mem_methods.xRealloc(p, n);
  }
}
/* xRoundup wrapper: defer allocation-size rounding policy to the default
 * allocator. */
static int sqlcipher_mem_roundup(int n) {
  int rounded = default_mem_methods.xRoundup(n);
  return rounded;
}
static sqlite3_mem_methods sqlcipher_mem_methods = {
sqlcipher_mem_malloc,
sqlcipher_mem_free,
sqlcipher_mem_realloc,
sqlcipher_mem_size,
sqlcipher_mem_roundup,
sqlcipher_mem_init,
sqlcipher_mem_shutdown,
0
};
/* Install the sqlcipher memory wrapper: capture the currently configured
 * allocator through SQLITE_CONFIG_GETMALLOC, then register the wrapper
 * vtable with SQLITE_CONFIG_MALLOC. Idempotent once initialized; on any
 * failure all wrapper state flags are reset to zero. */
void sqlcipher_init_memmethods() {
  int rc;
  if(sqlcipher_mem_initialized) return;
  rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &default_mem_methods);
  if(rc == SQLITE_OK) {
    rc = sqlite3_config(SQLITE_CONFIG_MALLOC, &sqlcipher_mem_methods);
  }
  if(rc != SQLITE_OK) {
    sqlcipher_mem_security_on = sqlcipher_mem_executed = sqlcipher_mem_initialized = 0;
  } else {
    sqlcipher_mem_initialized = 1;
  }
}
/**
 * Free and wipe memory. Uses SQLite's internal sqlite3_free so that memory
 * can be counted and memory leak detection works in the test suite.
 * If ptr is not NULL the memory will be freed.
 * If sz is greater than zero, the memory will be overwritten with random data
 * before it is freed.
 * If sz is > 0, and not compiled with OMIT_MEMLOCK, the system will attempt to
 * unlock the memory segment so it can be paged.
 */
static void sqlcipher_internal_free(void *ptr, sqlite_uint64 sz) {
  xoshiro_randomness(ptr, sz); /* overwrite the first sz bytes with pseudorandom data before release */
  sqlcipher_munlock(ptr, sz); /* re-allow paging of the segment; NOTE(review): assumes both helpers tolerate ptr==NULL/sz==0 as the Win32 munlock path does -- confirm for the posix path */
  sqlite3_free(ptr); /* sqlite3_free keeps SQLite's allocation accounting accurate and is a no-op on NULL */
}
/**
 * Allocate memory. Uses SQLite's internal malloc wrapper so memory can be
 * reference counted and leak detection works. Unless compiled with
 * OMIT_MEMLOCK, attempts to lock the memory pages so sensitive information
 * won't be swapped.
 */
/* Allocate sz bytes through SQLite's allocator (so the memory is counted
 * and leak detection works), zero the block, and attempt to lock its pages
 * so sensitive contents are not swapped to disk. Returns NULL when the
 * allocation fails. */
static void* sqlcipher_internal_malloc(sqlite_uint64 sz) {
  void *ptr;
  /* use the 64-bit allocator: sz is a sqlite_uint64 and passing it to
   * sqlite3_malloc(int) would silently truncate requests larger than
   * INT_MAX */
  ptr = sqlite3_malloc64(sz);
  if(ptr != NULL) {
    /* only wipe and lock when the allocation actually succeeded */
    sqlcipher_memset(ptr, 0, sz);
    sqlcipher_mlock(ptr, sz);
  }
  return ptr;
}
/* Allocate size bytes, preferring the locked private heap.
 *
 * The request is rounded up to the heap's allocation granularity, then the
 * private heap's block list is scanned (under SQLCIPHER_MUTEX_MEM) for the
 * first free block large enough. A matching block is marked in-use, zeroed,
 * and split when enough space remains for a viable free remainder block.
 * If no block fits, falls back to sqlcipher_internal_malloc (zeroed and
 * mlocked system memory) and records overflow statistics. Returns NULL for
 * size < 1 or when the fallback allocation fails.
 *
 * Fix: the two log calls previously paired a %u conversion with the
 * sqlite3_uint64 "size" argument, which is undefined behavior in a variadic
 * call; they now use %llu with an explicit cast. */
void *sqlcipher_malloc(sqlite3_uint64 size) {
  void *alloc = NULL;
  private_block *block = NULL, *split = NULL;
  if(size < 1) return NULL; /* size is unsigned, so this rejects only size == 0 */
  size = SQLCIPHER_PRIVATE_HEAP_ROUNDUP(size);
  block = (private_block *) private_heap;
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: entering SQLCIPHER_MUTEX_MEM", __func__);
  sqlite3_mutex_enter(sqlcipher_mutex(SQLCIPHER_MUTEX_MEM));
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: entered SQLCIPHER_MUTEX_MEM", __func__);
  /* iterate through the blocks in the heap to find one which is big enough to hold
     the requested allocation. Stop when one is found. */
  while(block != NULL && alloc == NULL) {
    if(!block->is_used && block->size >= size) {
      /* mark the block as in use and set the return pointer to the start
         of the block free space */
      block->is_used = 1;
      alloc = ((u8*)block) + sizeof(private_block);
      sqlcipher_memset(alloc, 0, size);
      /* if there is at least the minimum amount of required space left after allocation,
         split off a new free block and insert it after the in-use block */
      if(block->size >= size + sizeof(private_block) + SQLCIPHER_PRIVATE_HEAP_MIN_SPLIT_SIZE) {
        split = (private_block*) (((u8*) block) + size + sizeof(private_block));
        split->is_used = 0;
        split->size = block->size - size - sizeof(private_block);
        /* insert in between the current block and the next */
        split->next = block->next;
        block->next = split;
        /* only set the size of the current block to the requested amount
           if the block was split. otherwise, size will be the full amount
           of the block, which will actually be larger than the requested amount */
        block->size = size;
      }
    }
    block = block->next;
  }
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: leaving SQLCIPHER_MUTEX_MEM", __func__);
  sqlite3_mutex_leave(sqlcipher_mutex(SQLCIPHER_MUTEX_MEM));
  sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: left SQLCIPHER_MUTEX_MEM", __func__);
  /* If we were unable to locate a free block large enough to service the request, the fallback
     behavior will simply attempt to allocate additional memory using malloc. */
  if(alloc == NULL) {
    private_heap_overflow += size;
    private_heap_overflows++;
    alloc = sqlcipher_internal_malloc(size);
    sqlcipher_log(SQLCIPHER_LOG_INFO, SQLCIPHER_LOG_MEMORY, "%s: unable to allocate %llu bytes on private heap, allocated %p using sqlcipher_internal_malloc fallback", __func__, (unsigned long long)size, alloc);
  } else {
    private_heap_used += size;
    if(private_heap_used > private_heap_hwm) {
      /* if the current bytes allocated on the private heap are greater than the high water mark, set the HWM to the new amount */
      private_heap_hwm = private_heap_used;
    }
    private_heap_alloc += size;
    private_heap_allocs++;
    sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MEMORY, "%s allocated %llu bytes on private heap at %p", __func__, (unsigned long long)size, alloc);
  }
  return alloc;
}
void sqlcipher_free(void *mem, sqlite3_uint64 sz) {
private_block *block = NULL, *prev = NULL;
void *alloc = NULL;
u32 block_size = 0;
block = (private_block *) private_heap;
sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: entering SQLCIPHER_MUTEX_MEM", __func__);
sqlite3_mutex_enter(sqlcipher_mutex(SQLCIPHER_MUTEX_MEM));
sqlcipher_log(SQLCIPHER_LOG_TRACE, SQLCIPHER_LOG_MUTEX, "%s: entered SQLCIPHER_MUTEX_MEM", __func__);
/* search the heap for the block that contains this address */
while(block != NULL) {
alloc = ((u8*)block)+sizeof(private_block);
/* if the memory address to be freed corresponds to this block's
allocation, mark it as unused. If they don't match, move
on to the next block */
if(mem == alloc) {
block->is_used = 0;
block_size = block->size; /* retain the acual size of the block in use for stats adjustment */
xoshiro_randomness(alloc, block->size);
/* check whether the previous block is free, if so merge*/
if(prev && !prev->is_used) {
prev->size = prev->size + sizeof(private_block) + block->size;
prev->next = block->next;
block = prev;
}
/* check to see whether the next block is free, if so merge */
if(block->next && !block->next->is_used) {
block->size = block->size + sizeof(private_block) + block->next->size;
block->next = block->next->next;
}
/* once the block has been identified, marked free, and optionally
consolidated with it's neighbors, exit the loop, but leave
the block pointer intact so we know we found it in the heap */
break;
}
prev = block;