# s3-bsync Copyright (c) 2022 Joshua Stockin
# <https://joshstock.in>
# <https://git.joshstock.in/s3-bsync>
#
# This software is licensed and distributed under the terms of the MIT License.
# See the MIT License in the LICENSE file of this project's root folder.
#
# This comment block and its contents, including this disclaimer, MUST be
# preserved in all copies or distributions of this software's source.

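"""Run-mode dispatch for s3-bsync: purge, dump, init, and sync routines."""
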
import logging
import datetime
import sys

from . import syncfile
from . import filescan
from .classes import sync_managed_bucket

logger = logging.getLogger(__name__)

__all__ = ["run"]


def purge(state):
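    """Delete the syncfile from disk, then exit."""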
    logger.debug("Purging syncfile")
    state.purge()
    logger.debug("Success. Exiting...")
    sys.exit(0)


def dump(state):
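    """Echo deserialized syncfile contents to stdout, then exit."""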
    logger.debug("Running in DUMP mode. Echoing deserialized information to stdout:")
    print("DUMP mode")
    print(
        f'Read valid s3sync file "{state.file_path}" (version {state.file_version})'
    )
    print("Metadata")
    print(
        f"  Last synced time: {state.last_synced_time} (resolves to {datetime.datetime.fromtimestamp(state.last_synced_time / 1000.0)})"
    )
    print(f"  Number of tracked buckets: {len(state.managed_buckets)}")
    print(
        f"  Total # of mapped directories: {sum(len(bucket.directory_maps) for bucket in state.managed_buckets)}"
    )
    print(
        f"  Total # of tracked fileobjects: {sum(len(bucket.fileobjects) for bucket in state.managed_buckets)}"
    )
    print(f"  Filesize: {state.file_size}")

    for bucket in state.managed_buckets:
        print(f'Bucket "{bucket.bucket_name}"')
        print(f"  # of mapped directories: {len(bucket.directory_maps)}")
        print(f"  # of tracked fileobjects: {len(bucket.fileobjects)}")
        if len(bucket.directory_maps) > 0:
            print("  Mapped directories:")
            for dirmap in bucket.directory_maps:
                print(
                    f"    > {syncfile.dirmap_stringify(dirmap.local_path, bucket.bucket_name, dirmap.s3_prefix)}"
                )
                print(f"      gz_compress {dirmap.gz_compress}")
                print(f"      recursive {dirmap.recursive}")
                print(f"      gpg_enabled {dirmap.gpg_enabled}")
                print(f'      gpg_email "{dirmap.gpg_email}"')

    logger.debug("Finished dump. Exiting...")
    sys.exit(0)


def run(settings):
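    """Dispatch program execution based on settings.mode.

    Recognized modes: PURGE, OVERWRITE, INIT, DUMP, SYNC.
    """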
    logger.debug("Entering run sequence")
    state = syncfile.syncfile(settings.syncfile)

    if "PURGE" in settings.mode:
        purge(state)

    if state.file_exists() and "OVERWRITE" not in settings.mode:
        # Existing data will be used, not overwritten
        logger.debug("Syncfile exists. Deserializing...")
        state.deserialize()

    if not state.file_exists() and "INIT" not in settings.mode:
        logger.error("Syncfile is nonexistent; run in INIT mode to create")
        sys.exit(1)

    if "DUMP" in settings.mode:
        dump(state)

    if "INIT" in settings.mode:
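        # Apply directory map additions (dirmaps) and removals (rmdirs) from settings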
        if hasattr(settings, "dirmaps"):
            for local_path in settings.dirmaps:
                state.map_directory(local_path, settings.dirmaps[local_path])
        if hasattr(settings, "rmdirs"):
            for local_path in settings.rmdirs:
                state.remove_dirmap(local_path, settings.rmdirs[local_path])

    if "SYNC" in settings.mode:
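        # Scan tracked files in each managed bucket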
        for bucket in state.managed_buckets:
            filescan.by_bucket(bucket)

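    # Write state (including any changes from INIT) back to the syncfile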
    state.serialize()
    sys.exit(0)