-
Notifications
You must be signed in to change notification settings - Fork 0
/
fat_table.c
145 lines (127 loc) · 5.29 KB
/
fat_table.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
/*
* fat_table.c
*
 * The fat_table TAD is an abstraction that can be used to navigate, read, and
 * write the FAT table and the chains of data clusters.
*/
#include "fat_table.h"
#include <unistd.h>
/*
 * Tells whether `cluster` names an addressable data cluster. Data clusters
 * occupy the half-open range [2, num_data_clusters + 2); values 0 and 1 are
 * reserved by the FAT format.
 */
inline bool fat_table_is_valid_cluster_number(const fat_table table,
                                              u32 cluster) {
    if (cluster < 2) {
        return false;
    }
    return cluster < table->num_data_clusters + 2;
}
/*
 * Follows the FAT chain one step: returns the cluster that comes after
 * `cur_cluster`, or FAT_CLUSTER_END_OF_CHAIN_MAX when the chain ends.
 */
u32 fat_table_get_next_cluster(fat_table table, u32 cur_cluster) {
    const le32 *entries = (const le32 *)table->fat_map;
    u32 next = le32_to_cpu(entries[cur_cluster]);
    /* Special FAT values (free, bad sector, reserved, the various EOC
     * markers) are not distinguished here: every out-of-range value is
     * collapsed into a single end-of-chain sentinel. This may be okay for
     * read-only mounts. */
    if (!fat_table_is_valid_cluster_number(table, next)) {
        return FAT_CLUSTER_END_OF_CHAIN_MAX;
    }
    return next;
}
/*
 * Returns the size in bytes of one data cluster (2^cluster_order).
 */
inline size_t fat_table_bytes_per_cluster(fat_table table) {
    /* Shift a size_t, not a plain int literal: `1 << order` is an int
     * intermediate, so order >= 31 would be undefined behavior even though
     * the result is assigned to size_t. */
    return (size_t)1 << table->cluster_order;
}
/*
 * Returns the intra-cluster part of a byte offset, i.e. the distance of
 * `offset` from the start of the cluster it falls in.
 */
size_t fat_table_mask_offset(off_t offset, fat_table table) {
    /* Cluster sizes are powers of two, so the intra-cluster part is simply
     * the low cluster_order bits. */
    const size_t cluster_size = fat_table_bytes_per_cluster(table);
    return offset & (cluster_size - 1);
}
/*
 * Returns how many bytes can be processed without crossing a cluster
 * boundary: the bytes left between `offset` and the end of its cluster,
 * capped by `bytes_remaining`.
 */
size_t fat_table_get_cluster_remaining_bytes(fat_table table,
                                             size_t bytes_remaining,
                                             off_t offset) {
    size_t until_cluster_end = fat_table_bytes_per_cluster(table) -
                               fat_table_mask_offset(offset, table);
    return min(until_cluster_end, bytes_remaining);
}
/*
 * Returns the number of clusters needed to hold `file_size` bytes, rounding
 * up (a partially-used final cluster still occupies a whole cluster).
 */
u32 fat_table_get_clusters_for_size(fat_table table, size_t file_size) {
    const size_t cluster_size = fat_table_bytes_per_cluster(table);
    off_t padded = (off_t)file_size + (cluster_size - 1);
    return padded >> table->cluster_order;
}
/*
 * Linearly scans the in-memory FAT for the first entry marked
 * FAT_CLUSTER_FREE, starting at cluster 2 (the first two entries are
 * reserved by the FAT format).
 *
 * Returns the free cluster number, or FAT_CLUSTER_END_OF_CHAIN_MAX (with an
 * error logged) if the table contains no free cluster.
 */
u32 fat_table_get_next_free_cluster(fat_table table) {
    const le32 *entries = (const le32 *)table->fat_map;
    u32 cluster = 2; /* First two clusters are reserved */
    /* Bound the scan by the valid data-cluster range: the original loop had
     * no bound and would read past the end of the mapped FAT whenever the
     * volume had no free cluster left. */
    while (fat_table_is_valid_cluster_number(table, cluster) &&
           le32_to_cpu(entries[cluster]) != FAT_CLUSTER_FREE) {
        cluster++;
    }
    if (!fat_table_is_valid_cluster_number(table, cluster)) {
        fat_error("There was a problem fetching for a free cluster");
        cluster = FAT_CLUSTER_END_OF_CHAIN_MAX;
    }
    DEBUG("next free cluster = %u", cluster);
    return cluster;
}
/*
 * Maps a cluster number to its byte offset on the underlying device. Data
 * clusters are numbered from 2, so cluster 2 lives at data_start_offset.
 */
inline off_t fat_table_cluster_offset(const fat_table table, u32 cluster) {
    off_t relative = (off_t)(cluster - 2) << table->cluster_order;
    return table->data_start_offset + relative;
}
/*
 * Tells whether a cluster is in use: a zero FAT entry marks a free cluster,
 * any other value means the cluster is allocated (or special).
 */
inline bool fat_table_is_cluster_used(fat_table table, u32 cluster) {
    const le32 *entries = (const le32 *)table->fat_map;
    return le32_to_cpu(entries[cluster]) != 0;
}
/*
 * Links `cur_cluster` to `next_cluster` in the FAT: writes the entry to the
 * on-disk table first, then mirrors it into the in-memory map. On write
 * failure, sets errno to EIO and leaves the in-memory map untouched.
 */
void fat_table_set_next_cluster(fat_table table, u32 cur_cluster,
                                u32 next_cluster) {
    le32 next_cluster_le32 = cpu_to_le32(next_cluster);
    /* Write the disk fat table. Widen BEFORE multiplying: `cur_cluster * 4`
     * in 32-bit arithmetic could wrap for cluster numbers >= 2^30. */
    off_t entry_offset = (off_t)cur_cluster * 4 + table->fat_offset;
    ssize_t written_bytes =
        pwrite(table->fd, &next_cluster_le32, sizeof(le32), entry_offset);
    /* A short write (1-3 bytes) leaves a torn FAT entry on disk; treat it
     * exactly like a failed write instead of accepting any positive count. */
    if (written_bytes != (ssize_t)sizeof(le32)) {
        DEBUG("Error writing next cluster disk entry");
        errno = EIO;
        return;
    }
    /* Alter the in-memory table so it stays coherent with the disk. */
    ((le32 *)table->fat_map)[cur_cluster] = next_cluster_le32;
}
/*
 * Walks the FAT chain starting at `start_cluster` until reaching the
 * cluster that contains byte `offset`. Returns that cluster, or 0 with
 * errno = EOVERFLOW if the chain ends before the offset is reached.
 */
u32 fat_table_seek_cluster(fat_table table, u32 start_cluster, off_t offset) {
    /* Number of whole clusters the byte offset spans. */
    u32 hops = offset >> table->cluster_order;
    while (hops > 0) {
        if (start_cluster == FAT_CLUSTER_END_OF_CHAIN_MAX) {
            DEBUG("Offset is bigger than file's last cluster.");
            errno = EOVERFLOW;
            return 0;
        }
        start_cluster = fat_table_get_next_cluster(table, start_cluster);
        hops--;
    }
    return start_cluster;
}
/*
 * Tells whether a FAT entry value is an end-of-chain marker. EOC markers
 * occupy a contiguous range of reserved values.
 */
bool fat_table_cluster_is_EOC(u32 cluster) {
    if (cluster < FAT_CLUSTER_END_OF_CHAIN_MIN) {
        return false;
    }
    return cluster <= FAT_CLUSTER_END_OF_CHAIN_MAX;
}
/*
 * Tells whether a FAT entry value is the reserved bad-sector marker.
 */
bool fat_table_cluster_is_bad_sector(u32 cluster) {
    return FAT_CLUSTER_BAD_SECTOR == cluster;
}
/*
 * Tells whether the FAT entry for `cluster` is the bad-sector marker. Reads
 * the raw entry directly rather than via fat_table_get_next_cluster, which
 * collapses special values into an end-of-chain sentinel.
 */
bool fat_table_is_next_cluster_bad_sector(fat_table table, u32 cluster) {
    const le32 *entries = (const le32 *)table->fat_map;
    u32 content = le32_to_cpu(entries[cluster]);
    return fat_table_cluster_is_bad_sector(content);
}
/*
 * Tells whether a FAT entry value points at ordinary data: neither an
 * end-of-chain marker nor the bad-sector marker.
 */
bool fat_table_cluster_is_valid(u32 cluster) {
    if (fat_table_cluster_is_EOC(cluster)) {
        return false;
    }
    return !fat_table_cluster_is_bad_sector(cluster);
}
/*
 * Debug helper: prints up to 50 consecutive FAT entries starting at
 * `start_cluster`, stopping early if `end_cluster` is reached. EOC entries
 * print as "EOC", bad-sector entries in hex, ordinary entries in decimal.
 */
void fat_table_print(fat_table table, u32 start_cluster, u32 end_cluster) {
    const le32 *entries = (const le32 *)table->fat_map;
    u32 cur_cluster = start_cluster;
    /* The original loop tested `counter >= 0` on an unsigned counter, which
     * is always true, so the 50-entry cap never took effect; `> 0` makes it
     * actually terminate. */
    for (u32 remaining = 50; remaining > 0 && cur_cluster != end_cluster;
         remaining--, cur_cluster++) {
        /* Convert once from little-endian; the original printed the raw
         * le32 value in the hex/decimal branches, which is wrong on
         * big-endian hosts. */
        u32 content = le32_to_cpu(entries[cur_cluster]);
        if (fat_table_cluster_is_EOC(content)) {
            printf("|[%u]EOC", cur_cluster);
        } else if (fat_table_cluster_is_bad_sector(content)) {
            printf("|[%u]0x%x", cur_cluster, content);
        } else {
            printf("|[%u]%u", cur_cluster, content);
        }
    }
    printf("|\n");
}