Btrfs: unaligned access fixes
Btrfs set/get macros lose the type information needed to avoid unaligned accesses on sparc64. Here is a patch for the kernel bits which fixes most of the unaligned accesses on sparc64. btrfs_name_hash is modified to return the hash value instead of getting a return location via a (potentially unaligned) pointer.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
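For context, a minimal userspace sketch of the failure mode the patch addresses. The struct and function names here are hypothetical, not kernel code; it only illustrates how casting a raw byte pointer to a wider type discards the alignment information that a packed struct type carries:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical packed on-disk struct; btrfs declares its metadata
 * structures packed, so members can sit at any byte offset. */
struct disk_item {
        uint8_t  pad;
        uint64_t value; /* at byte offset 1 when packed */
} __attribute__((packed));

/* Risky on strict-alignment CPUs such as sparc64: the cast promises the
 * compiler an 8-byte-aligned pointer, so it may emit an aligned load
 * that traps when buf + 1 is not actually aligned. */
static uint64_t read_via_cast(char *buf)
{
        uint64_t *tmp = (uint64_t *)(buf + offsetof(struct disk_item, value));
        return *tmp;
}

/* Safe: going through the packed struct type keeps the alignment
 * information, and the compiler emits byte-wise loads as needed. */
static uint64_t read_via_member(char *buf)
{
        struct disk_item *p = (struct disk_item *)buf;
        return p->value;
}

int main(void)
{
        char buf[sizeof(struct disk_item)] = {0, 9};

        printf("%llu %llu\n",
               (unsigned long long)read_via_cast(buf),   /* may trap */
               (unsigned long long)read_via_member(buf));
        return 0;
}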
commit df68b8a7ad
parent 39b5637f6f
5 changed files with 33 additions and 45 deletions
fs/btrfs/ctree.h
@@ -495,22 +495,17 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
 static inline u##bits btrfs_##name(struct extent_buffer *eb) \
 { \
-        char *kaddr = kmap_atomic(eb->first_page, KM_USER0); \
-        unsigned long offset = offsetof(type, member); \
-        u##bits res; \
-        __le##bits *tmp = (__le##bits *)(kaddr + offset); \
-        res = le##bits##_to_cpu(*tmp); \
-        kunmap_atomic(kaddr, KM_USER0); \
+        type *p = kmap_atomic(eb->first_page, KM_USER0); \
+        u##bits res = le##bits##_to_cpu(p->member); \
+        kunmap_atomic(p, KM_USER0); \
         return res; \
 } \
 static inline void btrfs_set_##name(struct extent_buffer *eb, \
                                     u##bits val) \
 { \
-        char *kaddr = kmap_atomic(eb->first_page, KM_USER0); \
-        unsigned long offset = offsetof(type, member); \
-        __le##bits *tmp = (__le##bits *)(kaddr + offset); \
-        *tmp = cpu_to_le##bits(val); \
-        kunmap_atomic(kaddr, KM_USER0); \
+        type *p = kmap_atomic(eb->first_page, KM_USER0); \
+        p->member = cpu_to_le##bits(val); \
+        kunmap_atomic(p, KM_USER0); \
 }
 
 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
fs/btrfs/dir-item.c
@@ -71,8 +71,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
 
         key.objectid = dir;
         btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
-        ret = btrfs_name_hash(name, name_len, &key.offset);
-        BUG_ON(ret);
+        key.offset = btrfs_name_hash(name, name_len);
         path = btrfs_alloc_path();
         if (!path)
                 return -ENOMEM;
@@ -125,8 +124,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
 
         key.objectid = dir;
         btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
-        ret = btrfs_name_hash(name, name_len, &key.offset);
-        BUG_ON(ret);
+        key.offset = btrfs_name_hash(name, name_len);
         path = btrfs_alloc_path();
         data_size = sizeof(*dir_item) + name_len;
         dir_item = insert_with_overflow(trans, root, path, &key, data_size,
@@ -199,8 +197,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
         key.objectid = dir;
         btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
 
-        ret = btrfs_name_hash(name, name_len, &key.offset);
-        BUG_ON(ret);
+        key.offset = btrfs_name_hash(name, name_len);
 
         ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
         if (ret < 0)
@@ -261,8 +258,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 
         key.objectid = dir;
         btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
-        ret = btrfs_name_hash(name, name_len, &key.offset);
-        BUG_ON(ret);
+        key.offset = btrfs_name_hash(name, name_len);
         ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
         if (ret < 0)
                 return ERR_PTR(ret);
fs/btrfs/hash.c
@@ -76,19 +76,18 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
                 *buf++ = pad;
 }
 
-int btrfs_name_hash(const char *name, int len, u64 *hash_result)
+u64 btrfs_name_hash(const char *name, int len)
 {
         __u32 hash;
         __u32 minor_hash = 0;
         const char *p;
         __u32 in[8], buf[2];
+        u64 hash_result;
 
         if (len == 1 && *name == '.') {
-                *hash_result = 1;
-                return 0;
+                return 1;
         } else if (len == 2 && name[0] == '.' && name[1] == '.') {
-                *hash_result = 2;
-                return 0;
+                return 2;
         }
 
         /* Initialize the default seed for the hash checksum functions */
@@ -106,8 +105,8 @@ int btrfs_name_hash(const char *name, int len, u64 *hash_result)
         }
         hash = buf[0];
         minor_hash = buf[1];
-        *hash_result = buf[0];
-        *hash_result <<= 32;
-        *hash_result |= buf[1];
-        return 0;
+        hash_result = buf[0];
+        hash_result <<= 32;
+        hash_result |= buf[1];
+        return hash_result;
 }
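To see why returning the hash by value matters, here is a userspace sketch with a hypothetical packed key type (demo_key and demo_name_hash are illustrative names, not the kernel's btrfs_key or hash function): the old interface handed out a u64 * aimed at a potentially unaligned packed member, while the new one lets the caller assign through the member, which the compiler handles with safe byte-wise stores.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a packed on-disk key: 'offset' sits at
 * byte 1, so &key.offset would be a misaligned u64 pointer. */
struct demo_key {
        uint8_t  type;
        uint64_t offset;
} __attribute__((packed));

/* Illustrative hash (djb2), standing in for btrfs_name_hash. */
static uint64_t demo_name_hash(const char *name, int len)
{
        uint64_t h = 5381;

        while (len--)
                h = h * 33 + (uint8_t)*name++;
        return h;
}

int main(void)
{
        struct demo_key key;

        /* The old style would pass &key.offset and store through it,
         * which can trap on sparc64. Assigning through the packed
         * member instead lets the compiler emit safe stores. */
        key.offset = demo_name_hash("example", 7);
        printf("%llu\n", (unsigned long long)key.offset);
        return 0;
}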
fs/btrfs/hash.h
@@ -18,5 +18,5 @@
 
 #ifndef __HASH__
 #define __HASH__
-int btrfs_name_hash(const char *name, int len, u64 *hash_result);
+u64 btrfs_name_hash(const char *name, int len);
 #endif
fs/btrfs/struct-funcs.c
@@ -21,16 +21,15 @@
 u##bits btrfs_##name(struct extent_buffer *eb, \
                      type *s) \
 { \
-        unsigned long offset = (unsigned long)s + \
-                               offsetof(type, member); \
-        __le##bits *tmp; \
+        unsigned long part_offset = (unsigned long)s; \
+        unsigned long offset = part_offset + offsetof(type, member); \
+        type *p; \
         /* ugly, but we want the fast path here */ \
         if (eb->map_token && offset >= eb->map_start && \
             offset + sizeof(((type *)0)->member) <= eb->map_start + \
             eb->map_len) { \
-                tmp = (__le##bits *)(eb->kaddr + offset - \
-                                     eb->map_start); \
-                return le##bits##_to_cpu(*tmp); \
+                p = (type *)(eb->kaddr + part_offset - eb->map_start); \
+                return le##bits##_to_cpu(p->member); \
         } \
         { \
                 int err; \
@@ -48,8 +47,8 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
                         read_eb_member(eb, s, type, member, &res); \
                         return le##bits##_to_cpu(res); \
                 } \
-                tmp = (__le##bits *)(kaddr + offset - map_start); \
-                res = le##bits##_to_cpu(*tmp); \
+                p = (type *)(kaddr + part_offset - map_start); \
+                res = le##bits##_to_cpu(p->member); \
                 if (unmap_on_exit) \
                         unmap_extent_buffer(eb, map_token, KM_USER1); \
                 return res; \
@@ -58,16 +57,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
 void btrfs_set_##name(struct extent_buffer *eb, \
                       type *s, u##bits val) \
 { \
-        unsigned long offset = (unsigned long)s + \
-                               offsetof(type, member); \
-        __le##bits *tmp; \
+        unsigned long part_offset = (unsigned long)s; \
+        unsigned long offset = part_offset + offsetof(type, member); \
+        type *p; \
         /* ugly, but we want the fast path here */ \
         if (eb->map_token && offset >= eb->map_start && \
             offset + sizeof(((type *)0)->member) <= eb->map_start + \
             eb->map_len) { \
-                tmp = (__le##bits *)(eb->kaddr + offset - \
-                                     eb->map_start); \
-                *tmp = cpu_to_le##bits(val); \
+                p = (type *)(eb->kaddr + part_offset - eb->map_start); \
+                p->member = cpu_to_le##bits(val); \
                 return; \
         } \
         { \
@@ -86,8 +84,8 @@ void btrfs_set_##name(struct extent_buffer *eb, \
                         write_eb_member(eb, s, type, member, &val); \
                         return; \
                 } \
-                tmp = (__le##bits *)(kaddr + offset - map_start); \
-                *tmp = cpu_to_le##bits(val); \
+                p = (type *)(kaddr + part_offset - map_start); \
+                p->member = cpu_to_le##bits(val); \
                 if (unmap_on_exit) \
                         unmap_extent_buffer(eb, map_token, KM_USER1); \
         } \
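The struct-funcs change keeps the bounds check on the member's absolute offset but builds the pointer from the start of the struct (part_offset), so the access still goes through the packed type. A userspace analogue of that fast path, with hypothetical demo_item and read_bytenr names:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical packed item standing in for a btrfs metadata struct. */
struct demo_item {
        uint8_t  flags;
        uint64_t bytenr; /* at byte offset 1 when packed */
} __attribute__((packed));

/* Range-check with the member's absolute offset, but form the pointer
 * from the struct base so the packed-member access stays type-safe. */
static int read_bytenr(char *map, size_t map_len,
                       size_t part_offset, uint64_t *out)
{
        size_t offset = part_offset + offsetof(struct demo_item, bytenr);

        if (offset + sizeof(((struct demo_item *)0)->bytenr) > map_len)
                return -1; /* member not fully inside the mapping */

        struct demo_item *p = (struct demo_item *)(map + part_offset);

        *out = p->bytenr; /* compiler emits safe byte-wise loads */
        return 0;
}

int main(void)
{
        char buf[64] = {0};
        struct demo_item it = { .flags = 1, .bytenr = 42 };
        uint64_t v;

        memcpy(buf + 3, &it, sizeof(it)); /* item at an odd offset */
        if (read_bytenr(buf, sizeof(buf), 3, &v) == 0)
                printf("bytenr = %llu\n", (unsigned long long)v);
        return 0;
}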