[Ocfs2-commits] manish commits r2514 - in trunk: . fs/configfs fs/ocfs2 fs/ocfs2/cluster fs/ocfs2/dlm kapi-compat/include
svn-commits at oss.oracle.com
Fri Aug 12 18:35:08 CDT 2005
Author: manish
Date: 2005-08-12 18:35:01 -0500 (Fri, 12 Aug 2005)
New Revision: 2514
Added:
trunk/fs/ocfs2/cluster/endian.h
trunk/fs/ocfs2/endian.h
trunk/kapi-compat/include/sparse_endian_types.h
Modified:
trunk/Config.make.in
trunk/Makefile
trunk/configure.in
trunk/fs/configfs/configfs_example.c
trunk/fs/ocfs2/Makefile
trunk/fs/ocfs2/alloc.c
trunk/fs/ocfs2/alloc.h
trunk/fs/ocfs2/aops.c
trunk/fs/ocfs2/cluster/Makefile
trunk/fs/ocfs2/cluster/heartbeat.c
trunk/fs/ocfs2/cluster/masklog.h
trunk/fs/ocfs2/cluster/net_proc.c
trunk/fs/ocfs2/cluster/nodemanager.c
trunk/fs/ocfs2/cluster/nodemanager.h
trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h
trunk/fs/ocfs2/cluster/quorum.c
trunk/fs/ocfs2/cluster/tcp.c
trunk/fs/ocfs2/cluster/tcp.h
trunk/fs/ocfs2/cluster/tcp_internal.h
trunk/fs/ocfs2/dir.c
trunk/fs/ocfs2/dlm/dlmast.c
trunk/fs/ocfs2/dlm/dlmcommon.h
trunk/fs/ocfs2/dlm/dlmconvert.c
trunk/fs/ocfs2/dlm/dlmdomain.c
trunk/fs/ocfs2/dlm/dlmlock.c
trunk/fs/ocfs2/dlm/dlmmaster.c
trunk/fs/ocfs2/dlm/dlmrecovery.c
trunk/fs/ocfs2/dlm/dlmunlock.c
trunk/fs/ocfs2/dlm/userdlm.c
trunk/fs/ocfs2/dlmglue.c
trunk/fs/ocfs2/dlmglue.h
trunk/fs/ocfs2/extent_map.c
trunk/fs/ocfs2/file.c
trunk/fs/ocfs2/inode.c
trunk/fs/ocfs2/journal.c
trunk/fs/ocfs2/journal.h
trunk/fs/ocfs2/localalloc.c
trunk/fs/ocfs2/namei.c
trunk/fs/ocfs2/ocfs2.h
trunk/fs/ocfs2/ocfs2_fs.h
trunk/fs/ocfs2/slot_map.c
trunk/fs/ocfs2/suballoc.c
trunk/fs/ocfs2/super.c
trunk/fs/ocfs2/vote.c
Log:
Merged endian-safe branch to trunk, r2498:2512
Signed-off-by: zab
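The added endian.h headers are only listed above and their contents are not in this mail, but the hunks below lean on in-place add helpers such as le16_add_cpu(), le32_add_cpu() and be32_add_cpu(). A minimal sketch of what helpers with those semantics look like (an assumption about the new headers, not a copy of them):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Add a CPU-order value to an on-disk little-endian field without ever
 * leaving a host-order value in the __le slot. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}

/* Same idea for big-endian on-wire fields. */
static inline void be32_add_cpu(__be32 *var, u32 val)
{
	*var = cpu_to_be32(be32_to_cpu(*var) + val);
}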
Modified: trunk/Config.make.in
===================================================================
--- trunk/Config.make.in 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/Config.make.in 2005-08-12 23:35:01 UTC (rev 2514)
@@ -55,6 +55,7 @@
BACKING_DEV_CAPABILITIES = @BACKING_DEV_CAPABILITIES@
IDR_GET_NEW_RETURNS_ID = @IDR_GET_NEW_RETURNS_ID@
INET_SK_RETURNS_INET_OPT = @INET_SK_RETURNS_INET_OPT@
+HAVE_SPARSE_ENDIAN_TYPES = @HAVE_SPARSE_ENDIAN_TYPES@
OCFS_DEBUG = @OCFS_DEBUG@
Modified: trunk/Makefile
===================================================================
--- trunk/Makefile 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/Makefile 2005-08-12 23:35:01 UTC (rev 2514)
@@ -13,7 +13,8 @@
kapi-compat/include/assert_spin_locked.h \
kapi-compat/include/journal_access.h \
kapi-compat/include/kref_init.h \
- kapi-compat/include/kref_put.h
+ kapi-compat/include/kref_put.h \
+ kapi-compat/include/sparse_endian_types.h
PATCH_FILES = \
patches/export_generic_drop_inode.patch
Modified: trunk/configure.in
===================================================================
--- trunk/configure.in 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/configure.in 2005-08-12 23:35:01 UTC (rev 2514)
@@ -227,6 +227,11 @@
[kref_init.*release])
KAPI_COMPAT_HEADERS="$KAPI_COMPAT_HEADERS $kref_compat_header"
+kref_compat_header=""
+OCFS2_CHECK_KERNEL([sparse endian types], types.h,
+ , kref_compat_header="sparse_endian_types.h", [__bitwise __be32])
+KAPI_COMPAT_HEADERS="$KAPI_COMPAT_HEADERS $kref_compat_header"
+
JOURNAL_ACCESS_WITH_CREDITS=
OCFS2_CHECK_KERNEL([journal access functions with a credits pointer], jbd.h,
JOURNAL_ACCESS_WITH_CREDITS=yes, , [\<int \*credits);])
Modified: trunk/fs/configfs/configfs_example.c
===================================================================
--- trunk/fs/configfs/configfs_example.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/configfs/configfs_example.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -346,7 +346,7 @@
struct simple_children {
struct config_group group;
-}item;
+};
static struct config_group *group_children_make_group(struct config_group *group, const char *name)
{
Modified: trunk/fs/ocfs2/Makefile
===================================================================
--- trunk/fs/ocfs2/Makefile 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/Makefile 2005-08-12 23:35:01 UTC (rev 2514)
@@ -72,6 +72,7 @@
dcache.h \
dir.h \
dlmglue.h \
+ endian.h \
extent_map.h \
file.h \
heartbeat.h \
Modified: trunk/fs/ocfs2/alloc.c
===================================================================
--- trunk/fs/ocfs2/alloc.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/alloc.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -96,9 +96,9 @@
ocfs2_extent_rec *ext,
u64 blkno)
{
- return blkno == (ext->e_blkno +
+ return blkno == (le64_to_cpu(ext->e_blkno) +
ocfs2_clusters_to_blocks(inode->i_sb,
- ext->e_clusters));
+ le32_to_cpu(ext->e_clusters)));
}
/*
@@ -118,8 +118,8 @@
OCFS2_BUG_ON_INVALID_DINODE(fe);
if (fe->i_last_eb_blk) {
- retval = ocfs2_read_block(osb, fe->i_last_eb_blk, &eb_bh,
- OCFS2_BH_CACHED, inode);
+ retval = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
+ &eb_bh, OCFS2_BH_CACHED, inode);
if (retval < 0) {
mlog_errno(retval);
goto bail;
@@ -131,7 +131,7 @@
BUG_ON(el->l_tree_depth != 0);
- retval = el->l_count - el->l_next_free_rec;
+ retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
if (eb_bh)
brelse(eb_bh);
@@ -204,7 +204,8 @@
eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
#endif
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
- eb->h_list.l_count = ocfs2_extent_recs_per_eb(osb->sb);
+ eb->h_list.l_count =
+ cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
suballoc_bit_start++;
first_blkno++;
@@ -277,7 +278,7 @@
/* we never add a branch to a leaf. */
BUG_ON(!el->l_tree_depth);
- new_blocks = el->l_tree_depth;
+ new_blocks = le16_to_cpu(el->l_tree_depth);
/* allocate the number of new eb blocks we need */
new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
@@ -317,11 +318,11 @@
}
eb->h_next_leaf_blk = 0;
- eb_el->l_tree_depth = i;
- eb_el->l_next_free_rec = 1;
+ eb_el->l_tree_depth = cpu_to_le16(i);
+ eb_el->l_next_free_rec = cpu_to_le16(1);
eb_el->l_recs[0].e_cpos = fe->i_clusters;
- eb_el->l_recs[0].e_blkno = next_blkno;
- eb_el->l_recs[0].e_clusters = 0;
+ eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
+ eb_el->l_recs[0].e_clusters = cpu_to_le32(0);
if (!eb_el->l_tree_depth)
new_last_eb_blk = le64_to_cpu(eb->h_blkno);
@@ -363,18 +364,18 @@
/* Link the new branch into the rest of the tree (el will
* either be on the fe, or the extent block passed in. */
- i = el->l_next_free_rec;
- el->l_recs[i].e_blkno = next_blkno;
+ i = le16_to_cpu(el->l_next_free_rec);
+ el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
el->l_recs[i].e_cpos = fe->i_clusters;
el->l_recs[i].e_clusters = 0;
- el->l_next_free_rec++;
+ le16_add_cpu(&el->l_next_free_rec, 1);
/* fe needs a new last extent block pointer, as does the
* next_leaf on the previously last-extent-block. */
- fe->i_last_eb_blk = new_last_eb_blk;
+ fe->i_last_eb_blk = cpu_to_le64(new_last_eb_blk);
eb = (ocfs2_extent_block *) last_eb_bh->b_data;
- eb->h_next_leaf_blk = new_last_eb_blk;
+ eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
status = ocfs2_journal_dirty(handle, last_eb_bh);
if (status < 0)
@@ -445,7 +446,7 @@
/* copy the fe data into the new extent block */
eb_el->l_tree_depth = fe_el->l_tree_depth;
eb_el->l_next_free_rec = fe_el->l_next_free_rec;
- for(i = 0; i < fe_el->l_next_free_rec; i++) {
+ for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
eb_el->l_recs[i].e_cpos = fe_el->l_recs[i].e_cpos;
eb_el->l_recs[i].e_clusters = fe_el->l_recs[i].e_clusters;
eb_el->l_recs[i].e_blkno = fe_el->l_recs[i].e_blkno;
@@ -465,20 +466,20 @@
}
/* update fe now */
- fe_el->l_tree_depth++;
+ le16_add_cpu(&fe_el->l_tree_depth, 1);
fe_el->l_recs[0].e_cpos = 0;
fe_el->l_recs[0].e_blkno = eb->h_blkno;
fe_el->l_recs[0].e_clusters = fe->i_clusters;
- for(i = 1; i < fe_el->l_next_free_rec; i++) {
+ for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
fe_el->l_recs[i].e_cpos = 0;
fe_el->l_recs[i].e_clusters = 0;
fe_el->l_recs[i].e_blkno = 0;
}
- fe_el->l_next_free_rec = 1;
+ fe_el->l_next_free_rec = cpu_to_le16(1);
/* If this is our 1st tree depth shift, then last_eb_blk
* becomes the allocated extent block */
- if (fe_el->l_tree_depth == 1)
+ if (fe_el->l_tree_depth == cpu_to_le16(1))
fe->i_last_eb_blk = eb->h_blkno;
status = ocfs2_journal_dirty(handle, fe_bh);
@@ -512,6 +513,7 @@
{
int status, i, num_bhs = 0;
u64 next_blkno;
+ u16 next_free;
struct buffer_head **eb_bhs = NULL;
ocfs2_dinode *fe;
ocfs2_extent_block *eb;
@@ -535,7 +537,7 @@
* forward. As a result, we have to record the buffers
* for this part of the tree in an array and reserve a
* journal write to them before making any changes. */
- num_bhs = fe->id2.i_list.l_tree_depth;
+ num_bhs = le16_to_cpu(fe->id2.i_list.l_tree_depth);
eb_bhs = kcalloc(num_bhs, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!eb_bhs) {
@@ -546,8 +548,9 @@
i = 0;
while(el->l_tree_depth) {
- OCFS2_BUG_ON_RO(!el->l_next_free_rec);
- next_blkno = el->l_recs[el->l_next_free_rec-1].e_blkno;
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ OCFS2_BUG_ON_RO(next_free == 0);
+ next_blkno = le64_to_cpu(el->l_recs[next_free - 1].e_blkno);
BUG_ON(i >= num_bhs);
status = ocfs2_read_block(osb, next_blkno, &eb_bhs[i],
@@ -578,8 +581,10 @@
* trivial, and we want to switch el out for the
* bottom-most leaf in order to update it with the
* actual extent data below. */
- OCFS2_BUG_ON_RO(!el->l_next_free_rec);
- el->l_recs[el->l_next_free_rec - 1].e_clusters += new_clusters;
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ OCFS2_BUG_ON_RO(next_free == 0);
+ le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
+ new_clusters);
/* (num_bhs - 1) to avoid the leaf */
for(i = 0; i < (num_bhs - 1); i++) {
eb = (ocfs2_extent_block *) eb_bhs[i]->b_data;
@@ -587,8 +592,9 @@
/* finally, make our actual change to the
* intermediate extent blocks. */
- el->l_recs[el->l_next_free_rec - 1].e_clusters
- += new_clusters;
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
+ new_clusters);
status = ocfs2_journal_dirty(handle, eb_bhs[i]);
if (status < 0)
@@ -603,27 +609,28 @@
}
/* yay, we can finally add the actual extent now! */
- i = el->l_next_free_rec - 1;
- if (el->l_next_free_rec && ocfs2_extent_contig(inode,
- &el->l_recs[i],
- start_blk)) {
- el->l_recs[i].e_clusters += new_clusters;
- } else if (el->l_next_free_rec && !el->l_recs[i].e_clusters) {
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le16_to_cpu(el->l_next_free_rec) &&
+ ocfs2_extent_contig(inode, &el->l_recs[i], start_blk)) {
+ le32_add_cpu(&el->l_recs[i].e_clusters, new_clusters);
+ } else if (le16_to_cpu(el->l_next_free_rec) &&
+ (le32_to_cpu(el->l_recs[i].e_clusters) == 0)) {
/* having an empty extent at eof is legal. */
OCFS2_BUG_ON_RO(el->l_recs[i].e_cpos != fe->i_clusters);
- el->l_recs[i].e_blkno = start_blk;
- el->l_recs[i].e_clusters = new_clusters;
+ el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
+ el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
} else {
/* No contiguous record, or no empty record at eof, so
* we add a new one. */
- BUG_ON(el->l_next_free_rec >= el->l_count);
- i = el->l_next_free_rec;
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) >=
+ le16_to_cpu(el->l_count));
+ i = le16_to_cpu(el->l_next_free_rec);
- el->l_recs[i].e_blkno = start_blk;
- el->l_recs[i].e_clusters = new_clusters;
+ el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
+ el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
el->l_recs[i].e_cpos = fe->i_clusters;
- el->l_next_free_rec++;
+ le16_add_cpu(&el->l_next_free_rec, 1);
}
/*
@@ -634,7 +641,7 @@
new_clusters);
if (status) {
mlog_errno(status);
- ocfs2_extent_map_drop(inode, fe->i_clusters);
+ ocfs2_extent_map_drop(inode, le32_to_cpu(fe->i_clusters));
}
status = ocfs2_journal_dirty(handle, fe_bh);
@@ -696,10 +703,10 @@
fe = (ocfs2_dinode *) fe_bh->b_data;
el = &fe->id2.i_list;
- while(el->l_tree_depth > 1) {
- OCFS2_BUG_ON_RO(!el->l_next_free_rec);
- i = el->l_next_free_rec - 1;
- blkno = el->l_recs[i].e_blkno;
+ while(le16_to_cpu(el->l_tree_depth) > 1) {
+ OCFS2_BUG_ON_RO(le16_to_cpu(el->l_next_free_rec) == 0);
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno);
OCFS2_BUG_ON_RO(!blkno);
if (bh) {
@@ -718,7 +725,8 @@
OCFS2_BUG_ON_INVALID_EXTENT_BLOCK(eb);
el = &eb->h_list;
- if (el->l_next_free_rec < el->l_count) {
+ if (le16_to_cpu(el->l_next_free_rec) <
+ le16_to_cpu(el->l_count)) {
if (lowest_bh)
brelse(lowest_bh);
lowest_bh = bh;
@@ -768,8 +776,8 @@
if (el->l_tree_depth) {
/* jump to end of tree */
- status = ocfs2_read_block(osb, fe->i_last_eb_blk, &last_eb_bh,
- OCFS2_BH_CACHED, inode);
+ status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
+ &last_eb_bh, OCFS2_BH_CACHED, inode);
if (status < 0) {
mlog_exit(status);
goto bail;
@@ -779,10 +787,10 @@
}
/* Can we allocate without adding/shifting tree bits? */
- i = el->l_next_free_rec - 1;
- if (!el->l_next_free_rec
- || (el->l_next_free_rec < el->l_count)
- || !el->l_recs[i].e_clusters
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le16_to_cpu(el->l_next_free_rec) == 0
+ || (le16_to_cpu(el->l_next_free_rec) < le16_to_cpu(el->l_count))
+ || le32_to_cpu(el->l_recs[i].e_clusters) == 0
|| ocfs2_extent_contig(inode, &el->l_recs[i], start_blk))
goto out_add;
@@ -801,10 +809,12 @@
* another tree level */
if (shift) {
/* if we hit a leaf, we'd better be empty :) */
- BUG_ON(el->l_next_free_rec != el->l_count);
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) !=
+ le16_to_cpu(el->l_count));
BUG_ON(bh);
mlog(0, "ocfs2_allocate_extent: need to shift tree depth "
- "(current = %u)\n", fe->id2.i_list.l_tree_depth);
+ "(current = %u)\n",
+ le16_to_cpu(fe->id2.i_list.l_tree_depth));
/* ocfs2_shift_tree_depth will return us a buffer with
* the new extent block (so we can pass that to
@@ -817,7 +827,7 @@
}
/* Special case: we have room now if we shifted from
* tree_depth 0 */
- if (fe->id2.i_list.l_tree_depth == 1)
+ if (fe->id2.i_list.l_tree_depth == cpu_to_le16(1))
goto out_add;
}
@@ -1055,7 +1065,7 @@
tl = &di->id2.i_dealloc;
OCFS2_BUG_ON_INVALID_DINODE(di);
- num_to_flush = le32_to_cpu(tl->tl_used);
+ num_to_flush = le16_to_cpu(tl->tl_used);
mlog(0, "Flush %u records from truncate log #%"MLFu64"\n",
num_to_flush, OCFS2_I(tl_inode)->ip_blkno);
if (!num_to_flush) {
@@ -1072,7 +1082,7 @@
data_alloc_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
- -1);
+ OCFS2_INVALID_SLOT);
if (!data_alloc_inode) {
status = -EINVAL;
mlog(ML_ERROR, "Could not get bitmap inode!\n");
@@ -1270,7 +1280,7 @@
mlog_entry_void();
- if (OCFS2_I(tl_inode)->ip_blkno == tl_copy->i_blkno) {
+ if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
return -EINVAL;
}
@@ -1373,7 +1383,7 @@
static int ocfs2_find_new_last_ext_blk(ocfs2_super *osb,
struct inode *inode,
ocfs2_dinode *fe,
- unsigned int new_i_clusters,
+ u32 new_i_clusters,
struct buffer_head *old_last_eb,
struct buffer_head **new_last_eb)
{
@@ -1402,7 +1412,7 @@
/* Make sure that this guy will actually be empty after we
* clear away the data. */
- if (el->l_recs[0].e_cpos < new_i_clusters)
+ if (le32_to_cpu(el->l_recs[0].e_cpos) < new_i_clusters)
goto bail;
/* Ok, at this point, we know that last_eb will definitely
@@ -1411,9 +1421,10 @@
el = &(fe->id2.i_list);
/* go down the tree, */
do {
- for(i = (el->l_next_free_rec - 1); i >= 0; i--) {
- if (el->l_recs[i].e_cpos < new_i_clusters) {
- block = el->l_recs[i].e_blkno;
+ for(i = (le16_to_cpu(el->l_next_free_rec) - 1); i >= 0; i--) {
+ if (le32_to_cpu(el->l_recs[i].e_cpos) <
+ new_i_clusters) {
+ block = le64_to_cpu(el->l_recs[i].e_blkno);
break;
}
}
@@ -1468,7 +1479,8 @@
status = ocfs2_find_new_last_ext_blk(osb,
inode,
fe,
- fe->i_clusters - clusters_to_del,
+ le32_to_cpu(fe->i_clusters) -
+ clusters_to_del,
old_last_eb_bh,
&last_eb_bh);
if (status < 0) {
@@ -1487,25 +1499,26 @@
el = &(fe->id2.i_list);
spin_lock(&OCFS2_I(inode)->ip_lock);
- OCFS2_I(inode)->ip_clusters = fe->i_clusters - clusters_to_del;
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
+ clusters_to_del;
spin_unlock(&OCFS2_I(inode)->ip_lock);
- fe->i_clusters -= clusters_to_del;
- fe->i_mtime = CURRENT_TIME.tv_sec;
+ le32_add_cpu(&fe->i_clusters, -clusters_to_del);
+ fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(CURRENT_TIME.tv_nsec);
- i = el->l_next_free_rec - 1;
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
- BUG_ON(el->l_recs[i].e_clusters < clusters_to_del);
- el->l_recs[i].e_clusters -= clusters_to_del;
+ BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del);
+ le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del);
/* tree depth zero, we can just delete the clusters, otherwise
* we need to record the offset of the next level extent block
* as we may overwrite it. */
if (!el->l_tree_depth)
- delete_blk = el->l_recs[i].e_blkno
+ delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
+ ocfs2_clusters_to_blocks(osb->sb,
- el->l_recs[i].e_clusters);
+ le32_to_cpu(el->l_recs[i].e_clusters));
else
- next_eb = el->l_recs[i].e_blkno;
+ next_eb = le64_to_cpu(el->l_recs[i].e_blkno);
if (!el->l_recs[i].e_clusters) {
/* if we deleted the whole extent record, then clear
@@ -1515,10 +1528,10 @@
el->l_recs[i].e_cpos = 0;
el->l_recs[i].e_blkno = 0;
BUG_ON(!el->l_next_free_rec);
- el->l_next_free_rec--;
+ le16_add_cpu(&el->l_next_free_rec, -1);
}
- depth = el->l_tree_depth;
+ depth = le16_to_cpu(el->l_tree_depth);
if (!fe->i_clusters) {
/* trunc to zero is a special case. */
el->l_tree_depth = 0;
@@ -1571,37 +1584,41 @@
goto bail;
}
- BUG_ON(!el->l_next_free_rec);
- BUG_ON(depth != (el->l_tree_depth + 1));
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
+ BUG_ON(depth != (le16_to_cpu(el->l_tree_depth) + 1));
- i = el->l_next_free_rec - 1;
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
mlog(0, "extent block %"MLFu64", before: record %d: "
"(%u, %u, %"MLFu64"), next = %u\n",
le64_to_cpu(eb->h_blkno), i,
- el->l_recs[i].e_cpos, el->l_recs[i].e_clusters,
- el->l_recs[i].e_blkno, el->l_next_free_rec);
+ le32_to_cpu(el->l_recs[i].e_cpos),
+ le32_to_cpu(el->l_recs[i].e_clusters),
+ le64_to_cpu(el->l_recs[i].e_blkno),
+ le16_to_cpu(el->l_next_free_rec));
- BUG_ON(el->l_recs[i].e_clusters < clusters_to_del);
- el->l_recs[i].e_clusters -= clusters_to_del;
+ BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del);
+ le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del);
- next_eb = el->l_recs[i].e_blkno;
+ next_eb = le64_to_cpu(el->l_recs[i].e_blkno);
/* bottom-most block requires us to delete data.*/
if (!el->l_tree_depth)
- delete_blk = el->l_recs[i].e_blkno
+ delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
+ ocfs2_clusters_to_blocks(osb->sb,
- el->l_recs[i].e_clusters);
+ le32_to_cpu(el->l_recs[i].e_clusters));
if (!el->l_recs[i].e_clusters) {
el->l_recs[i].e_cpos = 0;
el->l_recs[i].e_blkno = 0;
BUG_ON(!el->l_next_free_rec);
- el->l_next_free_rec--;
+ le16_add_cpu(&el->l_next_free_rec, -1);
}
mlog(0, "extent block %"MLFu64", after: record %d: "
"(%u, %u, %"MLFu64"), next = %u\n",
le64_to_cpu(eb->h_blkno), i,
- el->l_recs[i].e_cpos, el->l_recs[i].e_clusters,
- el->l_recs[i].e_blkno, el->l_next_free_rec);
+ le32_to_cpu(el->l_recs[i].e_cpos),
+ le32_to_cpu(el->l_recs[i].e_clusters),
+ le64_to_cpu(el->l_recs[i].e_blkno),
+ le16_to_cpu(el->l_next_free_rec));
status = ocfs2_journal_dirty(handle, eb_bh);
if (status < 0) {
@@ -1642,7 +1659,7 @@
status = 0;
bail:
if (!status)
- ocfs2_extent_map_trunc(inode, fe->i_clusters);
+ ocfs2_extent_map_trunc(inode, le32_to_cpu(fe->i_clusters));
else
ocfs2_extent_map_drop(inode, 0);
mlog_exit(status);
@@ -1661,7 +1678,7 @@
struct ocfs2_truncate_context *tc)
{
int status, i, credits, tl_sem = 0;
- unsigned int clusters_to_del, target_i_clusters;
+ u32 clusters_to_del, target_i_clusters;
u64 last_eb = 0;
ocfs2_dinode *fe;
ocfs2_extent_block *eb;
@@ -1690,18 +1707,19 @@
el = &eb->h_list;
} else
el = &fe->id2.i_list;
- last_eb = fe->i_last_eb_blk;
+ last_eb = le64_to_cpu(fe->i_last_eb_blk);
start:
mlog(0, "ocfs2_commit_truncate: fe->i_clusters = %u, "
"last_eb = %"MLFu64", fe->i_last_eb_blk = %"MLFu64", "
"fe->id2.i_list.l_tree_depth = %u last_eb_bh = %p\n",
- fe->i_clusters, last_eb, fe->i_last_eb_blk,
- fe->id2.i_list.l_tree_depth, last_eb_bh);
+ le32_to_cpu(fe->i_clusters), last_eb,
+ le64_to_cpu(fe->i_last_eb_blk),
+ le16_to_cpu(fe->id2.i_list.l_tree_depth), last_eb_bh);
- if (last_eb != fe->i_last_eb_blk) {
+ if (last_eb != le64_to_cpu(fe->i_last_eb_blk)) {
mlog(0, "last_eb changed!\n");
BUG_ON(!fe->id2.i_list.l_tree_depth);
- last_eb = fe->i_last_eb_blk;
+ last_eb = le64_to_cpu(fe->i_last_eb_blk);
/* i_last_eb_blk may have changed, read it if
* necessary. We don't have to worry about the
* truncate to zero case here (where there becomes no
@@ -1726,12 +1744,13 @@
/* by now, el will point to the extent list on the bottom most
* portion of this tree. */
- i = el->l_next_free_rec - 1;
- if (el->l_recs[i].e_cpos >= target_i_clusters)
- clusters_to_del = el->l_recs[i].e_clusters;
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le32_to_cpu(el->l_recs[i].e_cpos) >= target_i_clusters)
+ clusters_to_del = le32_to_cpu(el->l_recs[i].e_clusters);
else
- clusters_to_del = (el->l_recs[i].e_clusters
- + el->l_recs[i].e_cpos) - target_i_clusters;
+ clusters_to_del = (le32_to_cpu(el->l_recs[i].e_clusters) +
+ le32_to_cpu(el->l_recs[i].e_cpos)) -
+ target_i_clusters;
mlog(0, "clusters_to_del = %u in this pass\n", clusters_to_del);
@@ -1783,8 +1802,8 @@
ocfs2_commit_trans(handle);
handle = NULL;
- BUG_ON(fe->i_clusters < target_i_clusters);
- if (fe->i_clusters > target_i_clusters)
+ BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters);
+ if (le32_to_cpu(fe->i_clusters) > target_i_clusters)
goto start;
bail:
up_write(&OCFS2_I(inode)->ip_alloc_sem);
@@ -1838,7 +1857,7 @@
mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
"%"MLFu64"\n", fe->i_clusters, new_i_clusters, fe->i_size);
- BUG_ON(fe->i_clusters <= new_i_clusters);
+ BUG_ON(le32_to_cpu(fe->i_clusters) <= new_i_clusters);
*tc = kcalloc(1, sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
if (!(*tc)) {
@@ -1852,7 +1871,7 @@
/* If we have a tree, then the truncate may result in
* metadata deletes. Figure this out from the
* rightmost leaf block.*/
- status = ocfs2_read_block(osb, fe->i_last_eb_blk,
+ status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
&last_eb_bh, OCFS2_BH_CACHED, inode);
if (status < 0) {
mlog_errno(status);
@@ -1861,7 +1880,7 @@
eb = (ocfs2_extent_block *) last_eb_bh->b_data;
OCFS2_BUG_ON_INVALID_EXTENT_BLOCK(eb);
el = &(eb->h_list);
- if (el->l_recs[0].e_cpos >= new_i_clusters)
+ if (le32_to_cpu(el->l_recs[0].e_cpos) >= new_i_clusters)
metadata_delete = 1;
}
Modified: trunk/fs/ocfs2/alloc.h
===================================================================
--- trunk/fs/ocfs2/alloc.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/alloc.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -48,7 +48,7 @@
* new tree_depth==0 extent_block, and one block at the new
* top-of-the tree.
*/
- return fe->id2.i_list.l_tree_depth + 2;
+ return le16_to_cpu(fe->id2.i_list.l_tree_depth) + 2;
}
int ocfs2_truncate_log_init(ocfs2_super *osb);
Modified: trunk/fs/ocfs2/aops.c
===================================================================
--- trunk/fs/ocfs2/aops.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/aops.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -78,7 +78,7 @@
}
if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
- fe->i_clusters)) {
+ le32_to_cpu(fe->i_clusters))) {
mlog(ML_ERROR, "block offset is outside the allocated size: "
"%llu\n", (unsigned long long)iblock);
goto bail;
@@ -87,8 +87,9 @@
/* We don't use the page cache to create symlink data, so if
* need be, copy it over from the buffer cache. */
if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
- buffer_cache_bh = sb_getblk(osb->sb,
- fe->id2.i_list.l_recs[0].e_blkno + iblock);
+ u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
+ iblock;
+ buffer_cache_bh = sb_getblk(osb->sb, blkno);
if (!buffer_cache_bh) {
mlog(ML_ERROR, "couldn't getblock for symlink!\n");
goto bail;
@@ -115,7 +116,7 @@
}
map_bh(bh_result, inode->i_sb,
- fe->id2.i_list.l_recs[0].e_blkno + iblock);
+ le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
err = 0;
Modified: trunk/fs/ocfs2/cluster/Makefile
===================================================================
--- trunk/fs/ocfs2/cluster/Makefile 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/Makefile 2005-08-12 23:35:01 UTC (rev 2514)
@@ -33,6 +33,7 @@
ver.c
HEADERS = \
+ endian.h \
heartbeat.h \
masklog.h \
nodemanager.h \
Copied: trunk/fs/ocfs2/cluster/endian.h (from rev 2512, branches/endian-safe/fs/ocfs2/cluster/endian.h)
Modified: trunk/fs/ocfs2/cluster/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/heartbeat.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -412,7 +412,8 @@
static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
struct o2hb_disk_heartbeat_block *hb_block)
{
- u32 old_cksum, ret;
+ __le32 old_cksum;
+ u32 ret;
/* We want to compute the block crc with a 0 value in the
* hb_cksum field. Save it off here and replace after the
@@ -439,12 +440,12 @@
static int o2hb_verify_crc(struct o2hb_region *reg,
struct o2hb_disk_heartbeat_block *hb_block)
{
- u32 computed, read;
+ u32 read, computed;
read = le32_to_cpu(hb_block->hb_cksum);
computed = o2hb_compute_block_crc_le(reg, hb_block);
- return read == le32_to_cpu(computed);
+ return read == computed;
}
/* We want to make sure that nobody is heartbeating on top of us --
@@ -493,7 +494,8 @@
hb_block->hb_generation = cpu_to_le64(generation);
/* This step must always happen last! */
- hb_block->hb_cksum = o2hb_compute_block_crc_le(reg, hb_block);
+ hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
+ hb_block));
mlog(ML_HB_BIO, "our node generation = 0x%"MLFx64", cksum = 0x%x\n",
cpu_to_le64(generation), le32_to_cpu(hb_block->hb_cksum));
Modified: trunk/fs/ocfs2/cluster/masklog.h
===================================================================
--- trunk/fs/ocfs2/cluster/masklog.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/masklog.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -212,7 +212,8 @@
/* We disable this for old compilers since they don't have support for
* __builtin_types_compatible_p.
*/
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) && \
+ !defined(__CHECKER__)
#define mlog_exit(st) do { \
if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
Modified: trunk/fs/ocfs2/cluster/net_proc.c
===================================================================
--- trunk/fs/ocfs2/cluster/net_proc.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/net_proc.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -269,11 +269,14 @@
#endif
u32 saddr = 0, daddr = 0;
- if (sc->sc_sock)
+ if (sc->sc_sock) {
inet = inet_sk(sc->sc_sock->sk);
saddr = inet->saddr;
daddr = inet->daddr;
+ }
+ /* XXX sigh, inet-> doesn't have sparse annotation so any
+ * use of it here generates a warning with -Wbitwise */
seq_printf(seq, "%p:\n"
" krefs: %d\n"
" sock: %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n"
Modified: trunk/fs/ocfs2/cluster/nodemanager.c
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/nodemanager.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -25,6 +25,7 @@
#include <linux/proc_fs.h>
#include <linux/configfs.h>
+#include "endian.h"
#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
@@ -142,7 +143,7 @@
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
- u32 ip_needle,
+ __be32 ip_needle,
struct rb_node ***ret_p,
struct rb_node **ret_parent)
{
@@ -154,9 +155,11 @@
parent = *p;
node = rb_entry(parent, struct o2nm_node, nd_ip_node);
- if (ip_needle < node->nd_ipv4_address)
+ if (memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle)) < 0)
p = &(*p)->rb_left;
- else if (ip_needle > node->nd_ipv4_address)
+ else if (memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle)) > 0)
p = &(*p)->rb_right;
else {
ret = node;
@@ -172,7 +175,7 @@
return ret;
}
-struct o2nm_node *o2nm_get_node_by_ip(u32 addr)
+struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
struct o2nm_node *node = NULL;
struct o2nm_cluster *cluster = o2nm_single_cluster;
@@ -328,7 +331,7 @@
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
- u32 ipv4_addr = 0; /* network order */
+ __be32 ipv4_addr = 0;
ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
&octets[1], &octets[0]);
@@ -338,9 +341,8 @@
for (i = 0; i < ARRAY_SIZE(octets); i++) {
if (octets[i] > 255)
return -ERANGE;
- ipv4_addr |= octets[i] << (i * 8);
+ be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}
- ipv4_addr = htonl(ipv4_addr);
ret = 0;
write_lock(&cluster->cl_nodes_lock);
Modified: trunk/fs/ocfs2/cluster/nodemanager.h
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/nodemanager.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -43,10 +43,9 @@
struct config_item nd_item;
char nd_name[O2NM_MAX_NAME_LEN+1]; /* replace? */
__u8 nd_num;
- /* only one address per node, as attributes, for now. both
- * in network order */
- __u32 nd_ipv4_address;
- __u16 nd_ipv4_port;
+ /* only one address per node, as attributes, for now. */
+ __be32 nd_ipv4_address;
+ __be16 nd_ipv4_port;
struct rb_node nd_ip_node;
/* there can be only one local node for now */
int nd_local;
@@ -58,7 +57,7 @@
int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
struct o2nm_node *o2nm_get_node_by_num(u8 node_num);
-struct o2nm_node *o2nm_get_node_by_ip(u32 addr);
+struct o2nm_node *o2nm_get_node_by_ip(__be32 addr);
void o2nm_node_get(struct o2nm_node *node);
void o2nm_node_put(struct o2nm_node *node);
Modified: trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h
===================================================================
--- trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -27,11 +27,11 @@
#define _OCFS2_HEARTBEAT_H
struct o2hb_disk_heartbeat_block {
- __u64 hb_seq;
+ __le64 hb_seq;
__u8 hb_node;
__u8 hb_pad1[3];
- __u32 hb_cksum;
- __u64 hb_generation;
+ __le32 hb_cksum;
+ __le64 hb_generation;
};
#endif /* _OCFS2_HEARTBEAT_H */
Modified: trunk/fs/ocfs2/cluster/quorum.c
===================================================================
--- trunk/fs/ocfs2/cluster/quorum.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/quorum.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -51,6 +51,7 @@
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
+#include "quorum.h"
static struct o2quo_state {
spinlock_t qs_lock;
Modified: trunk/fs/ocfs2/cluster/tcp.c
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/tcp.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -79,9 +79,11 @@
#define msglog(hdr, fmt, args...) do { \
typeof(hdr) __hdr = (hdr); \
mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
- "key %08x num %u] " fmt, __hdr->magic, __hdr->data_len, \
- __hdr->msg_type, __hdr->status, __hdr->sys_status, \
- __hdr->key, __hdr->msg_num , ##args); \
+ "key %08x num %u] " fmt, \
+ be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
+ be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
+ be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
+ be32_to_cpu(__hdr->msg_num) , ##args); \
} while (0)
#define sclog(sc, fmt, args...) do { \
@@ -156,28 +158,6 @@
return nn - o2net_nodes;
}
-static void o2net_msg_to_net(o2net_msg *m)
-{
- m->magic = htons(m->magic);
- m->data_len = htons(m->data_len);
- m->msg_type = htons(m->msg_type);
- m->sys_status = htonl(m->sys_status);
- m->status = htonl(m->status);
- m->key = htonl(m->key);
- m->msg_num = htonl(m->msg_num);
-}
-
-static void o2net_msg_to_host(o2net_msg *m)
-{
- m->magic = ntohs(m->magic);
- m->data_len = ntohs(m->data_len);
- m->msg_type = ntohs(m->msg_type);
- m->sys_status = ntohl(m->sys_status);
- m->status = ntohl(m->status);
- m->key = ntohl(m->key);
- m->msg_num = ntohl(m->msg_num);
-}
-
/* ------------------------------------------------------------ */
static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
@@ -854,12 +834,12 @@
static void o2net_init_msg(o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
{
memset(msg, 0, sizeof(o2net_msg));
- msg->magic = O2NET_MSG_MAGIC;
- msg->data_len = data_len;
- msg->msg_type = msg_type;
- msg->sys_status = O2NET_ERR_NONE;
+ msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
+ msg->data_len = cpu_to_be16(data_len);
+ msg->msg_type = cpu_to_be16(msg_type);
+ msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
msg->status = 0;
- msg->key = key;
+ msg->key = cpu_to_be32(key);
}
static int o2net_tx_can_proceed(struct o2net_node *nn,
@@ -967,15 +947,13 @@
if (ret)
goto out;
- msg->msg_num = nsw.ns_id;
+ msg->msg_num = cpu_to_be32(nsw.ns_id);
do_gettimeofday(&nst.st_send_time);
/* finally, convert the message header to network byte-order
* and send */
- o2net_msg_to_net(msg);
ret = o2net_send_tcp_msg(sc->sc_sock, iov, iovlen,
sizeof(o2net_msg) + caller_bytes);
- o2net_msg_to_host(msg); /* just swapping for printk, its unused now */
msglog(msg, "sending returned %d\n", ret);
if (ret < 0) {
mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
@@ -1032,14 +1010,13 @@
/* leave other fields intact from the incoming message, msg_num
* in particular */
- hdr->sys_status = syserr;
- hdr->status = err;
- hdr->magic = O2NET_MSG_STATUS_MAGIC; // twiddle the magic
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+ hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic
hdr->data_len = 0;
msglog(hdr, "about to send status magic %d\n", err);
/* hdr has been in host byteorder this whole time */
- o2net_msg_to_net(hdr);
return o2net_send_tcp_msg(sock, &iov, 1, sizeof(o2net_msg));
}
@@ -1057,11 +1034,13 @@
o2net_sc_postpone_idle(sc);
- switch(hdr->magic) {
+ switch(be16_to_cpu(hdr->magic)) {
case O2NET_MSG_STATUS_MAGIC:
/* special type for returning message status */
- o2net_complete_nsw(nn, NULL, hdr->msg_num,
- hdr->sys_status, hdr->status);
+ o2net_complete_nsw(nn, NULL,
+ be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status),
+ be32_to_cpu(hdr->status));
goto out;
case O2NET_MSG_KEEP_REQ_MAGIC:
o2net_sendpage(sc, o2net_keep_resp,
@@ -1080,23 +1059,25 @@
/* find a handler for it */
handler_status = 0;
- nmh = o2net_handler_get(hdr->msg_type, hdr->key);
+ nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
+ be32_to_cpu(hdr->key));
if (!nmh) {
mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
- hdr->msg_type, hdr->key);
+ be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
syserr = O2NET_ERR_NO_HNDLR;
goto out_respond;
}
syserr = O2NET_ERR_NONE;
- if (hdr->data_len > nmh->nh_max_len)
+ if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
syserr = O2NET_ERR_OVERFLOW;
if (syserr != O2NET_ERR_NONE)
goto out_respond;
- handler_status = (nmh->nh_func)(hdr, sizeof(o2net_msg) + hdr->data_len,
+ handler_status = (nmh->nh_func)(hdr, sizeof(o2net_msg) +
+ be16_to_cpu(hdr->data_len),
nmh->nh_func_data);
out_respond:
@@ -1118,16 +1099,13 @@
struct o2net_handshake *hand = page_address(sc->sc_page);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
- hand->protocol_version = be64_to_cpu(hand->protocol_version);
- hand->connector_id = be64_to_cpu(hand->connector_id);
-
- if (hand->protocol_version != O2NET_PROTOCOL_VERSION) {
+ if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
mlog(ML_NOTICE, "node %s at %u.%u.%u.%u:%u advertised "
"net protocol version %llu but %llu is required,"
"disconnecting\n", sc->sc_node->nd_name,
NIPQUAD(sc->sc_node->nd_ipv4_address),
ntohs(sc->sc_node->nd_ipv4_port),
- (unsigned long long)hand->protocol_version,
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
O2NET_PROTOCOL_VERSION);
/* don't bother reconnecting if its the wrong version. */
@@ -1187,8 +1165,8 @@
* being under to over */
if (sc->sc_page_off == sizeof(o2net_msg)) {
hdr = page_address(sc->sc_page);
- o2net_msg_to_host(hdr);
- if (hdr->data_len > O2NET_MAX_PAYLOAD_BYTES)
+ if (be16_to_cpu(hdr->data_len) >
+ O2NET_MAX_PAYLOAD_BYTES)
ret = -EOVERFLOW;
}
}
@@ -1207,10 +1185,10 @@
msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
/* do we need more payload? */
- if (sc->sc_page_off - sizeof(o2net_msg) < hdr->data_len) {
+ if (sc->sc_page_off - sizeof(o2net_msg) < be16_to_cpu(hdr->data_len)) {
/* need more payload */
data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = (sizeof(o2net_msg) + hdr->data_len) -
+ datalen = (sizeof(o2net_msg) + be16_to_cpu(hdr->data_len)) -
sc->sc_page_off;
ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
if (ret > 0)
@@ -1219,7 +1197,7 @@
goto out;
}
- if (sc->sc_page_off - sizeof(o2net_msg) == hdr->data_len) {
+ if (sc->sc_page_off - sizeof(o2net_msg) == be16_to_cpu(hdr->data_len)) {
/* we can only get here once, the first time we read
* the payload.. so set ret to progress if the handler
* works out. after calling this the message is toast */
@@ -1291,7 +1269,7 @@
mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
(unsigned long long)O2NET_PROTOCOL_VERSION,
- (unsigned long long)o2net_hand->connector_id);
+ (unsigned long long)be64_to_cpu(o2net_hand->connector_id));
o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
sc_put(sc);
@@ -1703,7 +1681,7 @@
ready(sk, bytes);
}
-static int o2net_open_listening_sock(u16 port)
+static int o2net_open_listening_sock(__be16 port)
{
struct socket *sock = NULL;
int ret;
Modified: trunk/fs/ocfs2/cluster/tcp.h
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/tcp.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -39,14 +39,14 @@
typedef struct _o2net_msg
{
- __u16 magic;
- __u16 data_len;
- __u16 msg_type;
- __u16 pad1;
- __u32 sys_status;
- __s32 status;
- __u32 key;
- __u32 msg_num;
+ __be16 magic;
+ __be16 data_len;
+ __be16 msg_type;
+ __be16 pad1;
+ __be32 sys_status;
+ __be32 status;
+ __be32 key;
+ __be32 msg_num;
__u8 buf[0];
} o2net_msg;
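With o2net_msg carrying __be fields as above, the in-place swap helpers deleted from tcp.c are no longer needed; each field is converted exactly once, where it is used. A hypothetical accessor (o2net_msg_payload_len() is not part of this commit) showing the shape of that style:

/* Illustration only: convert at the use site instead of swapping the
 * whole header in place. */
static u16 o2net_msg_payload_len(const o2net_msg *msg)
{
	return be16_to_cpu(msg->data_len);
}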
Modified: trunk/fs/ocfs2/cluster/tcp_internal.h
===================================================================
--- trunk/fs/ocfs2/cluster/tcp_internal.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/cluster/tcp_internal.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -42,8 +42,8 @@
#define O2NET_PROTOCOL_VERSION 1ULL
struct o2net_handshake {
- u64 protocol_version;
- u64 connector_id;
+ __be64 protocol_version;
+ __be64 connector_id;
};
struct o2net_node {
Modified: trunk/fs/ocfs2/dir.c
===================================================================
--- trunk/fs/ocfs2/dir.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dir.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -475,7 +475,7 @@
memset(new_bh->b_data, 0, sb->s_blocksize);
de = (struct ocfs2_dir_entry *) new_bh->b_data;
de->inode = 0;
- de->rec_len = le16_to_cpu(sb->s_blocksize);
+ de->rec_len = cpu_to_le16(sb->s_blocksize);
status = ocfs2_journal_dirty(handle, new_bh);
if (status < 0) {
mlog_errno(status);
@@ -535,7 +535,7 @@
BUG_ON(!S_ISDIR(dir->i_mode));
fe = (ocfs2_dinode *) parent_fe_bh->b_data;
- BUG_ON(fe->i_size != i_size_read(dir));
+ BUG_ON(le64_to_cpu(fe->i_size) != i_size_read(dir));
sb = dir->i_sb;
Modified: trunk/fs/ocfs2/dlm/dlmast.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmast.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmast.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -43,6 +43,7 @@
#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
+#include "cluster/endian.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -264,11 +265,10 @@
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
- dlm_proxy_ast_to_host(past);
name = past->name;
locklen = past->namelen;
- cookie = past->cookie;
- flags = past->flags;
+ cookie = be64_to_cpu(past->cookie);
+ flags = be32_to_cpu(past->flags);
if (locklen > DLM_LOCKID_NAME_MAX) {
ret = DLM_IVBUFLEN;
@@ -327,7 +327,7 @@
lock = NULL;
list_for_each(iter, head) {
lock = list_entry (iter, struct dlm_lock, list);
- if (lock->ml.cookie == cookie)
+ if (be64_to_cpu(lock->ml.cookie) == cookie)
goto do_ast;
}
@@ -339,7 +339,7 @@
list_for_each(iter, head) {
lock = list_entry (iter, struct dlm_lock, list);
- if (lock->ml.cookie == cookie)
+ if (be64_to_cpu(lock->ml.cookie) == cookie)
goto do_ast;
}
@@ -419,13 +419,12 @@
iov[0].iov_base = &past;
if (flags & DLM_LKSB_GET_LVB) {
mlog(0, "returning requested LVB data\n");
- past.flags |= LKM_GET_LVB;
+ be32_add_cpu(&past.flags, LKM_GET_LVB);
iov[1].iov_len = DLM_LVB_LEN;
iov[1].iov_base = lock->lksb->lvb;
iovlen++;
}
- dlm_proxy_ast_to_net(&past);
ret = o2net_send_message_iov(DLM_PROXY_AST_MSG, dlm->key, iov, iovlen,
lock->ml.node, &status);
if (ret < 0)
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -242,11 +242,11 @@
struct dlm_migratable_lock
{
- u64 cookie;
+ __be64 cookie;
/* these 3 are just padding for the in-memory structure, but
* list and flags are actually used when sent over the wire */
- u16 pad1;
+ __be16 pad1;
u8 list; // 0=granted, 1=converting, 2=blocked
u8 flags;
@@ -411,8 +411,8 @@
{
u8 node_idx;
u8 namelen;
- u16 pad1;
- u32 flags;
+ __be16 pad1;
+ __be32 flags;
u8 name[O2NM_MAX_NAME_LEN];
};
@@ -424,8 +424,8 @@
{
u8 node_idx;
u8 namelen;
- u16 pad1;
- u32 flags;
+ __be16 pad1;
+ __be32 flags;
u8 name[O2NM_MAX_NAME_LEN];
};
@@ -436,7 +436,7 @@
u8 new_master;
u8 namelen;
u8 pad1;
- u32 pad2;
+ __be32 pad2;
u8 name[O2NM_MAX_NAME_LEN];
};
@@ -446,7 +446,7 @@
u8 pad2;
u8 node_idx;
u8 namelen;
- u32 pad3;
+ __be32 pad3;
u8 name[O2NM_MAX_NAME_LEN];
};
@@ -492,8 +492,8 @@
u8 lockname_len;
u8 num_locks; // locks sent in this structure
u8 flags;
- u32 total_locks; // locks to be sent for this migration cookie
- u64 mig_cookie; // cookie for this lockres migration
+ __be32 total_locks; // locks to be sent for this migration cookie
+ __be64 mig_cookie; // cookie for this lockres migration
// or zero if not needed
// 16 bytes
u8 lockname[DLM_LOCKID_NAME_MAX];
@@ -514,9 +514,9 @@
struct dlm_create_lock
{
- u64 cookie;
+ __be64 cookie;
- u32 flags;
+ __be32 flags;
u8 pad1;
u8 node_idx;
s8 requested_type;
@@ -527,9 +527,9 @@
struct dlm_convert_lock
{
- u64 cookie;
+ __be64 cookie;
- u32 flags;
+ __be32 flags;
u8 pad1;
u8 node_idx;
s8 requested_type;
@@ -543,10 +543,10 @@
struct dlm_unlock_lock
{
- u64 cookie;
+ __be64 cookie;
- u32 flags;
- u16 pad1;
+ __be32 flags;
+ __be16 pad1;
u8 node_idx;
u8 namelen;
@@ -558,9 +558,9 @@
struct dlm_proxy_ast
{
- u64 cookie;
+ __be64 cookie;
- u32 flags;
+ __be32 flags;
u8 node_idx;
u8 type;
u8 blocked_type;
@@ -583,16 +583,16 @@
{
u8 node_idx;
u8 dead_node;
- u16 pad1;
- u32 pad2;
+ __be16 pad1;
+ __be32 pad2;
};
struct dlm_reco_data_done
{
u8 node_idx;
u8 dead_node;
- u16 pad1;
- u32 pad2;
+ __be16 pad1;
+ __be32 pad2;
/* unused for now */
/* eventually we can use this to attempt
@@ -604,8 +604,8 @@
{
u8 node_idx;
u8 dead_node;
- u16 pad1;
- u32 pad2;
+ __be16 pad1;
+ __be32 pad2;
};
@@ -643,197 +643,10 @@
{
u8 node_idx;
u8 dead_node;
- u16 pad1;
- u32 pad2;
+ __be16 pad1;
+ __be32 pad2;
};
-
-static inline void
-dlm_query_join_request_to_net(struct dlm_query_join_request *m)
-{
- /* do nothing */
-}
-static inline void
-dlm_query_join_request_to_host(struct dlm_query_join_request *m)
-{
- /* do nothing */
-}
-static inline void dlm_assert_joined_to_net(struct dlm_assert_joined *m)
-{
- /* do nothing */
-}
-static inline void dlm_assert_joined_to_host(struct dlm_assert_joined *m)
-{
- /* do nothing */
-}
-static inline void dlm_cancel_join_to_net(struct dlm_cancel_join *m)
-{
- /* do nothing */
-}
-static inline void dlm_cancel_join_to_host(struct dlm_cancel_join *m)
-{
- /* do nothing */
-}
-static inline void dlm_exit_domin_to_net(struct dlm_exit_domain *m)
-{
- /* do nothing */
-}
-static inline void dlm_exit_domain_to_host(struct dlm_exit_domain *m)
-{
- /* do nothing */
-}
-static inline void dlm_master_request_to_net(struct dlm_master_request *m)
-{
- m->flags = htonl(m->flags);
-}
-static inline void dlm_master_request_to_host(struct dlm_master_request *m)
-{
- m->flags = ntohl(m->flags);
-}
-
-static inline void dlm_assert_master_to_net(struct dlm_assert_master *m)
-{
- m->flags = htonl(m->flags);
-}
-static inline void dlm_assert_master_to_host(struct dlm_assert_master *m)
-{
- m->flags = ntohl(m->flags);
-}
-
-static inline void dlm_migrate_request_to_net(struct dlm_migrate_request *m)
-{
- /* do nothing */
-}
-static inline void dlm_migrate_request_to_host(struct dlm_migrate_request *m)
-{
- /* do nothing */
-}
-
-static inline void dlm_master_requery_to_net(struct dlm_master_requery *m)
-{
- /* do nothing */
-}
-static inline void dlm_master_requery_to_host(struct dlm_master_requery *m)
-{
- /* do nothing */
-}
-
-static inline void dlm_create_lock_to_net(struct dlm_create_lock *c)
-{
- c->cookie = cpu_to_be64(c->cookie);
- c->flags = htonl(c->flags);
-}
-static inline void dlm_create_lock_to_host(struct dlm_create_lock *c)
-{
- c->cookie = be64_to_cpu(c->cookie);
- c->flags = ntohl(c->flags);
-}
-
-static inline void dlm_convert_lock_to_net(struct dlm_convert_lock *c)
-{
- c->cookie = cpu_to_be64(c->cookie);
- c->flags = htonl(c->flags);
-}
-static inline void dlm_convert_lock_to_host(struct dlm_convert_lock *c)
-{
- c->cookie = be64_to_cpu(c->cookie);
- c->flags = ntohl(c->flags);
-}
-
-static inline void dlm_unlock_lock_to_net(struct dlm_unlock_lock *u)
-{
- u->cookie = cpu_to_be64(u->cookie);
- u->flags = htonl(u->flags);
-}
-static inline void dlm_unlock_lock_to_host(struct dlm_unlock_lock *u)
-{
- u->cookie = be64_to_cpu(u->cookie);
- u->flags = ntohl(u->flags);
-}
-
-static inline void dlm_proxy_ast_to_net(struct dlm_proxy_ast *a)
-{
- a->cookie = cpu_to_be64(a->cookie);
- a->flags = htonl(a->flags);
-}
-static inline void dlm_proxy_ast_to_host(struct dlm_proxy_ast *a)
-{
- a->cookie = be64_to_cpu(a->cookie);
- a->flags = ntohl(a->flags);
-}
-static inline void dlm_migratable_lock_to_net(struct dlm_migratable_lock *ml)
-{
- ml->cookie = cpu_to_be64(ml->cookie);
-}
-static inline void dlm_migratable_lock_to_host(struct dlm_migratable_lock *ml)
-{
- ml->cookie = be64_to_cpu(ml->cookie);
-}
-static inline void dlm_lock_request_to_net(struct dlm_lock_request *r)
-{
- /* do nothing */
-}
-static inline void dlm_lock_request_to_host(struct dlm_lock_request *r)
-{
- /* do nothing */
-}
-static inline void dlm_reco_data_done_to_net(struct dlm_reco_data_done *r)
-{
- /* do nothing */
-}
-static inline void dlm_reco_data_done_to_host(struct dlm_reco_data_done *r)
-{
- /* do nothing */
-}
-
-static inline void dlm_begin_reco_to_net(struct dlm_begin_reco *r)
-{
- /* do nothing */
-}
-static inline void dlm_begin_reco_to_host(struct dlm_begin_reco *r)
-{
- /* do nothing */
-}
-static inline void dlm_finalize_reco_to_net(struct dlm_finalize_reco *f)
-{
- /* do nothing */
-}
-static inline void dlm_finalize_reco_to_host(struct dlm_finalize_reco *f)
-{
- /* do nothing */
-}
-
-static inline void
-dlm_migratable_lockres_to_net(struct dlm_migratable_lockres *mr)
-{
- int i, nr = mr->total_locks;
-
- BUG_ON(nr < 0);
- BUG_ON(nr > DLM_MAX_MIGRATABLE_LOCKS);
-
- mr->total_locks = htonl(mr->total_locks);
- mr->mig_cookie = cpu_to_be64(mr->mig_cookie);
-
- for (i=0; i<nr; i++)
- dlm_migratable_lock_to_net(&(mr->ml[i]));
-}
-
-static inline void
-dlm_migratable_lockres_to_host(struct dlm_migratable_lockres *mr)
-{
- int i, nr;
-
- mr->total_locks = ntohl(mr->total_locks);
- mr->mig_cookie = be64_to_cpu(mr->mig_cookie);
-
- nr = mr->total_locks;
- BUG_ON(nr < 0);
- BUG_ON(nr > DLM_MAX_MIGRATABLE_LOCKS);
-
- for (i=0; i<nr; i++)
- dlm_migratable_lock_to_host(&(mr->ml[i]));
-}
-
static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
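The dlm_*_to_net()/dlm_*_to_host() helpers removed above all disappear for the same reason: the wire structs now keep __be fields permanently, so senders fill them with cpu_to_be*() and handlers read them back with be*_to_cpu(), exactly as the dlmconvert.c, dlmlock.c and dlmunlock.c hunks below do. A hypothetical sender-side filler illustrating that direction of the pattern:

/* Not part of the commit -- in the real code the cookie is already
 * stored as __be64 in dlm_migratable_lock and copied straight across;
 * this only shows the fill-with-cpu_to_be*() shape. */
static void dlm_fill_unlock_example(struct dlm_unlock_lock *unlock,
				    u64 cookie, u32 flags)
{
	unlock->cookie = cpu_to_be64(cookie);
	unlock->flags = cpu_to_be32(flags);
}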
Modified: trunk/fs/ocfs2/dlm/dlmconvert.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmconvert.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmconvert.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -357,7 +357,7 @@
convert.requested_type = type;
convert.cookie = lock->ml.cookie;
convert.namelen = res->lockname.len;
- convert.flags = flags;
+ convert.flags = cpu_to_be32(flags);
memcpy(convert.name, res->lockname.name, convert.namelen);
iov[0].iov_len = sizeof(struct dlm_convert_lock);
@@ -370,7 +370,6 @@
iovlen++;
}
- dlm_convert_lock_to_net(&convert);
tmpret = o2net_send_message_iov(DLM_CONVERT_LOCK_MSG, dlm->key,
iov, iovlen, res->owner, &status);
if (tmpret >= 0) {
@@ -430,15 +429,13 @@
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
- dlm_convert_lock_to_host(cnv);
-
if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
status = DLM_IVBUFLEN;
dlm_error(status);
goto leave;
}
- flags = cnv->flags;
+ flags = be32_to_cpu(cnv->flags);
if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
(LKM_PUT_LVB|LKM_GET_LVB)) {
Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -396,8 +396,6 @@
if (!dlm_grab(dlm))
return 0;
- dlm_exit_domain_to_host(exit_msg);
-
node = exit_msg->node_idx;
mlog(0, "Node %u leaves domain %s\n", node, dlm->name);
@@ -424,8 +422,6 @@
memset(&leave_msg, 0, sizeof(leave_msg));
leave_msg.node_idx = dlm->node_num;
- dlm_exit_domin_to_net(&leave_msg);
-
status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
&leave_msg, sizeof(leave_msg), node,
NULL);
@@ -554,7 +550,6 @@
struct dlm_ctxt *dlm = NULL;
query = (struct dlm_query_join_request *) msg->buf;
- dlm_query_join_request_to_host(query);
mlog(0, "node %u wants to join domain %s\n", query->node_idx,
query->domain);
@@ -615,7 +610,6 @@
struct dlm_ctxt *dlm = NULL;
assert = (struct dlm_assert_joined *) msg->buf;
- dlm_assert_joined_to_host(assert);
mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
assert->domain);
@@ -648,7 +642,6 @@
struct dlm_ctxt *dlm = NULL;
cancel = (struct dlm_cancel_join *) msg->buf;
- dlm_cancel_join_to_host(cancel);
mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
cancel->domain);
@@ -682,8 +675,6 @@
cancel_msg.name_len = strlen(dlm->name);
memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
- dlm_cancel_join_to_net(&cancel_msg);
-
status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
&cancel_msg, sizeof(cancel_msg), node,
NULL);
@@ -747,8 +738,6 @@
join_msg.name_len = strlen(dlm->name);
memcpy(join_msg.domain, dlm->name, join_msg.name_len);
- dlm_query_join_request_to_net(&join_msg);
-
status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
sizeof(join_msg), node, &retval);
if (status < 0 && status != -ENOPROTOOPT) {
@@ -793,8 +782,6 @@
assert_msg.name_len = strlen(dlm->name);
memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
- dlm_assert_joined_to_net(&assert_msg);
-
status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
&assert_msg, sizeof(assert_msg), node,
NULL);
Modified: trunk/fs/ocfs2/dlm/dlmlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmlock.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmlock.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -250,10 +250,9 @@
create.requested_type = lock->ml.type;
create.cookie = lock->ml.cookie;
create.namelen = res->lockname.len;
- create.flags = flags;
+ create.flags = cpu_to_be32(flags);
memcpy(create.name, res->lockname.name, create.namelen);
- dlm_create_lock_to_net(&create);
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
@@ -342,7 +341,7 @@
newlock->ast = NULL;
newlock->bast = NULL;
newlock->astdata = NULL;
- newlock->ml.cookie = cookie;
+ newlock->ml.cookie = cpu_to_be64(cookie);
newlock->ast_pending = 0;
newlock->bast_pending = 0;
newlock->convert_pending = 0;
@@ -410,7 +409,6 @@
mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
"Domain %s not fully joined!\n", dlm->name);
- dlm_create_lock_to_host(create);
name = create->name;
namelen = create->namelen;
@@ -423,7 +421,7 @@
status = DLM_SYSERR;
newlock = dlm_new_lock(create->requested_type,
create->node_idx,
- create->cookie, NULL);
+ be64_to_cpu(create->cookie), NULL);
if (!newlock) {
dlm_error(status);
goto leave;
@@ -431,7 +429,7 @@
lksb = newlock->lksb;
- if (create->flags & LKM_GET_LVB) {
+ if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
lksb->flags |= DLM_LKSB_GET_LVB;
mlog(0, "set DLM_LKSB_GET_LVB flag\n");
}
@@ -454,7 +452,7 @@
dlm_lock_attach_lockres(newlock, res);
- status = dlmlock_master(dlm, res, newlock, create->flags);
+ status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
if (status != DLM_NORMAL)
if (newlock)
Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -1059,7 +1059,6 @@
request.namelen);
}
- dlm_master_request_to_net(&request);
again:
ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
sizeof(request), to, &response);
@@ -1155,7 +1154,6 @@
goto send_response;
}
- dlm_master_request_to_host(request);
name = request->name;
namelen = request->namelen;
@@ -1363,9 +1361,8 @@
assert.node_idx = dlm->node_num;
assert.namelen = namelen;
memcpy(assert.name, lockname, namelen);
- assert.flags = flags;
+ assert.flags = cpu_to_be32(flags);
- dlm_assert_master_to_net(&assert);
tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
@@ -1412,10 +1409,9 @@
if (!dlm_grab(dlm))
return 0;
- dlm_assert_master_to_host(assert);
name = assert->name;
namelen = assert->namelen;
- flags = assert->flags;
+ flags = be32_to_cpu(assert->flags);
if (namelen > DLM_LOCKID_NAME_MAX) {
mlog(ML_ERROR, "Invalid name length!");
@@ -2053,8 +2049,6 @@
migrate.new_master = new_master;
migrate.master = master;
- dlm_migrate_request_to_net(&migrate);
-
ret = 0;
/* send message to all nodes, except the master and myself */
@@ -2103,7 +2097,6 @@
if (!dlm_grab(dlm))
return -EINVAL;
- dlm_migrate_request_to_host(migrate);
name = migrate->name;
namelen = migrate->namelen;
Modified: trunk/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -606,7 +606,6 @@
// send message
ret = DLM_NOLOCKMGR;
- dlm_lock_request_to_net(&lr);
ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
&lr, sizeof(lr), request_from, NULL);
if (ret < 0)
@@ -628,7 +627,6 @@
if (!dlm_grab(dlm))
return -EINVAL;
- dlm_lock_request_to_host(lr);
BUG_ON(lr->dead_node != dlm->reco.dead_node);
item = kcalloc(1, sizeof(*item), GFP_KERNEL);
@@ -718,7 +716,6 @@
mlog(0, "sending DATA DONE message to %u, "
"my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
done_msg.dead_node);
- dlm_reco_data_done_to_net(&done_msg);
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
@@ -739,7 +736,6 @@
if (!dlm_grab(dlm))
return -EINVAL;
- dlm_reco_data_done_to_host(done);
mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
"node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
@@ -838,8 +834,8 @@
struct dlm_lock_resource *res,
int total_locks)
{
- u64 mig_cookie = mres->mig_cookie;
- int mres_total_locks = mres->total_locks;
+ u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
+ int mres_total_locks = be32_to_cpu(mres->total_locks);
int sz, ret = 0, status = 0;
u8 orig_flags = mres->flags,
orig_master = mres->master;
@@ -857,9 +853,6 @@
if (total_locks == mres_total_locks)
mres->flags |= DLM_MRES_ALL_DONE;
- /* convert to net byteorder */
- dlm_migratable_lockres_to_net(mres);
-
/* send it */
ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
sz, send_to, &status);
@@ -896,8 +889,8 @@
mres->lockname_len = namelen;
memcpy(mres->lockname, lockname, namelen);
mres->num_locks = 0;
- mres->total_locks = total_locks;
- mres->mig_cookie = cookie;
+ mres->total_locks = cpu_to_be32(total_locks);
+ mres->mig_cookie = cpu_to_be64(cookie);
mres->flags = flags;
mres->master = master;
}
@@ -1030,8 +1023,6 @@
if (!dlm_grab(dlm))
return -EINVAL;
- dlm_migratable_lockres_to_host(mres);
-
BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
real_master = mres->master;
@@ -1047,7 +1038,7 @@
mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
- buf = kmalloc(msg->data_len, GFP_KERNEL);
+ buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
item = kcalloc(1, sizeof(*item), GFP_KERNEL);
if (!buf || !item)
goto leave;
@@ -1121,7 +1112,7 @@
/* queue up work for dlm_mig_lockres_worker */
dlm_grab(dlm); /* get an extra ref for the work item */
- memcpy(buf, msg->buf, msg->data_len); /* copy the whole message */
+ memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
item->u.ml.lockres = res; /* already have a ref */
item->u.ml.real_master = real_master;
@@ -1268,7 +1259,6 @@
req.namelen = res->lockname.len;
memcpy(req.name, res->lockname.name, res->lockname.len);
- dlm_master_requery_to_net(&req);
ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
&req, sizeof(req), nodenum, &status);
if (ret < 0)
@@ -1302,8 +1292,6 @@
return master;
}
- dlm_master_requery_to_host(req);
-
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
if (res) {
@@ -1425,7 +1413,8 @@
}
/* lock is for another node. */
- newlock = dlm_new_lock(ml->type, ml->node, ml->cookie, NULL);
+ newlock = dlm_new_lock(ml->type, ml->node,
+ be64_to_cpu(ml->cookie), NULL);
if (!newlock) {
ret = -ENOMEM;
goto leave;
@@ -1893,7 +1882,6 @@
memset(&br, 0, sizeof(br));
br.node_idx = dlm->node_num;
br.dead_node = dead_node;
- dlm_begin_reco_to_net(&br);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = 0;
@@ -1943,8 +1931,6 @@
if (!dlm_grab(dlm))
return 0;
- dlm_begin_reco_to_host(br);
-
mlog(0, "node %u wants to recover node %u\n",
br->node_idx, br->dead_node);
@@ -1992,7 +1978,6 @@
memset(&fr, 0, sizeof(fr));
fr.node_idx = dlm->node_num;
fr.dead_node = dlm->reco.dead_node;
- dlm_finalize_reco_to_net(&fr);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
if (nodenum == dlm->node_num)
@@ -2019,8 +2004,6 @@
if (!dlm_grab(dlm))
return 0;
- dlm_finalize_reco_to_host(fr);
-
mlog(0, "node %u finalizing recovery of node %u\n",
fr->node_idx, fr->dead_node);
Modified: trunk/fs/ocfs2/dlm/dlmunlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmunlock.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/dlmunlock.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -306,7 +306,7 @@
memset(&unlock, 0, sizeof(unlock));
unlock.node_idx = dlm->node_num;
- unlock.flags = flags;
+ unlock.flags = cpu_to_be32(flags);
unlock.cookie = lock->ml.cookie;
unlock.namelen = res->lockname.len;
memcpy(unlock.name, res->lockname.name, unlock.namelen);
@@ -321,7 +321,6 @@
iovlen++;
}
- dlm_unlock_lock_to_net(&unlock);
tmpret = o2net_send_message_iov(DLM_UNLOCK_LOCK_MSG, dlm->key,
iov, iovlen, owner, &status);
if (tmpret >= 0) {
@@ -377,8 +376,7 @@
u32 flags;
struct list_head *queue;
- dlm_unlock_lock_to_host(unlock);
- flags = unlock->flags;
+ flags = be32_to_cpu(unlock->flags);
if (flags & LKM_GET_LVB) {
mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
Modified: trunk/fs/ocfs2/dlm/userdlm.c
===================================================================
--- trunk/fs/ocfs2/dlm/userdlm.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlm/userdlm.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -640,7 +640,7 @@
return ERR_PTR(-ENOMEM);
}
- dlm_key = crc32(0, name->name, name->len);
+ dlm_key = crc32_be(0, name->name, name->len);
snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
Modified: trunk/fs/ocfs2/dlmglue.c
===================================================================
--- trunk/fs/ocfs2/dlmglue.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlmglue.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -1515,10 +1515,12 @@
"inode->i_generation: %u\n",
oi->ip_blkno, le32_to_cpu(fe->i_generation),
inode->i_generation);
- mlog_bug_on_msg(fe->i_dtime || !(fe->i_flags & OCFS2_VALID_FL),
+ mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
+ !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
"Stale dinode %"MLFu64" dtime: %"MLFu64" "
- "flags: 0x%x\n", oi->ip_blkno, fe->i_dtime,
- fe->i_flags);
+ "flags: 0x%x\n", oi->ip_blkno,
+ le64_to_cpu(fe->i_dtime),
+ le32_to_cpu(fe->i_flags));
ocfs2_refresh_inode(inode, fe);
}
@@ -1796,7 +1798,7 @@
/* used by the dlm code to make message headers unique, each
* node in this domain must agree on this. */
- dlm_key = crc32(0, osb->uuid_str, strlen(osb->uuid_str));
+ dlm_key = crc32_be(0, osb->uuid_str, strlen(osb->uuid_str));
/* for now, uuid == domain */
dlm = dlm_register_domain(osb->uuid_str, dlm_key);
Modified: trunk/fs/ocfs2/dlmglue.h
===================================================================
--- trunk/fs/ocfs2/dlmglue.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/dlmglue.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -61,21 +61,21 @@
* do an additional increment of my local seq.
*/
struct ocfs2_lvb {
- u32 lvb_seq;
+ __be32 lvb_seq;
};
struct ocfs2_meta_lvb {
struct ocfs2_lvb lvb;
- u32 lvb_trunc_clusters;
- u32 lvb_iclusters;
- u32 lvb_iuid;
- u32 lvb_igid;
- u16 lvb_imode;
- u16 lvb_inlink;
- u64 lvb_iatime_packed;
- u64 lvb_ictime_packed;
- u64 lvb_imtime_packed;
- u32 lvb_isize_off;
- u32 lvb_reserved[3];
+ __be32 lvb_trunc_clusters;
+ __be32 lvb_iclusters;
+ __be32 lvb_iuid;
+ __be32 lvb_igid;
+ __be16 lvb_imode;
+ __be16 lvb_inlink;
+ __be64 lvb_iatime_packed;
+ __be64 lvb_ictime_packed;
+ __be64 lvb_imtime_packed;
+ __be32 lvb_isize_off;
+ __be32 lvb_reserved[3];
};
int ocfs2_dlm_init(ocfs2_super *osb);
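
With the LVB fields declared __be16/__be32/__be64, whoever fills the block must store cpu_to_be*() values and every reader converts back with be*_to_cpu(); the in-memory bytes are then identical on every node. A simplified userspace round trip under that assumption (htobe32()/be64toh() and friends from <endian.h> stand in for the kernel macros; the struct below is a cut-down stand-in, not the real ocfs2_meta_lvb):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for a big-endian lock value block. */
struct demo_lvb {
	uint32_t lvb_seq;            /* big-endian */
	uint32_t lvb_iclusters;      /* big-endian */
	uint64_t lvb_imtime_packed;  /* big-endian */
};

/* Producer side: convert host values once, when filling the LVB. */
static void demo_lvb_set(struct demo_lvb *lvb, uint32_t seq,
			 uint32_t clusters, uint64_t mtime_packed)
{
	lvb->lvb_seq = htobe32(seq);
	lvb->lvb_iclusters = htobe32(clusters);
	lvb->lvb_imtime_packed = htobe64(mtime_packed);
}

/* Consumer side: convert back once, when reading. */
static void demo_lvb_show(const struct demo_lvb *lvb)
{
	printf("seq %u clusters %u mtime %llu\n",
	       be32toh(lvb->lvb_seq),
	       be32toh(lvb->lvb_iclusters),
	       (unsigned long long)be64toh(lvb->lvb_imtime_packed));
}

int main(void)
{
	struct demo_lvb lvb;
	const unsigned char *p = (const unsigned char *)&lvb.lvb_seq;

	demo_lvb_set(&lvb, 7, 128, 0x1122334455667788ULL);
	demo_lvb_show(&lvb);

	/* Same byte layout on little- and big-endian hosts: 00 00 00 07. */
	printf("lvb_seq bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
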
Copied: trunk/fs/ocfs2/endian.h (from rev 2512, branches/endian-safe/fs/ocfs2/endian.h)
Modified: trunk/fs/ocfs2/extent_map.c
===================================================================
--- trunk/fs/ocfs2/extent_map.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/extent_map.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -81,7 +81,21 @@
int tree_depth,
struct ocfs2_em_insert_context *ctxt);
+/* returns 1 only if the rec contains all the given clusters -- that is, the
+ * rec's cpos is <= the given cpos and the rec endpoint (cpos +
+ * clusters) is >= the argument's endpoint */
+static int ocfs2_extent_rec_contains_clusters(ocfs2_extent_rec *rec,
+ u32 cpos, u32 clusters)
+{
+ if (le32_to_cpu(rec->e_cpos) > cpos)
+ return 0;
+ if (cpos + clusters > le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters))
+ return 0;
+ return 1;
+}
+
/*
* Find an entry in the tree that intersects the region passed in.
* Note that this will find straddled intervals, it is up to the
@@ -109,11 +123,11 @@
parent = *p;
ent = rb_entry(parent, struct ocfs2_extent_map_entry,
e_node);
- if ((cpos + clusters) <= ent->e_rec.e_cpos) {
+ if ((cpos + clusters) <= le32_to_cpu(ent->e_rec.e_cpos)) {
p = &(*p)->rb_left;
ent = NULL;
- } else if (cpos >= (ent->e_rec.e_cpos +
- ent->e_rec.e_clusters)) {
+ } else if (cpos >= (le32_to_cpu(ent->e_rec.e_cpos) +
+ le32_to_cpu(ent->e_rec.e_clusters))) {
p = &(*p)->rb_right;
ent = NULL;
} else
@@ -143,6 +157,7 @@
int i, ret;
struct buffer_head *eb_bh = NULL;
u64 blkno;
+ u32 rec_end;
ocfs2_extent_block *eb;
ocfs2_extent_rec *rec;
@@ -154,26 +169,25 @@
while (el->l_tree_depth)
{
blkno = 0;
- for (i = 0; i < el->l_next_free_rec; i++) {
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
+ rec_end = (le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters));
ret = -EBADR;
- if ((rec->e_cpos + rec->e_clusters) >
- OCFS2_I(inode)->ip_clusters)
+ if (rec_end > OCFS2_I(inode)->ip_clusters)
goto out_free;
- if ((rec->e_cpos + rec->e_clusters) <= cpos) {
- ret = ocfs2_extent_map_insert(inode,
- rec,
- el->l_tree_depth);
+ if (rec_end <= cpos) {
+ ret = ocfs2_extent_map_insert(inode, rec,
+ le16_to_cpu(el->l_tree_depth));
if (ret && (ret != -EEXIST))
goto out_free;
continue;
}
- if ((cpos + clusters) <= rec->e_cpos) {
- ret = ocfs2_extent_map_insert(inode,
- rec,
- el->l_tree_depth);
+ if ((cpos + clusters) <= le32_to_cpu(rec->e_cpos)) {
+ ret = ocfs2_extent_map_insert(inode, rec,
+ le16_to_cpu(el->l_tree_depth));
if (ret && (ret != -EEXIST))
goto out_free;
continue;
@@ -187,9 +201,9 @@
/* Check to see if we're straddling */
ret = -ESRCH;
- if ((rec->e_cpos > cpos) ||
- ((cpos + clusters) >
- (rec->e_cpos + rec->e_clusters)))
+ if (!ocfs2_extent_rec_contains_clusters(rec,
+ cpos,
+ clusters))
goto out_free;
/*
@@ -201,7 +215,7 @@
if (blkno)
goto out_free;
- blkno = rec->e_blkno;
+ blkno = le64_to_cpu(rec->e_blkno);
}
/*
@@ -229,10 +243,10 @@
if (el->l_tree_depth)
BUG();
- for (i = 0; i < el->l_next_free_rec; i++) {
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
ret = ocfs2_extent_map_insert(inode, rec,
- el->l_tree_depth);
+ le16_to_cpu(el->l_tree_depth));
if (ret)
goto out_free;
}
@@ -273,7 +287,7 @@
*ret_ent = ent;
return 0;
}
- blkno = ent->e_rec.e_blkno;
+ blkno = le64_to_cpu(ent->e_rec.e_blkno);
spin_unlock(&OCFS2_I(inode)->ip_lock);
ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno, &bh,
@@ -329,8 +343,8 @@
struct rb_node **p, *parent;
struct ocfs2_extent_map_entry *old_ent;
- old_ent = ocfs2_extent_map_lookup(em, ent->e_rec.e_cpos,
- ent->e_rec.e_clusters,
+ old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(ent->e_rec.e_cpos),
+ le32_to_cpu(ent->e_rec.e_clusters),
&p, &parent);
if (old_ent)
return -EEXIST;
@@ -366,8 +380,9 @@
goto out_unlock;
}
- old_ent = ocfs2_extent_map_lookup(em, rec->e_cpos,
- rec->e_clusters, NULL, NULL);
+ old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos),
+ le32_to_cpu(rec->e_clusters), NULL,
+ NULL);
if (!old_ent)
BUG();
@@ -395,28 +410,31 @@
* the first go round. In the end, the pieces we need will
* be used, and the pieces we don't will be freed.
*/
- ctxt->need_left = !!(rec->e_cpos > old_ent->e_rec.e_cpos);
- ctxt->need_right = !!((old_ent->e_rec.e_cpos +
- old_ent->e_rec.e_clusters) >
- (rec->e_cpos + rec->e_clusters));
+ ctxt->need_left = !!(le32_to_cpu(rec->e_cpos) >
+ le32_to_cpu(old_ent->e_rec.e_cpos));
+ ctxt->need_right = !!((le32_to_cpu(old_ent->e_rec.e_cpos) +
+ le32_to_cpu(old_ent->e_rec.e_clusters)) >
+ (le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)));
ret = -EAGAIN;
if (ctxt->need_left) {
if (!ctxt->left_ent)
goto out_unlock;
*(ctxt->left_ent) = *old_ent;
ctxt->left_ent->e_rec.e_clusters =
- rec->e_cpos - ctxt->left_ent->e_rec.e_cpos;
+ cpu_to_le32(le32_to_cpu(rec->e_cpos) -
+ le32_to_cpu(ctxt->left_ent->e_rec.e_cpos));
}
if (ctxt->need_right) {
if (!ctxt->right_ent)
goto out_unlock;
*(ctxt->right_ent) = *old_ent;
ctxt->right_ent->e_rec.e_cpos =
- rec->e_cpos + rec->e_clusters;
+ cpu_to_le32(le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters));
ctxt->right_ent->e_rec.e_clusters =
- (old_ent->e_rec.e_cpos +
- old_ent->e_rec.e_clusters) -
- ctxt->right_ent->e_rec.e_cpos;
+ cpu_to_le32((le32_to_cpu(old_ent->e_rec.e_cpos) +
+ le32_to_cpu(old_ent->e_rec.e_clusters)) -
+ le32_to_cpu(ctxt->right_ent->e_rec.e_cpos));
}
rb_erase(&old_ent->e_node, &em->em_extents);
@@ -457,13 +475,13 @@
int ret;
struct ocfs2_em_insert_context ctxt = {0, };
- if ((rec->e_cpos + rec->e_clusters) >
+ if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) >
OCFS2_I(inode)->ip_map.em_clusters)
return -EBADR;
/* Zero e_clusters means a truncated tail record. It better be EOF */
if (!rec->e_clusters) {
- if ((rec->e_cpos + rec->e_clusters) !=
+ if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) !=
OCFS2_I(inode)->ip_map.em_clusters)
return -EBADR;
@@ -543,7 +561,7 @@
ocfs2_extent_rec *old;
BUG_ON(!new_clusters);
- BUG_ON(rec->e_clusters < new_clusters);
+ BUG_ON(le32_to_cpu(rec->e_clusters) < new_clusters);
if (em->em_clusters < OCFS2_I(inode)->ip_clusters) {
/*
@@ -555,39 +573,44 @@
em->em_clusters = OCFS2_I(inode)->ip_clusters;
}
- mlog_bug_on_msg((rec->e_cpos + rec->e_clusters) !=
+ mlog_bug_on_msg((le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters)) !=
(em->em_clusters + new_clusters),
"Inode %"MLFu64":\n"
"rec->e_cpos = %u + rec->e_clusters = %u = %u\n"
"em->em_clusters = %u + new_clusters = %u = %u\n",
OCFS2_I(inode)->ip_blkno,
- rec->e_cpos, rec->e_clusters,
- rec->e_cpos + rec->e_clusters,
+ le32_to_cpu(rec->e_cpos), le32_to_cpu(rec->e_clusters),
+ le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters),
em->em_clusters, new_clusters,
em->em_clusters + new_clusters);
em->em_clusters += new_clusters;
ret = -ENOENT;
- if (rec->e_clusters > new_clusters) {
+ if (le32_to_cpu(rec->e_clusters) > new_clusters) {
/* This is a contiguous append */
- ent = ocfs2_extent_map_lookup(em, rec->e_cpos, 1,
+ ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), 1,
NULL, NULL);
if (ent) {
old = &ent->e_rec;
- BUG_ON((rec->e_cpos + rec->e_clusters) !=
- (old->e_cpos + old->e_clusters +
- new_clusters));
- if (!ent->e_tree_depth) {
- BUG_ON(old->e_cpos != rec->e_cpos);
- BUG_ON(old->e_blkno != rec->e_blkno);
+ BUG_ON((le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters)) !=
+ (le32_to_cpu(old->e_cpos) +
+ le32_to_cpu(old->e_clusters) +
+ new_clusters));
+ if (ent->e_tree_depth == 0) {
+ BUG_ON(le32_to_cpu(old->e_cpos) !=
+ le32_to_cpu(rec->e_cpos));
+ BUG_ON(le64_to_cpu(old->e_blkno) !=
+ le64_to_cpu(rec->e_blkno));
ret = 0;
}
/*
* Let non-leafs fall through as -ENOENT to
* force insertion of the new leaf.
*/
- old->e_clusters += new_clusters;
+ le32_add_cpu(&old->e_clusters, new_clusters);
}
}
@@ -680,18 +703,18 @@
if (ent) {
/* We should never find ourselves straddling an interval */
- if ((ent->e_rec.e_cpos > v_cpos) ||
- ((v_cpos + count) >
- (ent->e_rec.e_cpos + ent->e_rec.e_clusters)))
+ if (!ocfs2_extent_rec_contains_clusters(&ent->e_rec,
+ v_cpos,
+ count))
return -ESRCH;
- coff = v_cpos - ent->e_rec.e_cpos;
+ coff = v_cpos - le32_to_cpu(ent->e_rec.e_cpos);
*p_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
- ent->e_rec.e_blkno) +
- coff;
+ le64_to_cpu(ent->e_rec.e_blkno)) +
+ coff;
if (ret_count)
- *ret_count = ent->e_rec.e_clusters - coff;
+ *ret_count = le32_to_cpu(ent->e_rec.e_clusters) - coff;
return 0;
}
@@ -739,19 +762,17 @@
rec = &ent->e_rec;
/* We should never find ourselves straddling an interval */
- if ((rec->e_cpos > cpos) ||
- ((cpos + clusters) >
- (rec->e_cpos + rec->e_clusters)))
+ if (!ocfs2_extent_rec_contains_clusters(rec, cpos, clusters))
return -ESRCH;
- boff = ocfs2_clusters_to_blocks(inode->i_sb,
- cpos - rec->e_cpos);
+ boff = ocfs2_clusters_to_blocks(inode->i_sb, cpos -
+ le32_to_cpu(rec->e_cpos));
boff += (v_blkno & (u64)(bpc - 1));
- *p_blkno = rec->e_blkno + boff;
+ *p_blkno = le64_to_cpu(rec->e_blkno) + boff;
if (ret_count) {
*ret_count = ocfs2_clusters_to_blocks(inode->i_sb,
- rec->e_clusters) - boff;
+ le32_to_cpu(rec->e_clusters)) - boff;
}
return 0;
@@ -790,7 +811,7 @@
ent = rb_entry(node, struct ocfs2_extent_map_entry,
e_node);
- if (ent->e_rec.e_cpos < new_clusters)
+ if (le32_to_cpu(ent->e_rec.e_cpos) < new_clusters)
break;
rb_erase(&ent->e_node, &em->em_extents);
@@ -805,8 +826,8 @@
/* Do we have an entry straddling new_clusters? */
if (tail_ent) {
if (ent &&
- ((ent->e_rec.e_cpos + ent->e_rec.e_clusters) >
- new_clusters))
+ ((le32_to_cpu(ent->e_rec.e_cpos) +
+ le32_to_cpu(ent->e_rec.e_clusters)) > new_clusters))
*tail_ent = ent;
else
*tail_ent = NULL;
@@ -875,8 +896,8 @@
__ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
if (ent)
- ent->e_rec.e_clusters =
- new_clusters - ent->e_rec.e_cpos;
+ ent->e_rec.e_clusters = cpu_to_le32(new_clusters -
+ le32_to_cpu(ent->e_rec.e_cpos));
OCFS2_I(inode)->ip_map.em_clusters = new_clusters;
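
The new ocfs2_extent_rec_contains_clusters() helper above centralizes the straddle checks that used to be open-coded in several places, and it is also where the le32_to_cpu() conversions of e_cpos/e_clusters now live. A standalone sketch of the same containment test, using a simplified record type and <endian.h> conversions rather than the on-disk ocfs2_extent_rec:

#include <assert.h>
#include <endian.h>
#include <stdint.h>

/* Simplified stand-in for an on-disk extent record (little-endian). */
struct demo_extent_rec {
	uint32_t e_cpos;     /* little-endian: start, in clusters */
	uint32_t e_clusters; /* little-endian: length, in clusters */
};

/* Returns 1 only if [cpos, cpos + clusters) lies entirely inside rec. */
static int demo_rec_contains_clusters(const struct demo_extent_rec *rec,
				      uint32_t cpos, uint32_t clusters)
{
	uint32_t r_cpos = le32toh(rec->e_cpos);
	uint32_t r_clusters = le32toh(rec->e_clusters);

	if (r_cpos > cpos)
		return 0;
	if (cpos + clusters > r_cpos + r_clusters)
		return 0;
	return 1;
}

int main(void)
{
	struct demo_extent_rec rec = {
		.e_cpos = htole32(100),
		.e_clusters = htole32(50),
	};

	assert(demo_rec_contains_clusters(&rec, 100, 50));  /* exact fit */
	assert(demo_rec_contains_clusters(&rec, 120, 10));  /* interior */
	assert(!demo_rec_contains_clusters(&rec, 90, 20));  /* starts before */
	assert(!demo_rec_contains_clusters(&rec, 140, 20)); /* runs past end */
	return 0;
}
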
Modified: trunk/fs/ocfs2/file.c
===================================================================
--- trunk/fs/ocfs2/file.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/file.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -485,32 +485,32 @@
fe = (ocfs2_dinode *) fe_bh->b_data;
OCFS2_BUG_ON_INVALID_DINODE(fe);
- mlog_bug_on_msg(fe->i_size != i_size_read(inode),
+ mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
"Inode %"MLFu64", inode i_size = %lld != di "
"i_size = %"MLFu64", i_flags = 0x%x\n",
OCFS2_I(inode)->ip_blkno,
i_size_read(inode),
- fe->i_size, fe->i_flags);
+ le64_to_cpu(fe->i_size), le32_to_cpu(fe->i_flags));
- if (new_i_size > fe->i_size) {
+ if (new_i_size > le64_to_cpu(fe->i_size)) {
mlog(0, "asked to truncate file with size (%"MLFu64") "
"to size (%"MLFu64")!\n",
- fe->i_size, new_i_size);
+ le64_to_cpu(fe->i_size), new_i_size);
status = -EINVAL;
mlog_errno(status);
goto bail;
}
mlog(0, "inode %"MLFu64", i_size = %"MLFu64", new_i_size = %"MLFu64"\n",
- fe->i_blkno, fe->i_size, new_i_size);
+ le64_to_cpu(fe->i_blkno), le64_to_cpu(fe->i_size), new_i_size);
/* lets handle the simple truncate cases before doing any more
* cluster locking. */
- if (new_i_size == fe->i_size)
+ if (new_i_size == le64_to_cpu(fe->i_size))
goto bail;
- if (fe->i_clusters == ocfs2_clusters_for_bytes(osb->sb,
- new_i_size)) {
+ if (le32_to_cpu(fe->i_clusters) ==
+ ocfs2_clusters_for_bytes(osb->sb, new_i_size)) {
mlog(0, "fe->i_clusters = %u, so we do a simple truncate\n",
fe->i_clusters);
/* No allocation change is required, so lets fast path
@@ -695,9 +695,9 @@
goto leave;
}
- fe->i_clusters += num_bits;
+ le32_add_cpu(&fe->i_clusters, num_bits);
spin_lock(&OCFS2_I(inode)->ip_lock);
- OCFS2_I(inode)->ip_clusters = fe->i_clusters;
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
spin_unlock(&OCFS2_I(inode)->ip_lock);
status = ocfs2_journal_dirty(handle, fe_bh);
@@ -772,14 +772,15 @@
fe = (ocfs2_dinode *) bh->b_data;
OCFS2_BUG_ON_INVALID_DINODE(fe);
- BUG_ON(i_size_read(inode) != (fe->i_size - *bytes_extended));
+ BUG_ON(i_size_read(inode) !=
+ (le64_to_cpu(fe->i_size) - *bytes_extended));
BUG_ON(new_i_size < i_size_read(inode));
if (i_size_read(inode) == new_i_size)
goto leave;
- clusters_to_add = ocfs2_clusters_for_bytes(osb->sb, new_i_size)
- - fe->i_clusters;
+ clusters_to_add = ocfs2_clusters_for_bytes(osb->sb, new_i_size) -
+ le32_to_cpu(fe->i_clusters);
mlog(0, "extend inode %"MLFu64", new_i_size = %"MLFu64", "
"i_size = %lld, fe->i_clusters = %u, clusters_to_add = %u\n",
@@ -866,9 +867,8 @@
goto leave;
}
- if (status == -EAGAIN
- && (new_i_size >
- ocfs2_clusters_to_bytes(osb->sb, fe->i_clusters))) {
+ if (status == -EAGAIN && (new_i_size >
+ ocfs2_clusters_to_bytes(osb->sb, le32_to_cpu(fe->i_clusters)))) {
if (why == RESTART_META) {
mlog(0, "restarting function.\n");
@@ -877,13 +877,14 @@
BUG_ON(why != RESTART_TRANS);
new_fe_size = ocfs2_clusters_to_bytes(osb->sb,
- fe->i_clusters);
- *bytes_extended += new_fe_size - fe->i_size;
+ le32_to_cpu(fe->i_clusters));
+ *bytes_extended += new_fe_size -
+ le64_to_cpu(fe->i_size);
/* update i_size in case we crash after the
* extend_trans */
- fe->i_size = new_fe_size;
+ fe->i_size = cpu_to_le64(new_fe_size);
- fe->i_mtime = CURRENT_TIME.tv_sec;
+ fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(CURRENT_TIME.tv_nsec);
status = ocfs2_journal_dirty(handle, bh);
@@ -895,7 +896,7 @@
clusters_to_add =
ocfs2_clusters_for_bytes(osb->sb,
new_i_size)
- - fe->i_clusters;
+ - le32_to_cpu(fe->i_clusters);
mlog(0, "restarting transaction.\n");
/* TODO: This can be more intelligent. */
credits = ocfs2_calc_extend_credits(osb->sb,
@@ -917,12 +918,13 @@
no_alloc:
/* this may not be the end of our allocation so only update
* i_size to what's appropriate. */
- new_fe_size = ocfs2_clusters_to_bytes(osb->sb, fe->i_clusters);
+ new_fe_size = ocfs2_clusters_to_bytes(osb->sb,
+ le32_to_cpu(fe->i_clusters));
if (new_i_size < new_fe_size)
new_fe_size = new_i_size;
- *bytes_extended += new_fe_size - fe->i_size;
- fe->i_size = new_fe_size;
+ *bytes_extended += new_fe_size - le64_to_cpu(fe->i_size);
+ fe->i_size = cpu_to_le64(new_fe_size);
mlog(0, "fe: i_clusters = %u, i_size=%"MLFu64"\n",
fe->i_clusters, fe->i_size);
@@ -930,7 +932,7 @@
mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
OCFS2_I(inode)->ip_clusters, i_size_read(inode));
- fe->i_ctime = fe->i_mtime = CURRENT_TIME.tv_sec;
+ fe->i_ctime = fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec);
fe->i_ctime_nsec = fe->i_mtime_nsec = cpu_to_le32(CURRENT_TIME.tv_nsec);
status = ocfs2_journal_dirty(handle, bh);
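
Counters that live in the buffer, such as fe->i_clusters here and i_links_count later in namei.c, now stay little-endian, so the old "fe->i_clusters += num_bits" becomes le32_add_cpu(&fe->i_clusters, num_bits). Those add helpers are presumably supplied by the new fs/ocfs2/endian.h copied in this commit; assuming they are the obvious convert-add-convert round trip, a userspace equivalent looks like this:

#include <assert.h>
#include <endian.h>
#include <stdint.h>

/*
 * Userspace equivalents of the le32_add_cpu()/le16_add_cpu() helpers the
 * patch starts using: read the little-endian field, add in CPU order,
 * store it back little-endian.
 */
static void demo_le32_add_cpu(uint32_t *var, int32_t val)
{
	*var = htole32(le32toh(*var) + val);
}

static void demo_le16_add_cpu(uint16_t *var, int16_t val)
{
	*var = htole16(le16toh(*var) + val);
}

int main(void)
{
	uint32_t i_clusters = htole32(10);     /* on-disk style LE counter */
	uint16_t i_links_count = htole16(2);

	demo_le32_add_cpu(&i_clusters, 5);     /* extend by 5 clusters */
	demo_le16_add_cpu(&i_links_count, -1); /* drop one link */

	assert(le32toh(i_clusters) == 15);
	assert(le16toh(i_links_count) == 1);
	return 0;
}
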
Modified: trunk/fs/ocfs2/inode.c
===================================================================
--- trunk/fs/ocfs2/inode.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/inode.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -223,11 +223,12 @@
/* this means that read_inode cannot create a superblock inode
* today. change if needed. */
- if (!OCFS2_IS_VALID_DINODE(fe) || !(fe->i_flags & OCFS2_VALID_FL)) {
+ if (!OCFS2_IS_VALID_DINODE(fe) ||
+ !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
mlog(ML_ERROR, "Invalid dinode: i_ino=%lu, i_blkno=%"MLFu64", "
"signature = %.*s, flags = 0x%x\n",
- inode->i_ino, fe->i_blkno, 7, fe->i_signature,
- fe->i_flags);
+ inode->i_ino, le64_to_cpu(fe->i_blkno), 7,
+ fe->i_signature, le32_to_cpu(fe->i_flags));
goto bail;
}
@@ -242,47 +243,49 @@
inode->i_version = 1;
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
- inode->i_mode = fe->i_mode;
- inode->i_uid = fe->i_uid;
- inode->i_gid = fe->i_gid;
+ inode->i_mode = le16_to_cpu(fe->i_mode);
+ inode->i_uid = le32_to_cpu(fe->i_uid);
+ inode->i_gid = le32_to_cpu(fe->i_gid);
inode->i_blksize = (u32)osb->s_clustersize;
/* Fast symlinks will have i_size but no allocated clusters. */
if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
inode->i_blocks = 0;
else
- inode->i_blocks = ocfs2_align_bytes_to_sectors(fe->i_size);
+ inode->i_blocks =
+ ocfs2_align_bytes_to_sectors(le64_to_cpu(fe->i_size));
inode->i_mapping->a_ops = &ocfs2_aops;
inode->i_flags |= S_NOATIME;
- inode->i_atime.tv_sec = fe->i_atime;
+ inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = fe->i_mtime;
+ inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
- inode->i_ctime.tv_sec = fe->i_ctime;
+ inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
- if (OCFS2_I(inode)->ip_blkno != fe->i_blkno)
+ if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
mlog(ML_ERROR,
"ip_blkno %"MLFu64" != i_blkno %"MLFu64"!\n",
OCFS2_I(inode)->ip_blkno, fe->i_blkno);
- OCFS2_I(inode)->ip_clusters = fe->i_clusters;
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_orphaned_slot = OCFS2_INVALID_SLOT;
if (create_ino)
- inode->i_ino = ino_from_blkno(inode->i_sb, fe->i_blkno);
+ inode->i_ino = ino_from_blkno(inode->i_sb,
+ le64_to_cpu(fe->i_blkno));
mlog(0, "blkno = %"MLFu64", ino = %lu, create_ino = %s\n",
fe->i_blkno, inode->i_ino, create_ino ? "true" : "false");
- inode->i_nlink = fe->i_links_count;
+ inode->i_nlink = le16_to_cpu(fe->i_links_count);
- if (fe->i_flags & OCFS2_LOCAL_ALLOC_FL) {
+ if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino);
- } else if (fe->i_flags & OCFS2_BITMAP_FL) {
+ } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
- } else if (fe->i_flags & OCFS2_SUPER_BLOCK_FL) {
+ } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino);
/* we can't actually hit this as read_inode can't
* handle superblocks today ;-) */
@@ -293,20 +296,20 @@
case S_IFREG:
inode->i_fop = &ocfs2_fops;
inode->i_op = &ocfs2_file_iops;
- i_size_write(inode, fe->i_size);
+ i_size_write(inode, le64_to_cpu(fe->i_size));
OCFS2_I(inode)->ip_mmu_private = inode->i_size;
break;
case S_IFDIR:
inode->i_op = &ocfs2_dir_iops;
inode->i_fop = &ocfs2_dops;
- i_size_write(inode, fe->i_size);
+ i_size_write(inode, le64_to_cpu(fe->i_size));
break;
case S_IFLNK:
if (ocfs2_inode_is_fast_symlink(inode))
inode->i_op = &ocfs2_fast_symlink_inode_operations;
else
inode->i_op = &ocfs2_symlink_inode_operations;
- i_size_write(inode, fe->i_size);
+ i_size_write(inode, le64_to_cpu(fe->i_size));
break;
default:
inode->i_op = &ocfs2_special_file_iops;
@@ -370,8 +373,11 @@
goto bail;
}
- sysfile = fe->i_flags & OCFS2_SYSTEM_FL;
- if (S_ISCHR(fe->i_mode) || S_ISBLK(fe->i_mode))
+ if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL))
+ sysfile = 1;
+
+ if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
+ S_ISBLK(le16_to_cpu(fe->i_mode)))
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
status = -EINVAL;
@@ -382,7 +388,7 @@
goto bail;
}
- BUG_ON(args->fi_blkno != fe->i_blkno);
+ BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno));
if (sysfile)
OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
@@ -516,8 +522,8 @@
goto bail_commit;
}
- di->i_dtime = CURRENT_TIME.tv_sec;
- di->i_flags &= (~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
+ di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec);
+ le32_and_cpu(&di->i_flags, ~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
status = ocfs2_journal_dirty(handle, di_bh);
if (status < 0) {
@@ -687,7 +693,7 @@
/* Do some basic inode verification... */
di = (ocfs2_dinode *) di_bh->b_data;
- if (!(di->i_flags & OCFS2_ORPHANED_FL)) {
+ if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) {
/* for lack of a better error? */
status = -EEXIST;
mlog(ML_ERROR,
@@ -1075,19 +1081,19 @@
}
spin_lock(&OCFS2_I(inode)->ip_lock);
- fe->i_clusters = OCFS2_I(inode)->ip_clusters;
+ fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
spin_unlock(&OCFS2_I(inode)->ip_lock);
- fe->i_size = (u64)i_size_read(inode);
- fe->i_links_count = inode->i_nlink;
- fe->i_uid = inode->i_uid;
- fe->i_gid = inode->i_gid;
- fe->i_mode = inode->i_mode;
- fe->i_atime = inode->i_atime.tv_sec;
+ fe->i_size = cpu_to_le64(i_size_read(inode));
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+ fe->i_uid = cpu_to_le32(inode->i_uid);
+ fe->i_gid = cpu_to_le32(inode->i_gid);
+ fe->i_mode = cpu_to_le16(inode->i_mode);
+ fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- fe->i_ctime = inode->i_ctime.tv_sec;
+ fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
- fe->i_mtime = inode->i_mtime.tv_sec;
+ fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
status = ocfs2_journal_dirty(handle, bh);
@@ -1113,25 +1119,25 @@
spin_lock(&OCFS2_I(inode)->ip_lock);
- OCFS2_I(inode)->ip_clusters = fe->i_clusters;
- i_size_write(inode, fe->i_size);
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ i_size_write(inode, le64_to_cpu(fe->i_size));
if (S_ISREG(inode->i_mode)) {
OCFS2_I(inode)->ip_mmu_private = i_size_read(inode);
}
- inode->i_nlink = fe->i_links_count;
- inode->i_uid = fe->i_uid;
- inode->i_gid = fe->i_gid;
- inode->i_mode = fe->i_mode;
+ inode->i_nlink = le16_to_cpu(fe->i_links_count);
+ inode->i_uid = le32_to_cpu(fe->i_uid);
+ inode->i_gid = le32_to_cpu(fe->i_gid);
+ inode->i_mode = le16_to_cpu(fe->i_mode);
inode->i_blksize = (u32) osb->s_clustersize;
- if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+ if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_align_bytes_to_sectors(i_size_read(inode));
- inode->i_atime.tv_sec = fe->i_atime;
+ inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = fe->i_mtime;
+ inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
- inode->i_ctime.tv_sec = fe->i_ctime;
+ inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
spin_unlock(&OCFS2_I(inode)->ip_lock);
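
Two idioms recur in the inode.c hunks above: on-disk values are converted exactly once when copied into the in-core inode (le16_to_cpu(fe->i_mode) and friends), and flag tests convert the constant rather than the disk field, as in "fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)", which the compiler can fold at build time so no byte swap happens at run time. A small sketch of the flag-test idiom, with made-up flag values and htole32() standing in for cpu_to_le32():

#include <assert.h>
#include <endian.h>
#include <stdint.h>

/* Hypothetical flag values, stand-ins for OCFS2_VALID_FL etc. */
#define DEMO_VALID_FL    0x00000001u
#define DEMO_ORPHANED_FL 0x00000002u

/* The on-disk field stays little-endian; only the constant is converted. */
static int demo_is_valid(uint32_t i_flags_le)
{
	return (i_flags_le & htole32(DEMO_VALID_FL)) != 0;
}

int main(void)
{
	uint32_t i_flags_le = htole32(DEMO_VALID_FL | DEMO_ORPHANED_FL);

	assert(demo_is_valid(i_flags_le));

	/* One way to clear bits without converting the field:
	 * complement the converted mask. */
	i_flags_le &= ~htole32(DEMO_VALID_FL | DEMO_ORPHANED_FL);
	assert(!demo_is_valid(i_flags_le));
	return 0;
}
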
Modified: trunk/fs/ocfs2/journal.c
===================================================================
--- trunk/fs/ocfs2/journal.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/journal.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -1122,7 +1122,7 @@
node_num, slot_num,
MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
- OCFS2_I(inode)->ip_clusters = fe->i_clusters;
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
status = ocfs2_force_read_journal(inode);
if (status < 0) {
@@ -1440,7 +1440,7 @@
if (de->name_len == 2 && !strncmp("..", de->name, 2))
continue;
- iter = ocfs2_iget(osb, de->inode);
+ iter = ocfs2_iget(osb, le64_to_cpu(de->inode));
if (!iter)
continue;
Modified: trunk/fs/ocfs2/journal.h
===================================================================
--- trunk/fs/ocfs2/journal.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/journal.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -397,7 +397,7 @@
* prev. last_eb_blk + blocks along edge of tree.
* calc_symlink_credits passes because we just need 1
* credit for the dinode there. */
- dinode_blocks = 1 + 1 + fe->id2.i_list.l_tree_depth;
+ dinode_blocks = 1 + 1 + le16_to_cpu(fe->id2.i_list.l_tree_depth);
return bitmap_blocks + sysfile_bitmap_blocks + dinode_blocks;
}
@@ -430,18 +430,19 @@
ocfs2_extent_list *last_el)
{
/* for dinode + all headers in this pass + update to next leaf */
- int credits = 1 + fe->id2.i_list.l_tree_depth + 1;
+ u16 next_free = le16_to_cpu(last_el->l_next_free_rec);
+ u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth);
+ int credits = 1 + tree_depth + 1;
int i;
- i = last_el->l_next_free_rec - 1;
+ i = next_free - 1;
BUG_ON(i < 0);
/* We may be deleting metadata blocks, so metadata alloc dinode +
one desc. block for each possible delete. */
- if (fe->id2.i_list.l_tree_depth
- && (last_el->l_next_free_rec == 1)
- && ((last_el->l_recs[i].e_clusters - clusters_to_del) == 0))
- credits += 1 + fe->id2.i_list.l_tree_depth;
+ if (tree_depth && next_free == 1 &&
+ le32_to_cpu(last_el->l_recs[i].e_clusters) == clusters_to_del)
+ credits += 1 + tree_depth;
/* update to the truncate log. */
credits += OCFS2_TRUNCATE_LOG_UPDATE;
Modified: trunk/fs/ocfs2/localalloc.c
===================================================================
--- trunk/fs/ocfs2/localalloc.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/localalloc.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -44,6 +44,8 @@
#include "buffer_head_io.h"
+#define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))
+
static inline int ocfs2_local_alloc_window_bits(ocfs2_super *osb);
static u32 ocfs2_local_alloc_count_bits(ocfs2_dinode *alloc);
@@ -115,6 +117,7 @@
struct buffer_head *alloc_bh = NULL;
u32 num_used;
struct inode *inode = NULL;
+ ocfs2_local_alloc *la;
mlog_entry_void();
@@ -135,18 +138,20 @@
}
alloc = (ocfs2_dinode *) alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
- if (!(alloc->i_flags & (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
+ if (!(le32_to_cpu(alloc->i_flags) &
+ (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
mlog(ML_ERROR, "Invalid local alloc inode, %"MLFu64"\n",
OCFS2_I(inode)->ip_blkno);
status = -EINVAL;
goto bail;
}
- if ((OCFS2_LOCAL_ALLOC(alloc)->la_size == 0) ||
- (OCFS2_LOCAL_ALLOC(alloc)->la_size > ocfs2_local_alloc_size(inode->i_sb))) {
+ if ((la->la_size == 0) ||
+ (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
- OCFS2_LOCAL_ALLOC(alloc)->la_size);
+ le16_to_cpu(la->la_size));
status = -EINVAL;
goto bail;
}
@@ -159,7 +164,7 @@
if (num_used
|| alloc->id1.bitmap1.i_used
|| alloc->id1.bitmap1.i_total
- || OCFS2_LOCAL_ALLOC(alloc)->la_bm_off)
+ || la->la_bm_off)
mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
@@ -224,7 +229,7 @@
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
- -1);
+ OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
status = -EINVAL;
mlog_errno(status);
@@ -393,7 +398,7 @@
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
- -1);
+ OCFS2_INVALID_SLOT);
if (!main_bm_inode) {
status = -EINVAL;
mlog_errno(status);
@@ -524,6 +529,7 @@
u32 bits_wanted;
void *bitmap;
ocfs2_dinode *alloc;
+ ocfs2_local_alloc *la;
mlog_entry_void();
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
@@ -531,6 +537,7 @@
bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
local_alloc_inode = ac->ac_inode;
alloc = (ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
if (start == -1) {
@@ -540,8 +547,8 @@
goto bail;
}
- bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
- *bit_off = OCFS2_LOCAL_ALLOC(alloc)->la_bm_off + start;
+ bitmap = la->la_bitmap;
+ *bit_off = le32_to_cpu(la->la_bm_off) + start;
/* local alloc is always contiguous by nature -- we never
* delete bits from it! */
*num_bits = bits_wanted;
@@ -558,7 +565,7 @@
ocfs2_set_bit(start++, bitmap);
alloc->id1.bitmap1.i_used = cpu_to_le32(*num_bits +
- le32_to_cpu(alloc->id1.bitmap1.i_used));
+ le32_to_cpu(alloc->id1.bitmap1.i_used));
status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
if (status < 0) {
@@ -577,11 +584,12 @@
int i;
u8 *buffer;
u32 count = 0;
+ ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
mlog_entry_void();
- buffer = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
- for (i = 0; i < OCFS2_LOCAL_ALLOC(alloc)->la_size; i++)
+ buffer = la->la_bitmap;
+ for (i = 0; i < le16_to_cpu(la->la_size); i++)
count += hweight8(buffer[i]);
mlog_exit(count);
@@ -649,14 +657,15 @@
static void ocfs2_clear_local_alloc(ocfs2_dinode *alloc)
{
+ ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
int i;
mlog_entry_void();
alloc->id1.bitmap1.i_total = 0;
alloc->id1.bitmap1.i_used = 0;
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off = 0;
- for(i = 0; i < OCFS2_LOCAL_ALLOC(alloc)->la_size; i++)
- OCFS2_LOCAL_ALLOC(alloc)->la_bitmap[i] = 0;
+ la->la_bm_off = 0;
+ for(i = 0; i < le16_to_cpu(la->la_size); i++)
+ la->la_bitmap[i] = 0;
mlog_exit_void();
}
@@ -697,6 +706,7 @@
u64 la_start_blk;
u64 blkno;
void *bitmap;
+ ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
mlog_entry("total = %u, COUNT = %u, used = %u\n",
le32_to_cpu(alloc->id1.bitmap1.i_total),
@@ -708,14 +718,15 @@
goto bail;
}
- if (alloc->id1.bitmap1.i_used == alloc->id1.bitmap1.i_total) {
+ if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
+ le32_to_cpu(alloc->id1.bitmap1.i_total)) {
mlog(0, "all bits were taken!\n");
goto bail;
}
la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
- bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
+ le32_to_cpu(la->la_bm_off));
+ bitmap = la->la_bitmap;
start = count = bit_off = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
@@ -804,10 +815,13 @@
int status = 0;
u32 cluster_off, cluster_count;
ocfs2_dinode *alloc = NULL;
+ ocfs2_local_alloc *la;
mlog_entry_void();
alloc = (ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
if (alloc->id1.bitmap1.i_total)
mlog(0, "asking me to alloc a new window over a non-empty "
"one\n");
@@ -826,9 +840,7 @@
goto bail;
}
- alloc = (ocfs2_dinode *) osb->local_alloc_bh->b_data;
-
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off = cluster_off;
+ la->la_bm_off = cpu_to_le32(cluster_off);
alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
/* just in case... In the future when we find space ourselves,
* we don't have to get all contiguous -- but we'll have to
@@ -836,7 +848,7 @@
* la_bits_set before setting the bits in the main bitmap. */
alloc->id1.bitmap1.i_used = 0;
memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
- OCFS2_LOCAL_ALLOC(alloc)->la_size);
+ le16_to_cpu(la->la_size));
mlog(0, "New window allocated:\n");
mlog(0, "window la_bm_off = %u\n",
Modified: trunk/fs/ocfs2/namei.c
===================================================================
--- trunk/fs/ocfs2/namei.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/namei.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -455,7 +455,7 @@
mlog_errno(status);
goto leave;
}
- dirfe->i_links_count++;
+ le16_add_cpu(&dirfe->i_links_count, 1);
status = ocfs2_journal_dirty(handle, parent_fe_bh);
if (status < 0) {
mlog_errno(status);
@@ -577,26 +577,27 @@
fe->i_generation = cpu_to_le32(inode->i_generation);
fe->i_fs_generation = cpu_to_le32(osb->fs_generation);
- fe->i_blkno = fe_blkno;
+ fe->i_blkno = cpu_to_le64(fe_blkno);
fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
fe->i_suballoc_slot = cpu_to_le16(osb->slot_num);
- fe->i_uid = current->fsuid;
+ fe->i_uid = cpu_to_le32(current->fsuid);
if (dir->i_mode & S_ISGID) {
- fe->i_gid = dir->i_gid;
+ fe->i_gid = cpu_to_le32(dir->i_gid);
if (S_ISDIR(mode))
mode |= S_ISGID;
} else
- fe->i_gid = current->fsgid;
- fe->i_mode = mode;
+ fe->i_gid = cpu_to_le32(current->fsgid);
+ fe->i_mode = cpu_to_le16(mode);
if (S_ISCHR(mode) || S_ISBLK(mode))
fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
- fe->i_links_count = inode->i_nlink;
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
fe->i_last_eb_blk = 0;
strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
- fe->i_flags |= OCFS2_VALID_FL;
- fe->i_atime = fe->i_ctime = fe->i_mtime = CURRENT_TIME.tv_sec;
+ le32_add_cpu(&fe->i_flags, OCFS2_VALID_FL);
+ fe->i_atime = fe->i_ctime = fe->i_mtime =
+ cpu_to_le64(CURRENT_TIME.tv_sec);
fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
cpu_to_le32(CURRENT_TIME.tv_nsec);
fe->i_dtime = 0;
@@ -604,7 +605,7 @@
fel = &fe->id2.i_list;
fel->l_tree_depth = 0;
fel->l_next_free_rec = 0;
- fel->l_count = ocfs2_extent_recs_per_inode(osb->sb);
+ fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
status = ocfs2_journal_dirty(handle, *new_fe_bh);
if (status < 0) {
@@ -733,7 +734,7 @@
}
fe = (ocfs2_dinode *) fe_bh->b_data;
- if (fe->i_links_count >= OCFS2_LINK_MAX) {
+ if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) {
err = -EMLINK;
goto bail;
}
@@ -753,13 +754,13 @@
inode->i_nlink++;
inode->i_ctime = CURRENT_TIME;
- fe->i_links_count = inode->i_nlink;
- fe->i_ctime = inode->i_ctime.tv_sec;
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+ fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
err = ocfs2_journal_dirty(handle, fe_bh);
if (err < 0) {
- fe->i_links_count--;
+ le16_add_cpu(&fe->i_links_count, -1);
inode->i_nlink--;
mlog_errno(err);
goto bail;
@@ -769,7 +770,7 @@
OCFS2_I(inode)->ip_blkno,
parent_fe_bh, de_bh);
if (err) {
- fe->i_links_count--;
+ le16_add_cpu(&fe->i_links_count, -1);
inode->i_nlink--;
mlog_errno(err);
goto bail;
@@ -935,7 +936,7 @@
/* We can set nlink on the dinode now. clear the saved version
* so that it doesn't get set later. */
- fe->i_links_count = inode->i_nlink;
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
saved_nlink = 0;
status = ocfs2_journal_dirty(handle, fe_bh);
@@ -1320,7 +1321,8 @@
goto bail;
}
- if (S_ISDIR(new_inode->i_mode) || (newfe->i_links_count == 1)){
+ if (S_ISDIR(new_inode->i_mode) ||
+ (newfe->i_links_count == cpu_to_le16(1))){
status = ocfs2_orphan_add(osb, handle, new_inode,
newfe, orphan_name,
orphan_entry_bh);
@@ -1337,7 +1339,7 @@
mlog_errno(status);
goto bail;
}
- new_de->inode = le64_to_cpu(OCFS2_I(old_inode)->ip_blkno);
+ new_de->inode = cpu_to_le64(OCFS2_I(old_inode)->ip_blkno);
new_de->file_type = old_de->file_type;
new_dir->i_version++;
status = ocfs2_journal_dirty(handle, new_de_bh);
@@ -1349,7 +1351,7 @@
if (S_ISDIR(new_inode->i_mode))
newfe->i_links_count = 0;
else
- newfe->i_links_count--;
+ le16_add_cpu(&newfe->i_links_count, -1);
status = ocfs2_journal_dirty(handle, newfe_bh);
if (status < 0) {
@@ -1383,7 +1385,7 @@
old_inode_de_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
PARENT_INO(old_inode_de_bh->b_data) =
- le64_to_cpu(OCFS2_I(new_dir)->ip_blkno);
+ cpu_to_le64(OCFS2_I(new_dir)->ip_blkno);
status = ocfs2_journal_dirty(handle, old_inode_de_bh);
old_dir->i_nlink--;
if (new_inode) {
@@ -1411,7 +1413,7 @@
new_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
fe = (ocfs2_dinode *) new_dir_bh->b_data;
- fe->i_links_count = new_dir->i_nlink;
+ fe->i_links_count = cpu_to_le16(new_dir->i_nlink);
status = ocfs2_journal_dirty(handle, new_dir_bh);
}
}
@@ -1429,7 +1431,7 @@
old_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
fe = (ocfs2_dinode *) old_dir_bh->b_data;
- fe->i_links_count = old_dir->i_nlink;
+ fe->i_links_count = cpu_to_le16(old_dir->i_nlink);
status = ocfs2_journal_dirty(handle, old_dir_bh);
}
}
@@ -1711,8 +1713,9 @@
}
}
- status = ocfs2_add_entry(handle, dentry, inode, fe->i_blkno,
- parent_fe_bh, de_bh);
+ status = ocfs2_add_entry(handle, dentry, inode,
+ le64_to_cpu(fe->i_blkno), parent_fe_bh,
+ de_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -2183,8 +2186,8 @@
* underneath us... */
orphan_fe = (ocfs2_dinode *) orphan_dir_bh->b_data;
if (S_ISDIR(inode->i_mode))
- orphan_fe->i_links_count++;
- orphan_dir_inode->i_nlink = orphan_fe->i_links_count;
+ le16_add_cpu(&orphan_fe->i_links_count, 1);
+ orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count);
status = ocfs2_journal_dirty(handle, orphan_dir_bh);
if (status < 0) {
@@ -2201,7 +2204,7 @@
goto leave;
}
- fe->i_flags |= OCFS2_ORPHANED_FL;
+ le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL);
/* Record which orphan dir our inode now resides
* in. delete_inode will use this to determine which orphan
@@ -2275,8 +2278,8 @@
/* do the i_nlink dance! :) */
orphan_fe = (ocfs2_dinode *) orphan_dir_bh->b_data;
if (S_ISDIR(inode->i_mode))
- orphan_fe->i_links_count--;
- orphan_dir_inode->i_nlink = orphan_fe->i_links_count;
+ le16_add_cpu(&orphan_fe->i_links_count, -1);
+ orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count);
status = ocfs2_journal_dirty(handle, orphan_dir_bh);
if (status < 0) {
Modified: trunk/fs/ocfs2/ocfs2.h
===================================================================
--- trunk/fs/ocfs2/ocfs2.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/ocfs2.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -40,6 +40,7 @@
#include "dlm/dlmapi.h"
#include "ocfs2_fs.h"
+#include "endian.h"
#include "ocfs2_lockid.h"
struct ocfs2_extent_map {
Modified: trunk/fs/ocfs2/ocfs2_fs.h
===================================================================
--- trunk/fs/ocfs2/ocfs2_fs.h 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/ocfs2_fs.h 2005-08-12 23:35:01 UTC (rev 2514)
@@ -86,20 +86,9 @@
OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
#define OCFS2_FEATURE_COMPAT_SUPP 0
+#define OCFS2_FEATURE_INCOMPAT_SUPP 0
#define OCFS2_FEATURE_RO_COMPAT_SUPP 0
-/* We're not big endian safe yet. But it has been decreed that the
- * unwashed zLinux masses must be appeased, lest they storm the castle
- * with rakes and pitchforks. Thus...
- */
-#ifdef CONFIG_ARCH_S390
-#define OCFS2_FEATURE_INCOMPAT_B0RKEN_ENDIAN 0x0001
-
-#define OCFS2_FEATURE_INCOMPAT_SUPP OCFS2_FEATURE_INCOMPAT_B0RKEN_ENDIAN
-#else
-#define OCFS2_FEATURE_INCOMPAT_SUPP 0
-#endif
-
/*
* Heartbeat-only devices are missing journals and other files. The
* filesystem driver can't load them, but the library can. Never put
@@ -243,29 +232,27 @@
* Convenience casts
*/
#define OCFS2_RAW_SB(dinode) (&((dinode)->id2.i_super))
-#define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))
-
/*
* On disk extent record for OCFS2
* It describes a range of clusters on disk.
*/
typedef struct _ocfs2_extent_rec {
-/*00*/ __u32 e_cpos; /* Offset into the file, in clusters */
- __u32 e_clusters; /* Clusters covered by this extent */
- __u64 e_blkno; /* Physical disk offset, in blocks */
+/*00*/ __le32 e_cpos; /* Offset into the file, in clusters */
+ __le32 e_clusters; /* Clusters covered by this extent */
+ __le64 e_blkno; /* Physical disk offset, in blocks */
/*10*/
} ocfs2_extent_rec;
typedef struct _ocfs2_chain_rec {
- __u32 c_free; /* Number of free bits in this chain. */
- __u32 c_total; /* Number of total bits in this chain */
- __u64 c_blkno; /* Physical disk offset (blocks) of 1st group */
+ __le32 c_free; /* Number of free bits in this chain. */
+ __le32 c_total; /* Number of total bits in this chain */
+ __le64 c_blkno; /* Physical disk offset (blocks) of 1st group */
} ocfs2_chain_rec;
typedef struct _ocfs2_truncate_rec {
- __u32 t_start; /* 1st cluster in this log */
- __u32 t_clusters; /* Number of total clusters covered */
+ __le32 t_start; /* 1st cluster in this log */
+ __le32 t_clusters; /* Number of total clusters covered */
} ocfs2_truncate_rec;
/*
@@ -275,14 +262,14 @@
* ocfs2_extent_block.h_list, respectively.
*/
typedef struct _ocfs2_extent_list {
-/*00*/ __u16 l_tree_depth; /* Extent tree depth from this
+/*00*/ __le16 l_tree_depth; /* Extent tree depth from this
point. 0 means data extents
hang directly off this
header (a leaf) */
- __u16 l_count; /* Number of extent records */
- __u16 l_next_free_rec; /* Next unused extent slot */
- __u16 l_reserved1;
- __u64 l_reserved2; /* Pad to
+ __le16 l_count; /* Number of extent records */
+ __le16 l_next_free_rec; /* Next unused extent slot */
+ __le16 l_reserved1;
+ __le64 l_reserved2; /* Pad to
sizeof(ocfs2_extent_rec) */
/*10*/ ocfs2_extent_rec l_recs[0]; /* Extent records */
} ocfs2_extent_list;
@@ -293,11 +280,11 @@
* ocfs2_dinode.id2.i_chain.
*/
typedef struct _ocfs2_chain_list {
-/*00*/ __u16 cl_cpg; /* Clusters per Block Group */
- __u16 cl_bpc; /* Bits per cluster */
- __u16 cl_count; /* Total chains in this list */
- __u16 cl_next_free_rec; /* Next unused chain slot */
- __u64 cl_reserved1;
+/*00*/ __le16 cl_cpg; /* Clusters per Block Group */
+ __le16 cl_bpc; /* Bits per cluster */
+ __le16 cl_count; /* Total chains in this list */
+ __le16 cl_next_free_rec; /* Next unused chain slot */
+ __le64 cl_reserved1;
/*10*/ ocfs2_chain_rec cl_recs[0]; /* Chain records */
} ocfs2_chain_list;
@@ -307,9 +294,9 @@
* ocfs2_dinode.id2.i_dealloc.
*/
typedef struct _ocfs2_truncate_log {
-/*00*/ __u16 tl_count; /* Total records in this log */
- __u16 tl_used; /* Number of records in use */
- __u32 tl_reserved1;
+/*00*/ __le16 tl_count; /* Total records in this log */
+ __le16 tl_used; /* Number of records in use */
+ __le32 tl_reserved1;
/*08*/ ocfs2_truncate_rec tl_recs[0]; /* Truncate records */
} ocfs2_truncate_log;
@@ -319,15 +306,15 @@
typedef struct _ocfs2_extent_block
{
/*00*/ __u8 h_signature[8]; /* Signature for verification */
- __u64 h_reserved1;
-/*10*/ __s16 h_suballoc_slot; /* Slot suballocator this
+ __le64 h_reserved1;
+/*10*/ __le16 h_suballoc_slot; /* Slot suballocator this
extent_header belongs to */
- __u16 h_suballoc_bit; /* Bit offset in suballocator
+ __le16 h_suballoc_bit; /* Bit offset in suballocator
block group */
- __u32 h_fs_generation; /* Must match super block */
- __u64 h_blkno; /* Offset on disk, in blocks */
-/*20*/ __u64 h_reserved3;
- __u64 h_next_leaf_blk; /* Offset on disk, in blocks,
+ __le32 h_fs_generation; /* Must match super block */
+ __le64 h_blkno; /* Offset on disk, in blocks */
+/*20*/ __le64 h_reserved3;
+ __le64 h_next_leaf_blk; /* Offset on disk, in blocks,
of next leaf header pointing
to data */
/*30*/ ocfs2_extent_list h_list; /* Extent record list */
@@ -340,29 +327,29 @@
* are relative to the start of ocfs2_dinode.id2.
*/
typedef struct _ocfs2_super_block {
-/*00*/ __u16 s_major_rev_level;
- __u16 s_minor_rev_level;
- __u16 s_mnt_count;
- __s16 s_max_mnt_count;
- __u16 s_state; /* File system state */
- __u16 s_errors; /* Behaviour when detecting errors */
- __u32 s_checkinterval; /* Max time between checks */
-/*10*/ __u64 s_lastcheck; /* Time of last check */
- __u32 s_creator_os; /* OS */
- __u32 s_feature_compat; /* Compatible feature set */
-/*20*/ __u32 s_feature_incompat; /* Incompatible feature set */
- __u32 s_feature_ro_compat; /* Readonly-compatible feature set */
- __u64 s_root_blkno; /* Offset, in blocks, of root directory
+/*00*/ __le16 s_major_rev_level;
+ __le16 s_minor_rev_level;
+ __le16 s_mnt_count;
+ __le16 s_max_mnt_count;
+ __le16 s_state; /* File system state */
+ __le16 s_errors; /* Behaviour when detecting errors */
+ __le32 s_checkinterval; /* Max time between checks */
+/*10*/ __le64 s_lastcheck; /* Time of last check */
+ __le32 s_creator_os; /* OS */
+ __le32 s_feature_compat; /* Compatible feature set */
+/*20*/ __le32 s_feature_incompat; /* Incompatible feature set */
+ __le32 s_feature_ro_compat; /* Readonly-compatible feature set */
+ __le64 s_root_blkno; /* Offset, in blocks, of root directory
dinode */
-/*30*/ __u64 s_system_dir_blkno; /* Offset, in blocks, of system
+/*30*/ __le64 s_system_dir_blkno; /* Offset, in blocks, of system
directory dinode */
- __u32 s_blocksize_bits; /* Blocksize for this fs */
- __u32 s_clustersize_bits; /* Clustersize for this fs */
-/*40*/ __u16 s_max_slots; /* Max number of simultaneous mounts
+ __le32 s_blocksize_bits; /* Blocksize for this fs */
+ __le32 s_clustersize_bits; /* Clustersize for this fs */
+/*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts
before tunefs required */
- __u16 s_reserved1;
- __u32 s_reserved2;
- __u64 s_first_cluster_group; /* Block offset of 1st cluster
+ __le16 s_reserved1;
+ __le32 s_reserved2;
+ __le64 s_first_cluster_group; /* Block offset of 1st cluster
* group header */
/*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */
/*90*/ __u8 s_uuid[OCFS2_VOL_UUID_LEN]; /* 128-bit uuid */
@@ -376,11 +363,11 @@
*/
typedef struct _ocfs2_local_alloc
{
-/*00*/ __u32 la_bm_off; /* Starting bit offset in main bitmap */
- __u16 la_size; /* Size of included bitmap, in bytes */
- __u16 la_reserved1;
- __u64 la_reserved2;
-/*10*/ __u8 la_bitmap[0];
+/*00*/ __le32 la_bm_off; /* Starting bit offset in main bitmap */
+ __le16 la_size; /* Size of included bitmap, in bytes */
+ __le16 la_reserved1;
+ __le64 la_reserved2;
+/*10*/ __u8 la_bitmap[0];
} ocfs2_local_alloc;
/*
@@ -388,47 +375,47 @@
*/
typedef struct _ocfs2_dinode {
/*00*/ __u8 i_signature[8]; /* Signature for validation */
- __u32 i_generation; /* Generation number */
- __s16 i_suballoc_slot; /* Slot suballocator this inode
+ __le32 i_generation; /* Generation number */
+ __le16 i_suballoc_slot; /* Slot suballocator this inode
belongs to */
- __u16 i_suballoc_bit; /* Bit offset in suballocator
+ __le16 i_suballoc_bit; /* Bit offset in suballocator
block group */
-/*10*/ __u32 i_reserved0;
- __u32 i_clusters; /* Cluster count */
- __u32 i_uid; /* Owner UID */
- __u32 i_gid; /* Owning GID */
-/*20*/ __u64 i_size; /* Size in bytes */
- __u16 i_mode; /* File mode */
- __u16 i_links_count; /* Links count */
- __u32 i_flags; /* File flags */
-/*30*/ __u64 i_atime; /* Access time */
- __u64 i_ctime; /* Creation time */
-/*40*/ __u64 i_mtime; /* Modification time */
- __u64 i_dtime; /* Deletion time */
-/*50*/ __u64 i_blkno; /* Offset on disk, in blocks */
- __u64 i_last_eb_blk; /* Pointer to last extent
+/*10*/ __le32 i_reserved0;
+ __le32 i_clusters; /* Cluster count */
+ __le32 i_uid; /* Owner UID */
+ __le32 i_gid; /* Owning GID */
+/*20*/ __le64 i_size; /* Size in bytes */
+ __le16 i_mode; /* File mode */
+ __le16 i_links_count; /* Links count */
+ __le32 i_flags; /* File flags */
+/*30*/ __le64 i_atime; /* Access time */
+ __le64 i_ctime; /* Creation time */
+/*40*/ __le64 i_mtime; /* Modification time */
+ __le64 i_dtime; /* Deletion time */
+/*50*/ __le64 i_blkno; /* Offset on disk, in blocks */
+ __le64 i_last_eb_blk; /* Pointer to last extent
block */
-/*60*/ __u32 i_fs_generation; /* Generation per fs-instance */
- __u32 i_atime_nsec;
- __u32 i_ctime_nsec;
- __u32 i_mtime_nsec;
-/*70*/ __u64 i_reserved1[9];
+/*60*/ __le32 i_fs_generation; /* Generation per fs-instance */
+ __le32 i_atime_nsec;
+ __le32 i_ctime_nsec;
+ __le32 i_mtime_nsec;
+/*70*/ __le64 i_reserved1[9];
/*B8*/ union {
- __u64 i_pad1; /* Generic way to refer to this
+ __le64 i_pad1; /* Generic way to refer to this
64bit union */
struct {
- __u64 i_rdev; /* Device number */
+ __le64 i_rdev; /* Device number */
} dev1;
struct { /* Info for bitmap system
inodes */
- __u32 i_used; /* Bits (ie, clusters) used */
- __u32 i_total; /* Total bits (clusters)
+ __le32 i_used; /* Bits (ie, clusters) used */
+ __le32 i_total; /* Total bits (clusters)
available */
} bitmap1;
struct { /* Info for journal system
inodes */
- __u32 ij_flags; /* Mounted, version, etc. */
- __u32 ij_pad;
+ __le32 ij_flags; /* Mounted, version, etc. */
+ __le32 ij_pad;
} journal1;
} id1; /* Inode type dependant 1 */
/*C0*/ union {
@@ -448,8 +435,8 @@
* Packed as this structure could be accessed unaligned on 64-bit platforms
*/
struct ocfs2_dir_entry {
-/*00*/ __u64 inode; /* Inode number */
- __u16 rec_len; /* Directory entry length */
+/*00*/ __le64 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
__u8 file_type;
/*0C*/ char name[OCFS2_MAX_FILENAME_LEN]; /* File name */
@@ -462,20 +449,20 @@
typedef struct _ocfs2_group_desc
{
/*00*/ __u8 bg_signature[8]; /* Signature for validation */
- __u16 bg_size; /* Size of included bitmap in
+ __le16 bg_size; /* Size of included bitmap in
bytes. */
- __u16 bg_bits; /* Bits represented by this
+ __le16 bg_bits; /* Bits represented by this
group. */
- __u16 bg_free_bits_count; /* Free bits count */
- __u16 bg_chain; /* What chain I am in. */
-/*10*/ __u32 bg_generation;
- __u32 bg_reserved1;
- __u64 bg_next_group; /* Next group in my list, in
+ __le16 bg_free_bits_count; /* Free bits count */
+ __le16 bg_chain; /* What chain I am in. */
+/*10*/ __le32 bg_generation;
+ __le32 bg_reserved1;
+ __le64 bg_next_group; /* Next group in my list, in
blocks */
-/*20*/ __u64 bg_parent_dinode; /* dinode which owns me, in
+/*20*/ __le64 bg_parent_dinode; /* dinode which owns me, in
blocks */
- __u64 bg_blkno; /* Offset on disk, in blocks */
-/*30*/ __u64 bg_reserved2[2];
+ __le64 bg_blkno; /* Offset on disk, in blocks */
+/*30*/ __le64 bg_reserved2[2];
/*40*/ __u8 bg_bitmap[0];
} ocfs2_group_desc;
@@ -506,7 +493,7 @@
return size / sizeof(struct _ocfs2_chain_rec);
}
-static inline int ocfs2_extent_recs_per_eb(struct super_block *sb)
+static inline u16 ocfs2_extent_recs_per_eb(struct super_block *sb)
{
int size;
@@ -516,9 +503,9 @@
return size / sizeof(struct _ocfs2_extent_rec);
}
-static inline int ocfs2_local_alloc_size(struct super_block *sb)
+static inline u16 ocfs2_local_alloc_size(struct super_block *sb)
{
- int size;
+ u16 size;
size = sb->s_blocksize -
offsetof(struct _ocfs2_dinode, id2.i_lab.la_bitmap);
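
Switching ocfs2_fs.h from __u16/__u32/__u64 to __le16/__le32/__le64 buys more than documentation: when the tree is checked with sparse, the __le*/__be* types are __bitwise-restricted, so mixing a raw host integer with an on-disk field without going through the conversion helpers draws a warning. A rough userspace illustration of that mechanism follows; the defines and cast-only converters below are illustrative stand-ins (and the casts are only correct on a little-endian host), not any header from this commit.

/* Check with:  sparse demo_bitwise.c   (a plain cc build stays silent) */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef u32 __bitwise le32;            /* restricted on-disk type */

/* Toy converters: identity casts, so only valid on a little-endian host. */
static inline le32 demo_cpu_to_le32(u32 x) { return (__force le32)x; }
static inline u32 demo_le32_to_cpu(le32 x) { return (__force u32)x; }

int main(void)
{
	le32 i_clusters = demo_cpu_to_le32(16);
	u32 cpu;

	cpu = demo_le32_to_cpu(i_clusters);    /* fine: goes through the helper */
	/* cpu = i_clusters + 1; */            /* sparse would warn: restricted type */

	return cpu == 16 ? 0 : 1;
}
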
Modified: trunk/fs/ocfs2/slot_map.c
===================================================================
--- trunk/fs/ocfs2/slot_map.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/slot_map.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -71,12 +71,12 @@
void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
{
int i;
- s16 *disk_info;
+ __le16 *disk_info;
/* we don't read the slot block here as ocfs2_super_lock
* should've made sure we have the most recent copy. */
spin_lock(&si->si_lock);
- disk_info = (s16 *) si->si_bh->b_data;
+ disk_info = (__le16 *) si->si_bh->b_data;
for (i = 0; i < si->si_size; i++)
si->si_global_node_nums[i] = le16_to_cpu(disk_info[i]);
@@ -90,7 +90,7 @@
struct ocfs2_slot_info *si)
{
int status, i;
- s16 *disk_info = (s16 *) si->si_bh->b_data;
+ __le16 *disk_info = (__le16 *) si->si_bh->b_data;
spin_lock(&si->si_lock);
for (i = 0; i < si->si_size; i++)
@@ -188,7 +188,8 @@
for(i = 0; i < si->si_num_slots; i++)
si->si_global_node_nums[i] = OCFS2_INVALID_SLOT;
- inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE, -1);
+ inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
Modified: trunk/fs/ocfs2/suballoc.c
===================================================================
--- trunk/fs/ocfs2/suballoc.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/suballoc.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -137,6 +137,11 @@
kfree(ac);
}
+static u32 ocfs2_bits_per_group(ocfs2_chain_list *cl)
+{
+ return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
+}
+
static int ocfs2_block_group_fill(ocfs2_journal_handle *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
@@ -164,15 +169,15 @@
memset(bg, 0, sb->s_blocksize);
strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
- bg->bg_size = ocfs2_group_bitmap_size(sb);
- bg->bg_bits = (u32) cl->cl_cpg * (u32) cl->cl_bpc;
- bg->bg_chain = my_chain;
+ bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
+ bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
+ bg->bg_chain = cpu_to_le16(my_chain);
bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
- bg->bg_parent_dinode = OCFS2_I(alloc_inode)->ip_blkno;
- bg->bg_blkno = group_blkno;
+ bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
+ bg->bg_blkno = cpu_to_le64(group_blkno);
/* set the 1st bit in the bitmap to account for the descriptor block */
ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
- bg->bg_free_bits_count = bg->bg_bits - 1;
+ bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
status = ocfs2_journal_dirty(handle, bg_bh);
if (status < 0)
@@ -193,8 +198,9 @@
u16 curr, best;
best = curr = 0;
- while (curr < cl->cl_count) {
- if (cl->cl_recs[best].c_total > cl->cl_recs[curr].c_total)
+ while (curr < le16_to_cpu(cl->cl_count)) {
+ if (le32_to_cpu(cl->cl_recs[best].c_total) >
+ le32_to_cpu(cl->cl_recs[curr].c_total))
best = curr;
curr++;
}
@@ -213,7 +219,7 @@
ocfs2_chain_list *cl;
ocfs2_alloc_context *ac = NULL;
ocfs2_journal_handle *handle = NULL;
- u32 bit_off, num_bits, tmp;
+ u32 bit_off, num_bits;
u16 alloc_rec;
u64 bg_blkno;
struct buffer_head *bg_bh = NULL;
@@ -233,7 +239,7 @@
cl = &fe->id2.i_chain;
status = ocfs2_reserve_clusters(osb,
handle,
- cl->cl_cpg,
+ le16_to_cpu(cl->cl_cpg),
&ac);
if (status < 0) {
if (status != -ENOSPC)
@@ -241,7 +247,8 @@
goto bail;
}
- credits = ocfs2_calc_group_alloc_credits(osb->sb, cl->cl_cpg);
+ credits = ocfs2_calc_group_alloc_credits(osb->sb,
+ le16_to_cpu(cl->cl_cpg));
handle = ocfs2_start_trans(osb, handle, credits);
if (!handle) {
status = -ENOMEM;
@@ -252,7 +259,7 @@
status = ocfs2_claim_clusters(osb,
handle,
ac,
- cl->cl_cpg,
+ le16_to_cpu(cl->cl_cpg),
&bit_off,
&num_bits);
if (status < 0) {
@@ -296,22 +303,18 @@
goto bail;
}
- cl->cl_recs[alloc_rec].c_free += bg->bg_free_bits_count;
- cl->cl_recs[alloc_rec].c_total += bg->bg_bits;
- cl->cl_recs[alloc_rec].c_blkno = bg_blkno;
- if (cl->cl_next_free_rec < cl->cl_count)
- cl->cl_next_free_rec++;
+ le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
+ le16_to_cpu(bg->bg_free_bits_count));
+ le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
+ cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
+ if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
+ le16_add_cpu(&cl->cl_next_free_rec, 1);
- tmp = le32_to_cpu(fe->id1.bitmap1.i_used);
- tmp += bg->bg_bits - bg->bg_free_bits_count;
- fe->id1.bitmap1.i_used = cpu_to_le32(tmp);
+ le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
+ le16_to_cpu(bg->bg_free_bits_count));
+ le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
+ le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
- tmp = le32_to_cpu(fe->id1.bitmap1.i_total);
- tmp += bg->bg_bits;
- fe->id1.bitmap1.i_total = cpu_to_le32(tmp);
-
- fe->i_clusters += cl->cl_cpg;
-
status = ocfs2_journal_dirty(handle, bh);
if (status < 0) {
mlog_errno(status);
@@ -319,11 +322,11 @@
}
spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
- OCFS2_I(alloc_inode)->ip_clusters = fe->i_clusters;
- fe->i_size = ocfs2_clusters_to_bytes(alloc_inode->i_sb,
- fe->i_clusters);
+ OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
+ le32_to_cpu(fe->i_clusters)));
spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
- i_size_write(alloc_inode, fe->i_size);
+ i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
alloc_inode->i_blocks =
ocfs2_align_bytes_to_sectors(i_size_read(alloc_inode));
@@ -366,7 +369,7 @@
fe = (ocfs2_dinode *) bh->b_data;
OCFS2_BUG_ON_INVALID_DINODE(fe);
- OCFS2_BUG_ON_RO(!(fe->i_flags & OCFS2_CHAIN_FL));
+ OCFS2_BUG_ON_RO(!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL)));
free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
le32_to_cpu(fe->id1.bitmap1.i_used);
@@ -522,7 +525,7 @@
ac->ac_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
- -1);
+ OCFS2_INVALID_SLOT);
if (!ac->ac_inode) {
status = -EINVAL;
mlog(ML_ERROR, "Could not get bitmap inode!\n");
@@ -656,9 +659,9 @@
bitmap = bg->bg_bitmap;
while((offset = ocfs2_find_next_zero_bit(bitmap,
- bg->bg_bits,
+ le16_to_cpu(bg->bg_bits),
start)) != -1) {
- if (offset == bg->bg_bits)
+ if (offset == le16_to_cpu(bg->bg_bits))
break;
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
@@ -718,7 +721,7 @@
mlog_entry_void();
OCFS2_BUG_ON_INVALID_GROUP_DESC(bg);
- BUG_ON(bg->bg_free_bits_count < num_bits);
+ BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
num_bits);
@@ -735,7 +738,7 @@
goto bail;
}
- bg->bg_free_bits_count -= num_bits;
+ le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
while(num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
@@ -760,13 +763,14 @@
BUG_ON(!cl->cl_next_free_rec);
best = curr = 0;
- while (curr < cl->cl_next_free_rec) {
- if (cl->cl_recs[curr].c_free > cl->cl_recs[best].c_free)
+ while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
+ if (le32_to_cpu(cl->cl_recs[curr].c_free) >
+ le32_to_cpu(cl->cl_recs[best].c_free))
best = curr;
curr++;
}
- BUG_ON(best >= cl->cl_next_free_rec);
+ BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
return best;
}
@@ -793,9 +797,9 @@
"top, prev = %"MLFu64"\n",
fe->i_blkno, chain, bg->bg_blkno, prev_bg->bg_blkno);
- fe_ptr = fe->id2.i_chain.cl_recs[chain].c_blkno;
- bg_ptr = bg->bg_next_group;
- prev_bg_ptr = prev_bg->bg_next_group;
+ fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
+ bg_ptr = le64_to_cpu(bg->bg_next_group);
+ prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -845,9 +849,9 @@
status = 0;
bail:
if (status < 0) {
- fe->id2.i_chain.cl_recs[chain].c_blkno = fe_ptr;
- bg->bg_next_group = bg_ptr;
- prev_bg->bg_next_group = prev_bg_ptr;
+ fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
+ bg->bg_next_group = cpu_to_le64(bg_ptr);
+ prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
}
mlog_exit(status);
@@ -857,7 +861,7 @@
static inline int ocfs2_block_group_reasonably_empty(ocfs2_group_desc *bg,
u32 wanted)
{
- return bg->bg_free_bits_count > wanted;
+ return le16_to_cpu(bg->bg_free_bits_count) > wanted;
}
/* return 0 on success, -ENOSPC to keep searching and any other < 0
@@ -933,8 +937,8 @@
bits_wanted, chain, OCFS2_I(alloc_inode)->ip_blkno);
status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
- cl->cl_recs[chain].c_blkno, &group_bh,
- OCFS2_BH_CACHED, alloc_inode);
+ le64_to_cpu(cl->cl_recs[chain].c_blkno),
+ &group_bh, OCFS2_BH_CACHED, alloc_inode);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -955,7 +959,7 @@
brelse(prev_group_bh);
prev_group_bh = NULL;
}
- next_group = bg->bg_next_group;
+ next_group = le64_to_cpu(bg->bg_next_group);
prev_group_bh = group_bh;
group_bh = NULL;
status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
@@ -1019,7 +1023,7 @@
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
- cl->cl_recs[chain].c_free -= *num_bits;
+ le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
status = ocfs2_journal_dirty(handle,
ac->ac_bh);
@@ -1042,7 +1046,7 @@
mlog(0, "Allocated %u bits from suballocator %"MLFu64"\n",
*num_bits, fe->i_blkno);
- *bg_blkno = bg->bg_blkno;
+ *bg_blkno = le64_to_cpu(bg->bg_blkno);
bail:
if (group_bh)
brelse(group_bh);
@@ -1101,7 +1105,7 @@
* because we only calculate enough journal credits for one
* relink per alloc. */
ac->ac_allow_chain_relink = 0;
- for (i = 0; i < cl->cl_next_free_rec; i ++) {
+ for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
if (i == victim)
continue;
if (!cl->cl_recs[i].c_free)
@@ -1374,7 +1378,7 @@
ocfs2_set_bit(bit_off + tmp,
(unsigned long *) undo_bg->bg_bitmap);
}
- bg->bg_free_bits_count += num_bits;
+ le16_add_cpu(&bg->bg_free_bits_count, num_bits);
status = ocfs2_journal_dirty(handle, group_bh);
if (status < 0)
@@ -1404,8 +1408,7 @@
mlog_entry_void();
OCFS2_BUG_ON_INVALID_DINODE(fe);
- BUG_ON((count + start_bit) >
- ((u32) cl->cl_cpg * (u32) cl->cl_bpc));
+ BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
mlog(0, "suballocator %"MLFu64": freeing %u bits from group %"MLFu64
", starting at %u\n",
@@ -1421,7 +1424,7 @@
group = (ocfs2_group_desc *) group_bh->b_data;
OCFS2_BUG_ON_INVALID_GROUP_DESC(group);
- BUG_ON((count + start_bit) > group->bg_bits);
+ BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
status = ocfs2_block_group_clear_bits(handle, alloc_inode,
group, group_bh,
@@ -1438,7 +1441,8 @@
goto bail;
}
- cl->cl_recs[group->bg_chain].c_free += count;
+ le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
+ count);
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
@@ -1468,7 +1472,7 @@
struct buffer_head *inode_alloc_bh,
ocfs2_dinode *di)
{
- u64 blk = di->i_blkno;
+ u64 blk = le64_to_cpu(di->i_blkno);
u16 bit = le16_to_cpu(di->i_suballoc_bit);
u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
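
The suballoc.c conversion leans on le16_add_cpu()/le32_add_cpu() for read-modify-write updates of on-disk counters instead of the old open-coded tmp variables. Conceptually they expand to the pattern below (sketch only; the real helpers may be defined elsewhere and may take an unsigned delta):

/* Conceptual expansion of le32_add_cpu(); le16_add_cpu() is the
 * same idea with the 16-bit conversions.  A negative val, as in
 * the bg_free_bits_count update above, subtracts. */
static inline void example_le32_add_cpu(__le32 *var, s32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}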
Modified: trunk/fs/ocfs2/super.c
===================================================================
--- trunk/fs/ocfs2/super.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/super.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -37,6 +37,7 @@
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/parser.h>
+#include <linux/crc32.h>
#include <cluster/nodemanager.h>
@@ -619,7 +620,7 @@
inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
- -1);
+ OCFS2_INVALID_SLOT);
if (!inode) {
mlog(ML_ERROR, "failed to get bitmap inode\n");
status = -EIO;
@@ -1187,14 +1188,6 @@
osb->s_feature_incompat =
le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_incompat);
-#ifdef CONFIG_ARCH_S390
- if (!OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, OCFS2_FEATURE_INCOMPAT_B0RKEN_ENDIAN)) {
- mlog(ML_ERROR, "couldn't mount because of endian mismatch\n");
- status = -EINVAL;
- goto bail;
- }
-#endif
-
if ((i = OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_INCOMPAT_SUPP))) {
mlog(ML_ERROR, "couldn't mount because of unsupported "
"optional features (%x).\n", i);
@@ -1252,7 +1245,7 @@
goto bail;
}
- if (ocfs2_clusters_to_blocks(osb->sb, di->i_clusters - 1)
+ if (ocfs2_clusters_to_blocks(osb->sb, le32_to_cpu(di->i_clusters) - 1)
> (u32)~0UL) {
mlog(ML_ERROR, "Volume might try to write to blocks beyond "
"what jbd can address in 32 bits.\n");
@@ -1266,8 +1259,10 @@
status = -ENOMEM;
goto bail;
}
- memcpy(&osb->net_key, &osb->uuid[i], sizeof(osb->net_key));
+ /* only checksuming the first 32 bits to differentiate from dlm */
+ osb->net_key = crc32_be(0, osb->uuid_str, sizeof(osb->net_key));
+
strncpy(osb->vol_label, di->id2.i_super.s_label, 63);
osb->vol_label[63] = '\0';
osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno);
@@ -1292,8 +1287,8 @@
/*
* global bitmap
*/
- inode = ocfs2_get_system_file_inode(osb,
- GLOBAL_BITMAP_SYSTEM_INODE, -1);
+ inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
if (!inode) {
status = -EINVAL;
mlog_errno(status);
@@ -1311,7 +1306,7 @@
}
di = (ocfs2_dinode *) bitmap_bh->b_data;
- osb->bitmap_cpg = di->id2.i_chain.cl_cpg;
+ osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total);
brelse(bitmap_bh);
mlog(0, "cluster bitmap inode: %"MLFu64", clusters per group: %u\n",
@@ -1370,7 +1365,7 @@
le16_to_cpu(di->id2.i_super.s_minor_rev_level),
OCFS2_MAJOR_REV_LEVEL,
OCFS2_MINOR_REV_LEVEL);
- } else if (bh->b_blocknr != di->i_blkno) {
+ } else if (bh->b_blocknr != le64_to_cpu(di->i_blkno)) {
mlog(ML_ERROR, "bad block number on superblock: "
"found %"MLFu64", should be %llu\n",
di->i_blkno, (unsigned long long)bh->b_blocknr);
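
In super.c the same rule applies to the superblock dinode: every on-disk field is converted at the point of use. A hypothetical wrapper showing the block-number sanity check pattern from the last hunk:

/* Sketch only: made-up helper around the check above. */
static int example_check_super_blkno(struct buffer_head *bh, ocfs2_dinode *di)
{
	/* i_blkno is stored little-endian; b_blocknr is CPU order */
	if (bh->b_blocknr != le64_to_cpu(di->i_blkno))
		return -EINVAL;
	return 0;
}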
Modified: trunk/fs/ocfs2/vote.c
===================================================================
--- trunk/fs/ocfs2/vote.c 2005-08-12 22:33:39 UTC (rev 2513)
+++ trunk/fs/ocfs2/vote.c 2005-08-12 23:35:01 UTC (rev 2514)
@@ -55,12 +55,12 @@
#define OCFS2_MESSAGE_TYPE_RESPONSE (0x2)
struct ocfs2_msg_hdr
{
- u32 h_response_id; /* used to lookup message handle on sending
+ __be32 h_response_id; /* used to lookup message handle on sending
* node. */
- u32 h_request;
- u64 h_blkno;
- u32 h_generation;
- u32 h_node_num; /* node sending this particular message. */
+ __be32 h_request;
+ __be64 h_blkno;
+ __be32 h_generation;
+ __be32 h_node_num; /* node sending this particular message. */
};
/* OCFS2_MAX_FILENAME_LEN is 255 characters, but we want to align this
@@ -70,12 +70,12 @@
{
struct ocfs2_msg_hdr v_hdr;
union {
- u32 v_generic1;
- s32 v_orphaned_slot; /* Used during delete votes */
- u32 v_nlink; /* Used during unlink votes */
+ __be32 v_generic1;
+ __be32 v_orphaned_slot; /* Used during delete votes */
+ __be32 v_nlink; /* Used during unlink votes */
} md1; /* Message type dependant 1 */
- u32 v_unlink_namelen;
- u64 v_unlink_parent;
+ __be32 v_unlink_namelen;
+ __be64 v_unlink_parent;
u8 v_unlink_dirent[OCFS2_VOTE_FILENAME_LEN];
};
@@ -88,8 +88,8 @@
struct ocfs2_response_msg
{
struct ocfs2_msg_hdr r_hdr;
- s32 r_response;
- s32 r_orphaned_slot;
+ __be32 r_response;
+ __be32 r_orphaned_slot;
};
struct ocfs2_vote_work {
@@ -378,16 +378,16 @@
struct ocfs2_response_msg response;
/* decode the network mumbo jumbo into local variables. */
- request = ntohl(hdr->h_request);
+ request = be32_to_cpu(hdr->h_request);
blkno = be64_to_cpu(hdr->h_blkno);
- generation = ntohl(hdr->h_generation);
- node_num = ntohl(hdr->h_node_num);
+ generation = be32_to_cpu(hdr->h_generation);
+ node_num = be32_to_cpu(hdr->h_node_num);
if (request == OCFS2_VOTE_REQ_DELETE)
- orphaned_slot = ntohl(msg->md1.v_orphaned_slot);
+ orphaned_slot = be32_to_cpu(msg->md1.v_orphaned_slot);
mlog(0, "processing vote: request = %u, blkno = %"MLFu64", "
"generation = %u, node_num = %u, priv1 = %u\n", request,
- blkno, generation, node_num, ntohl(msg->md1.v_generic1));
+ blkno, generation, node_num, be32_to_cpu(msg->md1.v_generic1));
if (!ocfs2_is_valid_vote_request(request)) {
mlog(ML_ERROR, "Invalid vote request %d from node %u\n",
@@ -454,9 +454,9 @@
/* fall through */
case OCFS2_VOTE_REQ_UNLINK:
parent_blkno = be64_to_cpu(msg->v_unlink_parent);
- namelen = ntohl(msg->v_unlink_namelen);
+ namelen = be32_to_cpu(msg->v_unlink_namelen);
/* new_nlink will be ignored in case of a rename vote */
- new_nlink = ntohl(msg->md1.v_nlink);
+ new_nlink = be32_to_cpu(msg->md1.v_nlink);
ocfs2_process_dentry_request(inode, rename, new_nlink,
parent_blkno, namelen,
msg->v_unlink_dirent);
@@ -474,9 +474,9 @@
response.r_hdr.h_response_id = hdr->h_response_id;
response.r_hdr.h_blkno = hdr->h_blkno;
response.r_hdr.h_generation = hdr->h_generation;
- response.r_hdr.h_node_num = htonl(osb->node_num);
- response.r_response = htonl(vote_response);
- response.r_orphaned_slot = htonl(orphaned_slot);
+ response.r_hdr.h_node_num = cpu_to_be32(osb->node_num);
+ response.r_response = cpu_to_be32(vote_response);
+ response.r_orphaned_slot = cpu_to_be32(orphaned_slot);
net_status = o2net_send_message(OCFS2_MESSAGE_TYPE_RESPONSE,
osb->net_key,
@@ -766,12 +766,12 @@
mlog_errno(-ENOMEM);
} else {
hdr = &request->v_hdr;
- hdr->h_node_num = htonl((unsigned int) osb->node_num);
- hdr->h_request = htonl(type);
+ hdr->h_node_num = cpu_to_be32(osb->node_num);
+ hdr->h_request = cpu_to_be32(type);
hdr->h_blkno = cpu_to_be64(blkno);
- hdr->h_generation = htonl(generation);
+ hdr->h_generation = cpu_to_be32(generation);
- request->md1.v_generic1 = htonl(priv);
+ request->md1.v_generic1 = cpu_to_be32(priv);
}
return request;
@@ -790,7 +790,7 @@
response_id = ocfs2_new_response_id(osb);
hdr = &request->v_hdr;
- hdr->h_response_id = htonl(response_id);
+ hdr->h_response_id = cpu_to_be32(response_id);
status = ocfs2_broadcast_vote(osb, request, response_id, &response,
callback);
@@ -842,8 +842,8 @@
int orphaned_slot, node;
struct inode *inode = priv;
- orphaned_slot = ntohl(resp->r_orphaned_slot);
- node = ntohl(resp->r_hdr.h_node_num);
+ orphaned_slot = be32_to_cpu(resp->r_orphaned_slot);
+ node = be32_to_cpu(resp->r_hdr.h_node_num);
mlog(0, "node %d tells us that inode %"MLFu64" is orphaned in slot "
"%d\n", node, OCFS2_I(inode)->ip_blkno, orphaned_slot);
@@ -861,7 +861,8 @@
OCFS2_I(inode)->ip_orphaned_slot
!= OCFS2_INVALID_SLOT, "Inode %"MLFu64": Node %d "
"says it's orphaned in slot %d, we think it's in %d\n",
- OCFS2_I(inode)->ip_blkno, ntohl(resp->r_hdr.h_node_num),
+ OCFS2_I(inode)->ip_blkno,
+ be32_to_cpu(resp->r_hdr.h_node_num),
orphaned_slot, OCFS2_I(inode)->ip_orphaned_slot);
OCFS2_I(inode)->ip_orphaned_slot = orphaned_slot;
@@ -913,7 +914,7 @@
dentry->d_name.name);
request->v_unlink_parent = cpu_to_be64(OCFS2_I(parent)->ip_blkno);
- request->v_unlink_namelen = htonl(dentry->d_name.len);
+ request->v_unlink_namelen = cpu_to_be32(dentry->d_name.len);
memcpy(request->v_unlink_dirent, dentry->d_name.name,
dentry->d_name.len);
}
@@ -1086,15 +1087,16 @@
resp = (struct ocfs2_response_msg *) msg->buf;
- response_id = ntohl(resp->r_hdr.h_response_id);
- node_num = ntohl(resp->r_hdr.h_node_num);
- response_status = ocfs2_translate_response(ntohl(resp->r_response));
+ response_id = be32_to_cpu(resp->r_hdr.h_response_id);
+ node_num = be32_to_cpu(resp->r_hdr.h_node_num);
+ response_status =
+ ocfs2_translate_response(be32_to_cpu(resp->r_response));
mlog(0, "received response message:\n");
mlog(0, "h_response_id = %u\n", response_id);
- mlog(0, "h_request = %u\n", ntohl(resp->r_hdr.h_request));
+ mlog(0, "h_request = %u\n", be32_to_cpu(resp->r_hdr.h_request));
mlog(0, "h_blkno = %"MLFu64"\n", be64_to_cpu(resp->r_hdr.h_blkno));
- mlog(0, "h_generation = %u\n", ntohl(resp->r_hdr.h_generation));
+ mlog(0, "h_generation = %u\n", be32_to_cpu(resp->r_hdr.h_generation));
mlog(0, "h_node_num = %u\n", node_num);
mlog(0, "r_response = %d\n", response_status);
@@ -1147,13 +1149,15 @@
mlog(0, "scheduling vote request:\n");
mlog(0, "h_response_id = %u\n",
- ntohl(work->w_msg.v_hdr.h_response_id));
- mlog(0, "h_request = %u\n", ntohl(work->w_msg.v_hdr.h_request));
+ be32_to_cpu(work->w_msg.v_hdr.h_response_id));
+ mlog(0, "h_request = %u\n", be32_to_cpu(work->w_msg.v_hdr.h_request));
mlog(0, "h_blkno = %"MLFu64"\n",
be64_to_cpu(work->w_msg.v_hdr.h_blkno));
- mlog(0, "h_generation = %u\n", ntohl(work->w_msg.v_hdr.h_generation));
- mlog(0, "h_node_num = %u\n", ntohl(work->w_msg.v_hdr.h_node_num));
- mlog(0, "v_generic1 = %u\n", ntohl(work->w_msg.md1.v_generic1));
+ mlog(0, "h_generation = %u\n",
+ be32_to_cpu(work->w_msg.v_hdr.h_generation));
+ mlog(0, "h_node_num = %u\n",
+ be32_to_cpu(work->w_msg.v_hdr.h_node_num));
+ mlog(0, "v_generic1 = %u\n", be32_to_cpu(work->w_msg.md1.v_generic1));
spin_lock(&osb->vote_task_lock);
list_add_tail(&work->w_list, &osb->vote_list);
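
The vote.c wire structures are now declared with __be32/__be64 and the htonl()/ntohl() calls become cpu_to_be32()/be32_to_cpu(). The byte order on the wire is unchanged; the gain is that sparse can flag a field that is sent or parsed without a conversion. A sketch of filling a request header with the new helpers (function name is made up):

/* Sketch only: packs a vote header for the wire. */
static void example_fill_msg_hdr(struct ocfs2_msg_hdr *hdr, u32 id,
				 u32 request, u64 blkno, u32 generation,
				 u32 node_num)
{
	hdr->h_response_id = cpu_to_be32(id);
	hdr->h_request = cpu_to_be32(request);
	hdr->h_blkno = cpu_to_be64(blkno);
	hdr->h_generation = cpu_to_be32(generation);
	hdr->h_node_num = cpu_to_be32(node_num);
}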
Copied: trunk/kapi-compat/include/sparse_endian_types.h (from rev 2512, branches/endian-safe/kapi-compat/include/sparse_endian_types.h)
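
The new kapi-compat/include/sparse_endian_types.h is presumably pulled in only on kernels whose linux/types.h lacks the bitwise endian typedefs; its contents are not quoted in this diff. An assumed shape for such a fallback header (guard name and exact form are guesses):

/* Assumed contents only -- the real file is not shown here. */
#ifndef SPARSE_ENDIAN_TYPES_H
#define SPARSE_ENDIAN_TYPES_H

#include <linux/types.h>

/* Older kernels lack these; without sparse they are plain integers. */
typedef __u16 __le16;
typedef __u32 __le32;
typedef __u64 __le64;
typedef __u16 __be16;
typedef __u32 __be32;
typedef __u64 __be64;

#endif /* SPARSE_ENDIAN_TYPES_H */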