Index: arch/ia64/Makefile.inc
===================================================================
--- arch/ia64/Makefile.inc	(revision 0d8d27c2e6791788594b9fe24fe1c14930fc1bc7)
+++ arch/ia64/Makefile.inc	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -65,5 +65,7 @@
 	arch/$(ARCH)/src/ivt.S \
 	arch/$(ARCH)/src/interrupt.c \
+	arch/$(ARCH)/src/mm/asid.c \
 	arch/$(ARCH)/src/mm/frame.c \
 	arch/$(ARCH)/src/mm/page.c \
+	arch/$(ARCH)/src/mm/tlb.c \
 	arch/$(ARCH)/src/drivers/it.c
Index: arch/ia64/include/mm/asid.h
===================================================================
--- arch/ia64/include/mm/asid.h	(revision 0d8d27c2e6791788594b9fe24fe1c14930fc1bc7)
+++ arch/ia64/include/mm/asid.h	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -34,11 +34,20 @@
 typedef __u32 asid_t;
 
-/*
- * ASID_MAX can range from 2^18 - 1 to 2^24 - 1,
- * depending on architecture implementation.
+/** Number of ia64 RIDs (Region Identifiers) per kernel ASID. */
+#define RIDS_PER_ASID		7
+#define RID_OVERFLOW		16777216	/* 2^24 */
+
+/**
+ * The point is to have ASID_MAX_ARCH big enough
+ * so that it is never reached and the ASID allocation
+ * mechanism in asid_get() never resorts to stealing.
  */
-#define ASID_MAX_ARCH	16777215	/* 2^24 - 1 */
+#define ASID_MAX_ARCH		((asid_t) -1)	/**< This value is never reached. */
 
-#define asid_find_free()	ASID_MAX_ARCH
+/**
+ * Value used to recognize the situation when all ASIDs were already allocated.
+ */
+#define ASID_OVERFLOW		(RID_OVERFLOW/RIDS_PER_ASID)
+
 #define asid_put_arch(x)
 
Index: arch/ia64/src/mm/asid.c
===================================================================
--- arch/ia64/src/mm/asid.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
+++ arch/ia64/src/mm/asid.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ASID management.
+ *
+ * Because ia64 has much wider ASIDs (18-24 bits) compared to other
+ * architectures (e.g. 8 bits on mips32 and 12 bits on sparc32), it is
+ * inappropriate to use the same methods (i.e. genarch/mm/asid_fifo.c) for
+ * all of them.
+ *
+ * Instead, ia64 assigns ASID values from a counter that eventually
+ * overflows. When this happens, the counter is reset and all TLBs are
+ * entirely invalidated. Furthermore, all address space structures,
+ * except for the one with asid == ASID_KERNEL, are assigned new ASID.
+ *
+ * It is important to understand that, in SPARTAN, one ASID represents
+ * RIDS_PER_ASID consecutive hardware RIDs (Region IDs).
+ *
+ * Note that the algorithm used can handle only the maximum of
+ * ASID_OVERFLOW - ASID_START address spaces at a time.
+ */
+
+#include <arch/mm/asid.h>
+#include <mm/asid.h>
+#include <mm/as.h>
+#include <mm/tlb.h>
+#include <list.h>
+#include <typedefs.h>
+#include <debug.h>
+
+/**
+ * Stores the ASID to be returned next.
+ * Must be only accessed when asidlock is held.
+ */
+static asid_t next_asid = ASID_START;
+
+/** Assign next ASID.
+ *
+ * On ia64, this function is used only to allocate ASID
+ * for a newly created address space. As a side effect,
+ * it might attempt to shootdown TLBs and reassign
+ * ASIDs to existing address spaces.
+ *
+ * When calling this function, interrupts must be disabled
+ * and the asidlock must be held.
+ *
+ * @return ASID for new address space.
+ */
+asid_t asid_find_free(void)
+{
+	as_t *as;
+	link_t *cur;
+
+	if (next_asid == ASID_OVERFLOW) {
+		/*
+		 * The counter has overflown.
+		 */
+		 
+		/*
+		 * Reset the counter.
+		 */
+		next_asid = ASID_START;
+
+		/*
+		 * Initiate TLB shootdown.
+		 */
+		tlb_shootdown_start(TLB_INVL_ALL, 0, 0, 0);
+
+		/*
+		 * Reassign ASIDs to existing address spaces.
+		 */
+		for (cur = as_with_asid_head.next; cur != &as_with_asid_head; cur = cur->next) {
+			ASSERT(next_asid < ASID_OVERFLOW);
+			
+			as = list_get_instance(cur, as_t, as_with_asid_link);
+			
+			spinlock_lock(&as->lock);
+			as->asid = next_asid++;
+			spinlock_unlock(&as->lock);
+		}
+
+		/*
+		 * Finish TLB shootdown.
+		 */
+		tlb_shootdown_finalize();
+		tlb_invalidate_all();
+	}
+	
+	ASSERT(next_asid < ASID_OVERFLOW);
+	return next_asid++;
+}
Index: arch/ia64/src/mm/tlb.c
===================================================================
--- arch/ia64/src/mm/tlb.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
+++ arch/ia64/src/mm/tlb.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2006 Jakub Jermar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TLB management.
+ */
+
+#include <mm/tlb.h>
+#include <arch/mm/asid.h>
+
+/** Invalidate all TLB entries.
+ *
+ * Because of ASID management, region registers must be reset
+ * with new RIDs derived from the potentially new ASID.
+ */
+void tlb_invalidate_all(void)
+{
+	/* TODO */
+}
+
+/** Invalidate entries belonging to an address space.
+ *
+ * @param asid Address space identifier.
+ */
+void tlb_invalidate_asid(asid_t asid)
+{
+	/* TODO */
+}
Index: genarch/src/mm/asid.c
===================================================================
--- genarch/src/mm/asid.c	(revision 0d8d27c2e6791788594b9fe24fe1c14930fc1bc7)
+++ genarch/src/mm/asid.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -126,4 +126,5 @@
 		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
 		tlb_shootdown_finalize();
+		tlb_invalidate_asid(asid);
 		as->asid = ASID_INVALID;
 		
Index: generic/src/mm/tlb.c
===================================================================
--- generic/src/mm/tlb.c	(revision 0d8d27c2e6791788594b9fe24fe1c14930fc1bc7)
+++ generic/src/mm/tlb.c	(revision 36b01bb280f8f00e1705fde42fc2edfaebfbe5ae)
@@ -55,23 +55,10 @@
 	
 	/*
-	 * TODO: assemble shootdown message.
+	 * TODO: wrap parameters into a message and
+	 * dispatch it to all CPUs excluding this one.
 	 */
+	
 	tlb_shootdown_ipi_send();
 
-	switch (type) {
-	    case TLB_INVL_ALL:
-		tlb_invalidate_all();
-		break;
-	    case TLB_INVL_ASID:
-		tlb_invalidate_asid(asid);
-		break;
-	    case TLB_INVL_PAGES:
-		tlb_invalidate_pages(asid, page, cnt);
-		break;
-	    default:
-		panic("unknown tlb_invalidate_type_t value: %d\n", type);
-		break;
-	}
-	
 busy_wait:	
 	for (i = 0; i<config.cpu_count; i++)
@@ -96,5 +83,5 @@
 	spinlock_lock(&tlblock);
 	spinlock_unlock(&tlblock);
-	tlb_invalidate_all();	/* TODO: use valid ASID */
+	tlb_invalidate_all();	/* TODO: be finer-grained in what to invalidate */
 	CPU->tlb_active = 1;
 }
