
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/xen/x86/hypervisor_machdep.c between version 1.11 and 1.11.8.4

version 1.11, 2008/10/21 15:46:32 → version 1.11.8.4, 2009/11/01 21:43:28
Line 13 (v.1.11) / Line 13 (v.1.11.8.4)

  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *      This product includes software developed by Christian Limpach.
- * 4. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
Line 77 (v.1.11) / Line 72 (v.1.11.8.4): __KERNEL_RCSID(0, "$NetBSD$");

 #include "opt_xen.h"

-#ifdef XEN3
 /*
  * arch-dependent p2m frame lists list (L3 and L2)
  * used by Xen for save/restore mappings
Line 88 (v.1.11) / Line 82 (v.1.11.8.4): static int l2_p2m_page_size; /* size of

 static void build_p2m_frame_list_list(void);
 static void update_p2m_frame_list_list(void);
-#endif /* XEN3 */

 // #define PORT_DEBUG 4
 // #define EARLY_DEBUG_EVENT
Line 132 (v.1.11) / Line 125 (v.1.11.8.4): stipending(void)

 		vci->evtchn_upcall_pending = 0;
 		/* NB. No need for a barrier here -- XCHG is a barrier
 		 * on x86. */
-#ifdef XEN3
 		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
-#else
-		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
-#endif
 		while ((l1i = xen_ffs(l1)) != 0) {
 			l1i--;
 			l1 &= ~(1UL << l1i);
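
The pattern preserved by this hunk -- atomically exchange the whole selector
word to zero, then peel off set bits with ffs -- is worth seeing in
isolation.  Below is a minimal standalone sketch, not taken from the file:
a plain assignment stands in for xen_atomic_xchg() and GCC's
__builtin_ffsl() stands in for xen_ffs().  In the kernel the exchange must
be a single atomic step, so a selector bit set by the hypervisor between
the read and the clear can never be lost.

#include <stdio.h>

int
main(void)
{
	unsigned long pending_sel = 0x15UL;	/* bits 0, 2 and 4 set */
	unsigned long l1;
	int l1i;

	/* stand-in for l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0) */
	l1 = pending_sel;
	pending_sel = 0;

	/* same loop shape as stipending() and do_hypervisor_callback() */
	while ((l1i = __builtin_ffsl(l1)) != 0) {
		l1i--;			/* ffs results are 1-based */
		l1 &= ~(1UL << l1i);	/* clear the bit before servicing it */
		printf("scan evtchn_pending word %d\n", l1i);
	}
	return 0;
}
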
Line 212 (v.1.11) / Line 201 (v.1.11.8.4): do_hypervisor_callback(struct intrframe

 		vci->evtchn_upcall_pending = 0;
 		/* NB. No need for a barrier here -- XCHG is a barrier
 		 * on x86. */
-#ifdef XEN3
 		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
-#else
-		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
-#endif
 		while ((l1i = xen_ffs(l1)) != 0) {
 			l1i--;
 			l1 &= ~(1UL << l1i);
Line 264 (v.1.11) / Line 249 (v.1.11.8.4): do_hypervisor_callback(struct intrframe

 #ifdef DIAGNOSTIC
 	if (level != ci->ci_ilevel)
 		printf("hypervisor done %08x level %d/%d ipending %08x\n",
-#ifdef XEN3
 		    (uint)vci->evtchn_pending_sel,
-#else
-		    (uint)HYPERVISOR_shared_info->evtchn_pending_sel,
-#endif
 		    level, ci->ci_ilevel, ci->ci_ipending);
 #endif
 }
Line 291 (v.1.11) / Line 272 (v.1.11.8.4): hypervisor_unmask_event(unsigned int ev)

 	 * interrupt edge' if the channel is masked.
 	 */
 	if (xen_atomic_test_bit(&s->evtchn_pending[0], ev) &&
-#ifdef XEN3
 	    !xen_atomic_test_and_set_bit(&vci->evtchn_pending_sel, ev>>LONG_SHIFT)) {
-#else
-	    !xen_atomic_test_and_set_bit(&s->evtchn_pending_sel, ev>>LONG_SHIFT)) {
-#endif
 		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
 		if (!vci->evtchn_upcall_mask)
 			hypervisor_force_callback();
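
The ev>>LONG_SHIFT in this hunk is ordinary bitmap bookkeeping: event
channel ev lives at bit (ev mod LONG_BIT) of word (ev / LONG_BIT) in
evtchn_pending[], and each bit of the per-vcpu selector summarizes one such
word.  A small sketch with assumed 32-bit values (LONG_SHIFT = 5 here; the
real constant comes from the port's headers):

#include <stdio.h>

#define LONG_SHIFT	5			/* assumed: log2(32) on ILP32 */
#define LONG_MASK	((1U << LONG_SHIFT) - 1)

int
main(void)
{
	unsigned int ev = 68;	/* an arbitrary example event channel */

	printf("word in evtchn_pending[]: %u\n", ev >> LONG_SHIFT);	/* 2 */
	printf("bit within that word:    %u\n", ev & LONG_MASK);	/* 4 */
	printf("evtchn_pending_sel bit:  %u\n", ev >> LONG_SHIFT);	/* 2 */
	return 0;
}
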
Line 380 (v.1.11) / Line 357 (v.1.11.8.4): hypervisor_set_ipending(uint32_t iplmask

 }

 void
-hypervisor_machdep_attach(void) {
+hypervisor_machdep_attach(void)
+{
-#ifdef XEN3
 	/* dom0 does not require the arch-dependent P2M translation table */
 	if ( !xendomain_is_dom0() ) {
 		build_p2m_frame_list_list();
+		sysctl_xen_sleepstate_setup();
 	}
-#endif
+}
+
+void
+hypervisor_machdep_resume(void)
+{
+	/* dom0 does not require the arch-dependent P2M translation table */
+	if ( !(xen_start_info.flags & SIF_INITDOMAIN) )
+		update_p2m_frame_list_list();
 }

-#ifdef XEN3
 /*
  * Generate the p2m_frame_list_list table,
  * needed for guest save/restore
  */
 static void
-build_p2m_frame_list_list(void) {
+build_p2m_frame_list_list(void)
+{
 	int fpp; /* number of page (frame) pointer per page */
 	unsigned long max_pfn;
 	/*
 	 * The p2m list is composed of three levels of indirection,
 	 * each layer containing MFNs pointing to lower level pages
 	 * The indirection is used to convert a given PFN to its MFN
 	 * Each N level page can point to @fpp (N-1) level pages
 	 * For example, for x86 32bit, we have:
 	 * - PAGE_SIZE: 4096 bytes
 	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
 	 * A L1 page contains the list of MFN we are looking for
 	 */
 	max_pfn = xen_start_info.nr_pages;
-	fpp = PAGE_SIZE / sizeof(paddr_t);
+	fpp = PAGE_SIZE / sizeof(vaddr_t);

 	/* we only need one L3 page */
-	l3_p2m_page = kmem_alloc(PAGE_SIZE, KM_NOSLEEP);
+	l3_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE,
+	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
 	if (l3_p2m_page == NULL)
 		panic("could not allocate memory for l3_p2m_page");

 	/*
 	 * Determine how many L2 pages we need for the mapping
 	 * Each L2 can map a total of @fpp L1 pages
 	 */
 	l2_p2m_page_size = howmany(max_pfn, fpp);

-	l2_p2m_page = kmem_alloc(l2_p2m_page_size * PAGE_SIZE, KM_NOSLEEP);
+	l2_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map,
+	    l2_p2m_page_size * PAGE_SIZE,
+	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
 	if (l2_p2m_page == NULL)
 		panic("could not allocate memory for l2_p2m_page");

 	/* We now have L3 and L2 pages ready, update L1 mapping */
 	update_p2m_frame_list_list();

 }
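
To make the sizing arithmetic above concrete, here is a worked example with
assumed numbers (a 128 MB i386 domU; none of these values come from the
diff itself).  With 4 KB pages and 4-byte frame pointers, fpp is 1024, and
howmany() rounds the L2 page count up so that a partly filled final page is
still allocated:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* as in <sys/param.h> */

int
main(void)
{
	unsigned long max_pfn = 32768;		/* 128 MB of 4 KB pages */
	int fpp = PAGE_SIZE / sizeof(uint32_t);	/* 1024 pointers per page */
	int l2_pages = howmany(max_pfn, fpp);	/* 32 L2 pages */

	printf("fpp = %d, L2 pages = %d, L3 slots used = %d\n",
	    fpp, l2_pages, howmany(l2_pages, fpp));	/* 1024, 32, 1 */
	return 0;
}
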
   
Line 438 (v.1.11) / Line 423 (v.1.11.8.4): build_p2m_frame_list_list(void) {

 /*
  * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
  */
 static void
-update_p2m_frame_list_list(void) {
+update_p2m_frame_list_list(void)
+{
 	int i;
 	int fpp; /* number of page (frame) pointer per page */
 	unsigned long max_pfn;

 	max_pfn = xen_start_info.nr_pages;
-	fpp = PAGE_SIZE / sizeof(paddr_t);
+	fpp = PAGE_SIZE / sizeof(vaddr_t);

 	for (i = 0; i < l2_p2m_page_size; i++) {
 		/*
 		 * Each time we start a new L2 page,
 		 * store its MFN in the L3 page
 		 */
 		if ((i % fpp) == 0) {
 			l3_p2m_page[i/fpp] = vtomfn(
 				(vaddr_t)&l2_p2m_page[i]);
 		}
 		/*
 		 * we use a shortcut
 		 * since @xpmap_phys_to_machine_mapping array
 		 * already contains PFN to MFN mapping, we just
 		 * set the l2_p2m_page MFN pointer to the MFN of the
 		 * according frame of @xpmap_phys_to_machine_mapping
 		 */
 		l2_p2m_page[i] = vtomfn((vaddr_t)
 			&xpmap_phys_to_machine_mapping[i*fpp]);
 	}

 	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
 					vtomfn((vaddr_t)l3_p2m_page);
 	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

 }
-#endif /* XEN3 */
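
For orientation, this is how a consumer such as the save/restore machinery
conceptually walks the finished structure to turn a PFN into an MFN.  The
sketch below simulates the three levels with ordinary in-process arrays and
a deliberately tiny fpp of 4; in the real list every level holds MFNs
rather than pointers, and each MFN must be mapped before it can be
dereferenced, which is elided here:

#include <stdio.h>

#define FPP	4	/* toy value; above, fpp = PAGE_SIZE / sizeof(vaddr_t) */

int
main(void)
{
	unsigned long l1a[FPP] = { 100, 101, 102, 103 };	/* pretend MFNs */
	unsigned long l1b[FPP] = { 200, 201, 202, 203 };
	unsigned long *l2[FPP] = { l1a, l1b };		/* L2 page -> L1 pages */
	unsigned long **l3[1] = { l2 };			/* L3 page -> L2 pages */
	unsigned long pfn = 5;

	/* L3 index: pfn/(fpp*fpp); L2 index: (pfn/fpp)%fpp; L1 index: pfn%fpp */
	unsigned long mfn =
	    l3[pfn / (FPP * FPP)][(pfn / FPP) % FPP][pfn % FPP];

	printf("pfn %lu -> mfn %lu\n", pfn, mfn);	/* pfn 5 -> mfn 201 */
	return 0;
}
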

Legend:
-	removed from v.1.11
+	added in v.1.11.8.4
 	unprefixed lines are unchanged context
