
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/uvm/uvm_map.c between version 1.328 and 1.329

version 1.328, 2014/03/05 05:35:55
version 1.329, 2014/07/18 12:19:09
Line 4183 (v1.328) / Line 4183 (v1.329): uvmspace_free(struct vmspace *vm)

         pool_cache_put(&uvm_vmspace_cache, vm);
 }

+static struct vm_map_entry *
+uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
+    int flags)
+{
+        struct vm_map_entry *new_entry;
+
+        new_entry = uvm_mapent_alloc(new_map, 0);
+        /* old_entry -> new_entry */
+        uvm_mapent_copy(old_entry, new_entry);
+
+        /* new pmap has nothing wired in it */
+        new_entry->wired_count = 0;
+
+        /*
+         * gain reference to object backing the map (can't
+         * be a submap, already checked this case).
+         */
+
+        if (new_entry->aref.ar_amap)
+                uvm_map_reference_amap(new_entry, flags);
+
+        if (new_entry->object.uvm_obj &&
+            new_entry->object.uvm_obj->pgops->pgo_reference)
+                new_entry->object.uvm_obj->pgops->pgo_reference(
+                        new_entry->object.uvm_obj);
+
+        /* insert entry at end of new_map's entry list */
+        uvm_map_entry_link(new_map, new_map->header.prev,
+            new_entry);
+
+        return new_entry;
+}
+
+/*
+ * share the mapping: this means we want the old and
+ * new entries to share amaps and backing objects.
+ */
+static void
+uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+        /*
+         * if the old_entry needs a new amap (due to prev fork)
+         * then we need to allocate it now so that we have
+         * something we own to share with the new_entry.   [in
+         * other words, we need to clear needs_copy]
+         */
+
+        if (UVM_ET_ISNEEDSCOPY(old_entry)) {
+                /* get our own amap, clears needs_copy */
+                amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
+                    0, 0);
+                /* XXXCDC: WAITOK??? */
+        }
+
+        uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
+}
+
+
+static void
+uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+        struct vm_map_entry *new_entry;
+
+        /*
+         * copy-on-write the mapping (using mmap's
+         * MAP_PRIVATE semantics)
+         *
+         * allocate new_entry, adjust reference counts.
+         * (note that new references are read-only).
+         */
+
+        new_entry = uvm_mapent_clone(new_map, old_entry, 0);
+
+        new_entry->etype |=
+            (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
+
+        /*
+         * the new entry will need an amap.  it will either
+         * need to be copied from the old entry or created
+         * from scratch (if the old entry does not have an
+         * amap).  can we defer this process until later
+         * (by setting "needs_copy") or do we need to copy
+         * the amap now?
+         *
+         * we must copy the amap now if any of the following
+         * conditions hold:
+         * 1. the old entry has an amap and that amap is
+         *    being shared.  this means that the old (parent)
+         *    process is sharing the amap with another
+         *    process.  if we do not clear needs_copy here
+         *    we will end up in a situation where both the
+         *    parent and child process are referring to the
+         *    same amap with "needs_copy" set.  if the
+         *    parent write-faults, the fault routine will
+         *    clear "needs_copy" in the parent by allocating
+         *    a new amap.   this is wrong because the
+         *    parent is supposed to be sharing the old amap
+         *    and the new amap will break that.
+         *
+         * 2. if the old entry has an amap and a non-zero
+         *    wire count then we are going to have to call
+         *    amap_cow_now to avoid page faults in the
+         *    parent process.   since amap_cow_now requires
+         *    "needs_copy" to be clear we might as well
+         *    clear it here as well.
+         */
+
+        if (old_entry->aref.ar_amap != NULL) {
+                if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
+                    VM_MAPENT_ISWIRED(old_entry)) {
+
+                        amap_copy(new_map, new_entry,
+                            AMAP_COPY_NOCHUNK, 0, 0);
+                        /* XXXCDC: M_WAITOK ... ok? */
+                }
+        }
+
+        /*
+         * if the parent's entry is wired down, then the
+         * parent process does not want page faults on
+         * access to that memory.  this means that we
+         * cannot do copy-on-write because we can't write
+         * protect the old entry.   in this case we
+         * resolve all copy-on-write faults now, using
+         * amap_cow_now.   note that we have already
+         * allocated any needed amap (above).
+         */
+
+        if (VM_MAPENT_ISWIRED(old_entry)) {
+
+                /*
+                 * resolve all copy-on-write faults now
+                 * (note that there is nothing to do if
+                 * the old mapping does not have an amap).
+                 */
+                if (old_entry->aref.ar_amap)
+                        amap_cow_now(new_map, new_entry);
+
+        } else {
+                /*
+                 * setup mappings to trigger copy-on-write faults
+                 * we must write-protect the parent if it has
+                 * an amap and it is not already "needs_copy"...
+                 * if it is already "needs_copy" then the parent
+                 * has already been write-protected by a previous
+                 * fork operation.
+                 */
+                if (old_entry->aref.ar_amap &&
+                    !UVM_ET_ISNEEDSCOPY(old_entry)) {
+                        if (old_entry->max_protection & VM_PROT_WRITE) {
+                                pmap_protect(old_map->pmap,
+                                    old_entry->start, old_entry->end,
+                                    old_entry->protection & ~VM_PROT_WRITE);
+                        }
+                        old_entry->etype |= UVM_ET_NEEDSCOPY;
+                }
+        }
+}
+
 /*
  *   F O R K   -   m a i n   e n t r y   p o i n t
  */
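
[Editor's note] The MAP_INHERIT_* codes dispatched on in the hunks below
are the same inheritance values userland can set with minherit(2).  As a
hedged illustration only (not part of the diff; NetBSD userland and a
4 KiB page assumed, error handling trimmed), this sketch shows the
behaviour the uvm_mapent_forkshared() path provides: after
minherit(..., MAP_INHERIT_SHARE) the parent and child share one amap, so
the child's store is visible to the parent even though the mapping is
MAP_PRIVATE.

#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* ask for share-on-fork instead of the default copy-on-write */
        if (minherit(p, 4096, MAP_INHERIT_SHARE) == -1)
                return 1;

        strcpy(p, "parent");
        if (fork() == 0) {
                strcpy(p, "child");     /* store through the shared amap */
                _exit(0);
        }
        wait(NULL);
        printf("%s\n", p);      /* prints "child": the write was shared */
        return 0;
}

Without the minherit() call the program would print "parent", because
private anonymous memory defaults to MAP_INHERIT_COPY (the
uvm_mapent_forkcopy() path).
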
Line 4200 (v1.328) / Line 4362 (v1.329): uvmspace_fork(struct vmspace *vm1)

         struct vm_map *old_map = &vm1->vm_map;
         struct vm_map *new_map;
         struct vm_map_entry *old_entry;
-        struct vm_map_entry *new_entry;
         UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);

         vm_map_lock(old_map);
Line 4230 (v1.328) / Line 4391 (v1.329): uvmspace_fork(struct vmspace *vm1)

                 switch (old_entry->inheritance) {
                 case MAP_INHERIT_NONE:
-
                         /*
                          * drop the mapping, modify size
                          */

Line 4238 (v1.328) / Line 4398 (v1.329): uvmspace_fork(struct vmspace *vm1)

                         break;

                 case MAP_INHERIT_SHARE:
-                        /*
-                         * share the mapping: this means we want the old and
-                         * new entries to share amaps and backing objects.
-                         */
-                        /*
-                         * if the old_entry needs a new amap (due to prev fork)
-                         * then we need to allocate it now so that we have
-                         * something we own to share with the new_entry.   [in
-                         * other words, we need to clear needs_copy]
-                         */
-
-                        if (UVM_ET_ISNEEDSCOPY(old_entry)) {
-                                /* get our own amap, clears needs_copy */
-                                amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
-                                    0, 0);
-                                /* XXXCDC: WAITOK??? */
-                        }
-
-                        new_entry = uvm_mapent_alloc(new_map, 0);
-                        /* old_entry -> new_entry */
-                        uvm_mapent_copy(old_entry, new_entry);
-
-                        /* new pmap has nothing wired in it */
-                        new_entry->wired_count = 0;
-
-                        /*
-                         * gain reference to object backing the map (can't
-                         * be a submap, already checked this case).
-                         */
-
-                        if (new_entry->aref.ar_amap)
-                                uvm_map_reference_amap(new_entry, AMAP_SHARED);
-
-                        if (new_entry->object.uvm_obj &&
-                            new_entry->object.uvm_obj->pgops->pgo_reference)
-                                new_entry->object.uvm_obj->
-                                    pgops->pgo_reference(
-                                        new_entry->object.uvm_obj);
-
-                        /* insert entry at end of new_map's entry list */
-                        uvm_map_entry_link(new_map, new_map->header.prev,
-                            new_entry);
-
+                        uvm_mapent_forkshared(new_map, old_map, old_entry);
                         break;
   
                 case MAP_INHERIT_COPY:
-
-                        /*
-                         * copy-on-write the mapping (using mmap's
-                         * MAP_PRIVATE semantics)
-                         *
-                         * allocate new_entry, adjust reference counts.
-                         * (note that new references are read-only).
-                         */
-
-                        new_entry = uvm_mapent_alloc(new_map, 0);
-                        /* old_entry -> new_entry */
-                        uvm_mapent_copy(old_entry, new_entry);
-
-                        if (new_entry->aref.ar_amap)
-                                uvm_map_reference_amap(new_entry, 0);
-
-                        if (new_entry->object.uvm_obj &&
-                            new_entry->object.uvm_obj->pgops->pgo_reference)
-                                new_entry->object.uvm_obj->pgops->pgo_reference
-                                    (new_entry->object.uvm_obj);
-
-                        /* new pmap has nothing wired in it */
-                        new_entry->wired_count = 0;
-
-                        new_entry->etype |=
-                            (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
-                        uvm_map_entry_link(new_map, new_map->header.prev,
-                            new_entry);
-
-                        /*
-                         * the new entry will need an amap.  it will either
-                         * need to be copied from the old entry or created
-                         * from scratch (if the old entry does not have an
-                         * amap).  can we defer this process until later
-                         * (by setting "needs_copy") or do we need to copy
-                         * the amap now?
-                         *
-                         * we must copy the amap now if any of the following
-                         * conditions hold:
-                         * 1. the old entry has an amap and that amap is
-                         *    being shared.  this means that the old (parent)
-                         *    process is sharing the amap with another
-                         *    process.  if we do not clear needs_copy here
-                         *    we will end up in a situation where both the
-                         *    parent and child process are referring to the
-                         *    same amap with "needs_copy" set.  if the
-                         *    parent write-faults, the fault routine will
-                         *    clear "needs_copy" in the parent by allocating
-                         *    a new amap.   this is wrong because the
-                         *    parent is supposed to be sharing the old amap
-                         *    and the new amap will break that.
-                         *
-                         * 2. if the old entry has an amap and a non-zero
-                         *    wire count then we are going to have to call
-                         *    amap_cow_now to avoid page faults in the
-                         *    parent process.   since amap_cow_now requires
-                         *    "needs_copy" to be clear we might as well
-                         *    clear it here as well.
-                         */
-
-                        if (old_entry->aref.ar_amap != NULL) {
-                                if ((amap_flags(old_entry->aref.ar_amap) &
-                                     AMAP_SHARED) != 0 ||
-                                    VM_MAPENT_ISWIRED(old_entry)) {
-
-                                        amap_copy(new_map, new_entry,
-                                            AMAP_COPY_NOCHUNK, 0, 0);
-                                        /* XXXCDC: M_WAITOK ... ok? */
-                                }
-                        }
-
-                        /*
-                         * if the parent's entry is wired down, then the
-                         * parent process does not want page faults on
-                         * access to that memory.  this means that we
-                         * cannot do copy-on-write because we can't write
-                         * protect the old entry.   in this case we
-                         * resolve all copy-on-write faults now, using
-                         * amap_cow_now.   note that we have already
-                         * allocated any needed amap (above).
-                         */
-
-                        if (VM_MAPENT_ISWIRED(old_entry)) {
-
-                                /*
-                                 * resolve all copy-on-write faults now
-                                 * (note that there is nothing to do if
-                                 * the old mapping does not have an amap).
-                                 */
-                                if (old_entry->aref.ar_amap)
-                                        amap_cow_now(new_map, new_entry);
-
-                        } else {
-
-                                /*
-                                 * setup mappings to trigger copy-on-write faults
-                                 * we must write-protect the parent if it has
-                                 * an amap and it is not already "needs_copy"...
-                                 * if it is already "needs_copy" then the parent
-                                 * has already been write-protected by a previous
-                                 * fork operation.
-                                 */
-
-                                if (old_entry->aref.ar_amap &&
-                                    !UVM_ET_ISNEEDSCOPY(old_entry)) {
-                                        if (old_entry->max_protection & VM_PROT_WRITE) {
-                                                pmap_protect(old_map->pmap,
-                                                    old_entry->start,
-                                                    old_entry->end,
-                                                    old_entry->protection &
-                                                    ~VM_PROT_WRITE);
-                                        }
-                                        old_entry->etype |= UVM_ET_NEEDSCOPY;
-                                }
-                        }
+                        uvm_mapent_forkcopy(new_map, old_map, old_entry);
+                        break;
+
+                default:
+                        KASSERT(0);
                         break;
-                }  /* end of switch statement */
+                }
                 old_entry = old_entry->next;
         }
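
[Editor's note] For contrast, a hedged sketch of the default
MAP_INHERIT_COPY behaviour implemented by uvm_mapent_forkcopy() above:
the child gets a copy-on-write snapshot, and the physical copy is
deferred until the child's first write fault (the "needs_copy" machinery
described in the big comment).  Illustration only; error handling trimmed.

#include <sys/mman.h>
#include <sys/wait.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);

        strcpy(p, "parent");
        if (fork() == 0) {
                strcpy(p, "child");     /* write fault gives the child its own page */
                assert(strcmp(p, "child") == 0);
                _exit(0);
        }
        wait(NULL);
        assert(strcmp(p, "parent") == 0);       /* parent's copy is untouched */
        return 0;
}

Had the parent wired the page with mlock(2) before forking, the
VM_MAPENT_ISWIRED() branch above would resolve the copy eagerly at fork
time via amap_cow_now(), because a wired entry must never take the
write-protect fault that lazy copy-on-write relies on.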
   

Legend:
-  removed from v.1.328
+  added in v.1.329
