Initial commit

This commit is contained in:
Yentl Van Tendeloo 2016-08-04 17:38:43 +02:00
commit 66a6860316
407 changed files with 1254365 additions and 0 deletions

949
src/pypdevs/DEVS.py Normal file

@@ -0,0 +1,949 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classes and tools for DEVS model specification
"""
from pypdevs.logger import debug, warn, info, error
from pypdevs.util import *
import pypdevs.accurate_time as time
class BaseDEVS(object):
"""
Abstract base class for AtomicDEVS and CoupledDEVS classes.
This class provides basic DEVS attributes and query/set methods.
"""
def __init__(self, name):
"""
Constructor
:param name: the name of the DEVS model
"""
# Prevent any attempt to instantiate this abstract class
if self.__class__ == BaseDEVS:
raise DEVSException ("Cannot instantiate abstract class '%s' ... "
% (self.__class__.__name__))
# The parent of the current model
self.parent = None
# The local name of the model
self.name = name
self.IPorts = []
self.OPorts = []
self.ports = []
# Initialise the times
self.time_last = (0.0, 0)
self.time_next = (0.0, 1)
self.location = None
# Variables used for optimisations
self.my_input = {}
self.my_output = {}
# The state queue, used for time warp
self.old_states = []
# List of all memoized states, only useful in distributed simulation
# with memoization enabled
self.memo = []
def simSettings(self, simulator):
"""
Modifies the simulation settings from within the model.
        This function is called _before_ direct connection and distribution are performed, so the user can still access the complete hierarchy.
.. note:: This function is *only* called on the root model of the simulation, thus the model passed to the constructor of the Simulator object.
:param simulator: the simulator object on which settings can be configured
"""
pass
def modelTransition(self, state):
"""
DEFAULT function for Dynamic Structure DEVS, always returning False (thus indicating that no structural change is needed)
:param state: a dict that can be used to save some kind of state, this object is maintained by the kernel and will be passed each time
:returns: bool -- whether or not a structural change is necessary
"""
return False
def getVCDVariables(self):
"""
Fetch all the variables, suitable for VCD variable generation
:returns: list -- all variables needed for VCD tracing
"""
var_list = []
for I in self.ports:
var_list.append([self.getModelFullName(), I.getPortName()])
return var_list
def removePort(self, port):
"""
Remove a port (either input or output) from the model, disconnecting all of its connections.
:param port: the port to remove
"""
if not hasattr(self, "full_name"):
raise DEVSException("removePort should only be called during a simulation")
if port.is_input:
self.IPorts.remove(port)
else:
self.OPorts.remove(port)
self.ports.remove(port)
# Also remove all connections to this port
self.server.getSelfProxy().dsRemovePort(port)
def addPort(self, name, is_input):
"""
Utility function to create a new port and add it everywhere where it is necessary
:param name: the name of the port
:param is_input: whether or not this is an input port
"""
name = name if name is not None else "port%s" % len(self.ports)
port = Port(is_input=is_input, name=name)
if is_input:
self.IPorts.append(port)
else:
self.OPorts.append(port)
port.port_id = len(self.ports)
self.ports.append(port)
port.host_DEVS = self
# If simulation is already running, the port should be direct connected too!
return port
def addInPort(self, name=None):
"""
Add an input port to the DEVS model.
addInPort is the only proper way to add input ports to a DEVS model.
As for the CoupledDEVS.addSubModel method, calls
to addInPort and addOutPort can appear in any DEVS'
descriptive class constructor, or the methods can be used with an
instantiated object.
The methods add a reference to the new port in the DEVS' IPorts
        attributes and set the port's host_DEVS attribute. The modeler
should typically save the returned reference somewhere.
:param name: the name of the port. A unique ID will be generated in case None is passed
:returns: port -- the generated port
"""
return self.addPort(name, True)
def addOutPort(self, name=None):
"""Add an output port to the DEVS model.
addOutPort is the only proper way to
add output ports to DEVS. As for the CoupledDEVS.addSubModel method, calls
to addInPort and addOutPort can appear in any DEVS'
descriptive class constructor, or the methods can be used with an
instantiated object.
The methods add a reference to the new port in the DEVS'
        OPorts attributes and set the port's host_DEVS attribute. The modeler
should typically save the returned reference somewhere.
:param name: the name of the port. A unique ID will be generated in case None is passed
:returns: port -- the generated port
"""
return self.addPort(name, False)
def getModelName(self):
"""
Get the local model name
:returns: string -- the name of the model
"""
if self.name is None:
return str(self.model_id)
else:
return str(self.name)
def getModelFullName(self):
"""
Get the full model name, including the path from the root
:returns: string -- the fully qualified name of the model
"""
return self.full_name
class AtomicDEVS(BaseDEVS):
"""
Abstract base class for all atomic-DEVS descriptive classes.
"""
def __init__(self, name=None):
"""
Constructor for an AtomicDEVS model
:param name: name of the model, can be None to have an automatically generated name
"""
# Prevent any attempt to instantiate this abstract class
if self.__class__ == AtomicDEVS:
raise DEVSException("Cannot instantiate abstract class '%s' ... "
% (self.__class__.__name__))
# The minimal constructor shall first call the superclass
# (i.e., BaseDEVS) constructor.
BaseDEVS.__init__(self, name)
self.elapsed = 0.0
self.state = None
self.relocatable = True
self.last_read_time = (0, 0)
def setLocation(self, location, force=False):
"""
Sets the location of the atomic DEVS model if it was not already set
:param location: the location to set
:param force: whether or not to force this location, even if another is already defined
"""
if self.location is None or force:
self.location = location
def fetchActivity(self, time, activities):
"""
Fetch the activity of the model up to a certain time
:param time: the time up to which the activity should be calculated
:param activities: dictionary containing all activities for the models
"""
accumulator = 0.0
for state in self.old_states:
if state.time_last[0] < time:
accumulator += state.activity
activities[self.model_id] = accumulator
def setGVT(self, gvt, activities, last_state_only):
"""
Set the GVT of the model, cleaning up the states vector as required
for the time warp algorithm
:param gvt: the new value of the GVT
:param activities: dictionary containing all activities for the models
:param last_state_only: whether or not to only use a single state for activity
"""
copy = None
activity = 0
for i in range(len(self.old_states)):
state = self.old_states[i]
if state.time_last[0] >= gvt:
# Possible that all elements should be kept,
# in which case it will return -1 and only keep the last element
# So the copy element should be AT LEAST 0
copy = max(0, i-1)
break
elif not last_state_only:
activity += state.activity
if self.old_states == []:
# We have no memory, so we are normally in sequential simulation
self.old_states = []
elif copy is None:
self.old_states = [self.old_states[-1]]
else:
self.old_states = self.old_states[copy:]
if last_state_only:
activity = self.old_states[0].activity
activities[self.model_id] = activity
def revert(self, time, memorize):
"""
Revert the model to the specified time. All necessary cleanup for this
model will be done (fossil collection).
:param time: the time up to which should be reverted
:param memorize: whether or not the saved states should still be kept for memoization
"""
new_state = len(self.old_states) - 1
for state in reversed(self.old_states[1:]):
if state.time_last < time:
break
new_state -= 1
state = self.old_states[new_state]
self.time_last = state.time_last
self.time_next = state.time_next
self.state = state.loadState()
if memorize:
# Reverse it too
self.memo = self.old_states[:-len(self.old_states) + new_state - 1:-1]
self.old_states = self.old_states[:new_state + 1]
# Check if one of the reverted states was ever read for the termination condition
if self.last_read_time > time:
            # It seems it was, so notify the main reversion algorithm of this
self.last_read_time = (0, 0)
return True
else:
return False
# NOTE clearing the myInput happens in the parent
def getState(self, request_time, first_call=True):
"""
For the distributed termination condition: fetch the state of the model at a certain time
:param request_time: the time (including age!) for which the state should be fetched
:param first_call: whether or not this is the first call of a possible recursive call
:returns: state -- the state at that time
"""
if self.location != MPIRedirect.local.name:
return getProxy(self.location).getStateAtTime(self.model_id,
request_time)
elif first_call:
# Shortcut if the call is local
return self.state
self.last_read_time = request_time
while 1:
for state in self.old_states:
if state.time_last > request_time:
return state.loadState()
# State not yet available... wait some time before trying again...
time.sleep(0.01)
def extTransition(self, inputs):
"""
DEFAULT External Transition Function.
Accesses state and elapsed attributes, as well as inputs
through the passed dictionary. Returns the new state.
The elapsed time is accessible as an attribute (i.e., self.elapsed), see :ref:`elapsed_time` for a detailed explanation.
.. note:: Should only write to the *state* attribute.
:param inputs: dictionary containing all ports and their corresponding outputs
:returns: state -- the new state of the model
"""
return self.state
def intTransition(self):
"""
DEFAULT Internal Transition Function.
.. note:: Should only write to the *state* attribute.
:returns: state -- the new state of the model
.. versionchanged:: 2.1 The *elapsed* attribute is no longer guaranteed to be correct as this isn't required by the DEVS formalism.
"""
return self.state
def confTransition(self, inputs):
"""
DEFAULT Confluent Transition Function.
Accesses state and elapsed attributes, as well as inputs
through the passed dictionary. Returns the new state.
.. note:: Should only write to the *state* attribute.
:param inputs: dictionary containing all ports and their corresponding outputs
:returns: state -- the new state of the model
"""
self.state = self.intTransition()
self.state = self.extTransition(inputs)
return self.state
def outputFnc(self):
"""
DEFAULT Output Function.
Accesses only state attribute. Returns the output on the different ports as a dictionary.
.. note:: Should **not** write to any attribute.
:returns: dictionary containing output ports as keys and lists of output on that port as value
.. versionchanged:: 2.1 The *elapsed* attribute is no longer guaranteed to be correct as this isn't required by the DEVS formalism.
"""
return {}
def timeAdvance(self):
"""
DEFAULT Time Advance Function.
.. note:: Should ideally be deterministic, though this is not mandatory for simulation.
:returns: the time advance of the model
.. versionchanged:: 2.1 The *elapsed* attribute is no longer guaranteed to be correct as this isn't required by the DEVS formalism.
"""
# By default, return infinity
return float('inf')
def preActivityCalculation(self):
"""
DEFAULT pre-transition activity fetcher. The returned value is passed to the *postActivityCalculation* function
:returns: something -- passed to the *postActivityCalculation*
"""
return time.time()
def postActivityCalculation(self, prevalue):
"""
DEFAULT post-transition activity fetcher. The returned value will be passed on to the relocator and MUST be an addable (e.g. integer, float, ...)
:param prevalue: the value returned from the *preActivityCalculation* method
:returns: addable (float, integer, ...) -- passed to the relocator
"""
return time.time() - prevalue
def flattenConnections(self):
"""
        Flattens the pickling graph, by removing backreferences from the ports.
"""
# It doesn't really matter what gets written in these hostDEVS attributes,
# as it will never be used. Though for readability, the model_id will be used
# to make it possible to do some debugging when necessary.
for port in self.IPorts:
port.host_DEVS = self.model_id
for port in self.OPorts:
port.host_DEVS = self.model_id
def unflattenConnections(self):
"""
        Unflattens the pickling graph, by reconstructing backreferences from the ports.
"""
for port in self.IPorts:
port.host_DEVS = self
for port in self.OPorts:
port.host_DEVS = self
def finalize(self, name, model_counter, model_ids, locations, select_hierarchy):
"""
Finalize the model hierarchy by doing all pre-simulation configuration
.. note:: Parameters *model_ids* and *locations* are updated by reference.
:param name: the name of the hierarchy above
:param model_counter: the model ID counter
:param model_ids: a list with all model_ids and their model
:param locations: dictionary of locations and where every model runs
:param select_hierarchy: hierarchy to perform selections in Classic DEVS
:returns: int -- the new model ID counter
"""
# Give a name
self.full_name = name + str(self.getModelName())
# Give a unique ID to the model itself
self.model_id = model_counter
self.select_hierarchy = select_hierarchy + [self]
# Add the element to its designated place in the model_ids list
model_ids.append(self)
# Do a quick check, since this is vital to correct operation
if model_ids[self.model_id] != self:
raise DEVSException("Something went wrong while initializing models: IDs don't match")
locations[self.location].append(self.model_id)
# Return the unique ID counter, incremented so it stays unique
return model_counter + 1
def getModelLoad(self, lst):
"""
Add this atomic model to the load of its location
:param lst: list containing all locations and their current load
:returns: int -- number of children in this subtree
"""
lst[self.location] += 1
self.num_children = 1
return self.num_children
class CoupledDEVS(BaseDEVS):
"""
Abstract base class for all coupled-DEVS descriptive classes.
"""
def __init__(self, name=None):
"""
Constructor.
:param name: the name of the coupled model, can be None for an automatically generated name
"""
# Prevent any attempt to instantiate this abstract class
if self.__class__ == CoupledDEVS:
raise DEVSException("Cannot instantiate abstract class '%s' ... "
% (self.__class__.__name__))
# The minimal constructor shall first call the superclass
# (i.e., BaseDEVS) constructor.
BaseDEVS.__init__(self, name)
# All components of this coupled model (the submodels)
self.component_set = []
def forceSequential(self):
"""
Force a sequential simulation
"""
self.setLocation(0, force=True)
def select(self, imm_children):
"""
DEFAULT select function, only used when using Classic DEVS simulation
:param imm_children: list of all children that want to transition
:returns: child -- a single child that is allowed to transition
"""
return imm_children[0]
def getModelLoad(self, lst):
"""
Fetch the number of atomic models at this model
:param lst: list containing all locations and their current load
:returns: number of atomic models in this subtree, including non-local ones
"""
children = 0
for i in self.component_set:
children += i.getModelLoad(lst)
self.num_children = children
return self.num_children
def finalize(self, name, model_counter, model_ids, locations, select_hierarchy):
"""
Finalize the model hierarchy by doing all pre-simulation configuration
.. note:: Parameters *model_ids* and *locations* are updated by reference.
:param name: the name of the hierarchy above
:param model_counter: the model ID counter
:param model_ids: a list with all model_ids and their model
:param locations: dictionary of locations and where every model runs
:param select_hierarchy: hierarchy to perform selections in Classic DEVS
:returns: int -- the new model ID counter
"""
# Set name, even though it will never be requested
self.full_name = name + str(self.getModelName())
for i in self.component_set:
model_counter = i.finalize(self.full_name + ".", model_counter,
model_ids, locations, select_hierarchy + [self])
return model_counter
def flattenConnections(self):
"""
        Flattens the pickling graph, by removing backreferences from the ports.
"""
for i in self.component_set:
i.flattenConnections()
def unflattenConnections(self):
"""
        Unflattens the pickling graph, by reconstructing backreferences from the ports.
"""
for i in self.component_set:
i.unflattenConnections()
def addSubModel(self, model, location = None):
"""
Adds a specified model to the current coupled model as its child. This
is the function that must be used to make distributed simulation
possible.
:param model: the model to be added as a child
:param location: the location at which the child must run
:returns: model -- the model that was created as a child
.. versionchanged:: 2.1.3
model can no longer be a string, this was previously a lot more efficient in partial distribution, though this functionality was removed together with the partial distribution functionality.
"""
model.parent = self
if location is not None:
location = int(location)
model.location = location if location is not None else self.location
if model.location is not None and isinstance(model, CoupledDEVS):
# Set the location of all children
for i in model.component_set:
i.setLocation(model.location)
if hasattr(self, "server"):
self.server.getSelfProxy().dsScheduleModel(model)
else:
self.component_set.append(model)
return model
def removeSubModel(self, model):
"""
Remove a specified model from the current coupled model, only callable while in a simulation.
:param model: the model to remove as a child
"""
if not hasattr(self, "full_name"):
raise DEVSException("removeSubModel can only be called _during_ a simulation run")
self.server.getSelfProxy().dsUnscheduleModel(model)
def disconnectPorts(self, p1, p2):
"""
Disconnect two ports
.. note:: If these ports are connected multiple times, **only one** of them will be removed.
:param p1: the port at the start of the connection
:param p2: the port at the end of the connection
"""
if not hasattr(self, "full_name"):
raise DEVSException("removeSubModel can only be called _during_ a simulation run")
new_connection = []
found = False
for p in p1.outline:
if p == p2 and not found:
found = True
else:
new_connection.append(p)
p1.outline = new_connection
new_connection = []
found = False
for p in p2.inline:
if p == p1 and not found:
found = True
else:
new_connection.append(p)
p2.inline = new_connection
self.server.getSelfProxy().dsDisconnectPorts(p1, p2)
def connectPorts(self, p1, p2, z = None):
"""
        Connects two ports together. The coupling starts at p1 and ends at
        p2; valid couplings are either input-to-input, output-to-input or
        output-to-output.
        :param p1: the port at the start of the new connection
        :param p2: the port at the end of the new connection
        :param z: the translation function for the events on this connection
"""
# For a coupling to be valid, two requirements must be met:
# 1- at least one of the DEVS the ports belong to is a child of the
# coupled-DEVS (i.e., self), while the other is either the
# coupled-DEVS itself or another of its children. The DEVS'
# 'parenthood relationship' uniquely determine the type of coupling;
# 2- the types of the ports are consistent with the 'parenthood' of the
# associated DEVS. This validates the coupling determined above.
# Internal Coupling:
if ((p1.host_DEVS.parent == self and p2.host_DEVS.parent == self) and
(p1.type() == 'OUTPORT' and p2.type() == 'INPORT')):
if p1.host_DEVS is p2.host_DEVS:
raise DEVSException(("In coupled model '%s', connecting ports" +
" '%s' and '%s' belong to the same model" +
" '%s'. " +
" Direct feedback coupling not allowed") % (
self.getModelFullName(),
p1.getPortFullName(),
p2.getPortFullName(),
p1.host_DEVS.getModelFullName()))
else:
p1.outline.append(p2)
p2.inline.append(p1)
# External input couplings:
elif ((p1.host_DEVS == self and p2.host_DEVS.parent == self) and
(p1.type() == p2.type() == 'INPORT')):
p1.outline.append(p2)
p2.inline.append(p1)
        # External output couplings:
elif ((p1.host_DEVS.parent == self and p2.host_DEVS == self) and
(p1.type() == p2.type() == 'OUTPORT')):
p1.outline.append(p2)
p2.inline.append(p1)
# Other cases (illegal coupling):
else:
raise DEVSException(("Illegal coupling in coupled model '%s' " +
"between ports '%s' and '%s'") % (
self.getModelName(), p1.getPortName(),
p2.getPortName()))
p1.z_functions[p2] = z
if hasattr(self, "full_name"):
# TODO modify
self.server.getSelfProxy().dsConnectPorts(p1, p2)
def setLocation(self, location, force=False):
"""
Sets the location of this coupled model and its submodels if they don't have their own preference.
:param location: the location to set
:param force: whether or not to force this location, even if another is already defined
"""
if self.location is None or force:
self.location = location
for child in self.component_set:
child.setLocation(location, force)
class RootDEVS(BaseDEVS):
"""
The artificial RootDEVS model is the only 'coupled' model in the simulation after direct connection is performed.
"""
def __init__(self, components, models, scheduler_type):
"""
Basic constructor.
        :param components: the atomic DEVS models that are the children; only those that run locally should be mentioned
:param models: all models that have to be passed to the scheduler, thus all models, even non-local ones
:param scheduler_type: type of scheduler to use (string representation)
"""
BaseDEVS.__init__(self, "ROOT model")
self.component_set = components
self.time_next = (float('inf'), 1)
self.local_model_ids = set([None])
for i in self.component_set:
self.local_model_ids.add(i.model_id)
self.models = models
self.scheduler_type = scheduler_type
self.listeners = {}
def redoDirectConnection(self, ports):
"""
Redo direct connection for a specified port, and all ports connected to it.
:param ports: the ports that have changed.
"""
# Find all changed ports and redo their direct connection
worklist = list(ports)
for outport in worklist:
worklist.extend(outport.inline)
for p in set(worklist):
directConnectPort(p, self.listeners)
def directConnect(self):
"""
Perform direct connection on the models again
"""
directConnect(self.models, self.listeners)
def setScheduler(self, scheduler_type):
"""
        Set the scheduler to the desired type. Will overwrite the previously present scheduler.
:param scheduler_type: type of scheduler to use (string representation)
"""
if isinstance(scheduler_type, tuple):
try:
exec("from pypdevs.schedulers.%s import %s" % scheduler_type)
except:
exec("from %s import %s" % scheduler_type)
nr_models = len(self.models)
self.scheduler = eval("%s(self.component_set, EPSILON, nr_models)"
% scheduler_type[1])
else:
raise DEVSException("Unknown Scheduler: " + str(scheduler_type))
def setGVT(self, gvt, activities, last_state_only):
"""
Sets the GVT of this coupled model
:param gvt: the time to which the GVT should be set
:param activities: dictionary containing all activities for the models
:param last_state_only: whether or not to use the last state for activity
"""
for i in self.component_set:
i.setGVT(gvt, activities, last_state_only)
def fetchActivity(self, time, activities):
"""
Fetch the activity of the model up to a certain time
:param time: the time up to which the activity should be calculated
:param activities: dictionary containing all activities for the models
"""
for i in self.component_set:
i.fetchActivity(time, activities)
def revert(self, time, memorize):
"""
Revert the coupled model to the specified time, all submodels will also
be reverted.
        :param time: the time up to which reversion should happen
:param memorize: whether or not the saved states should still be kept for memoization
"""
reschedules = set()
controller_revert = False
for child in self.component_set:
if child.time_last >= time:
controller_revert |= child.revert(time, memorize)
# Was reverted, so reschedule
reschedules.add(child)
# Always clear the inputs, as it is possible that there are only
# partial results, which doesn't get found in the time_last >= time
child.my_input = {}
self.scheduler.massReschedule(reschedules)
self.setTimeNext()
return controller_revert
def setTimeNext(self):
"""
Reset the timeNext
"""
try:
self.time_next = self.scheduler.readFirst()
except IndexError:
# No element found in the scheduler, so put it to INFINITY
self.time_next = (float('inf'), 1)
class Port(object):
"""
Class for DEVS model ports (both input and output). This class provides basic port attributes and query methods.
"""
def __init__(self, is_input, name=None):
"""
        Constructor. Creates an input port if is_input evaluates to True, and
an output port otherwise.
:param is_input: whether or not this is an input port
:param name: the name of the port. If None is provided, a unique ID is generated
"""
self.inline = []
self.outline = []
self.host_DEVS = None
self.msg_count = 0
# The name of the port
self.name = name
self.is_input = is_input
self.z_functions = {}
def getPortName(self):
"""
Returns the name of the port
:returns: local name of the port
"""
return self.name
def getPortFullName(self):
"""
Returns the complete name of the port
:returns: fully qualified name of the port
"""
return "%s.%s" % (self.host_DEVS.getModelFullName(), self.getPortName())
def type(self):
"""
Returns the 'type' of the object
:returns: either 'INPORT' or 'OUTPORT'
"""
if self.is_input:
return 'INPORT'
else:
return 'OUTPORT'
def appendZ(first_z, new_z):
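    """
    Compose two event translation (z) functions into a single one that
    applies first_z and then new_z; if either is None, the other one is
    returned unchanged.
    :param first_z: the first z function (or None)
    :param new_z: the second z function (or None)
    :returns: the composed z function (or None if both are None)
    """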
if first_z is None:
return new_z
elif new_z is None:
return first_z
else:
return lambda x: new_z(first_z(x))
class ExternalWrapper(AtomicDEVS):
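    """
    Wrapper that presents an arbitrary callback function as a (fake)
    AtomicDEVS model, so that ports being listened on can be routed to it
    like any other model during direct connection.
    """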
def __init__(self, function):
AtomicDEVS.__init__(self, "Fake")
self.f = function
self.model_id = None
self.full_name = None
def extTransition(self, inputs):
        # Fake object is created with a single fake port, so unpack that
        self.f(list(self.my_input.values())[0])
def directConnectPort(outport, listeners):
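    """
    Rebuild the routing table (routing_outline) of a single port by expanding
    all couplings that pass through coupled models into direct connections to
    atomic models, composing the z functions along the way.
    :param outport: the port whose routing_outline should be (re)built
    :param listeners: dictionary of ports that are being listened on
    """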
# The new contents of the line
outport.routing_outline = []
worklist = [(p, outport.z_functions.get(p, None))
for p in outport.outline]
for outline, z in worklist:
if outline in listeners.keys():
# This port is being listened on, so just add it as a fake model
fake_port = Port(is_input=False,name="Fake")
fake_port.host_DEVS = ExternalWrapper(listeners[outline])
outport.routing_outline.append((fake_port, z))
# If it is a coupled model, we must expand this model
if isinstance(outline.host_DEVS, CoupledDEVS):
for inline in outline.outline:
# Add it to the current iterating list, so we can just continue
entry = (inline, appendZ(z, outline.z_functions.get(inline, None)))
worklist.append(entry)
# If it is a Coupled model, we should just continue
# expanding it and not add it to the finished line
if not isinstance(inline.host_DEVS, CoupledDEVS):
entry = (inline, appendZ(z, outline.z_functions.get(inline, None)))
outport.routing_outline.append(entry)
else:
for ol, z in outport.routing_outline:
if ol == outline:
break
else:
# Add to the new line if it isn't already there
# Note that it isn't really mandatory to check for this,
# it is a lot cleaner to do so.
# This will greatly increase the complexity of the connector though
outport.routing_outline.append((outline, z))
def directConnect(component_set, listeners):
"""
    Perform direct connection on the model hierarchy containing this component_set
    :param component_set: the iterable to direct connect
    :param listeners: dictionary of ports that are being listened on
    :returns: the direct connected component_set
"""
new_list = []
# Search for root model
root = component_set[0]
while root.parent is not None:
root = root.parent
component_set = [root]
for i in component_set:
if isinstance(i, CoupledDEVS):
component_set.extend(i.component_set)
else:
# Found an atomic model
new_list.append(i)
# Also perform direct connection on all ports of the Coupled DEVS models, should injection ever be wanted
for i in component_set:
# Remap the output ports
for outport in i.OPorts:
directConnectPort(outport, listeners)
if isinstance(i, CoupledDEVS):
for inport in i.IPorts:
directConnectPort(inport, listeners)
return new_list
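
A minimal usage sketch of the API defined in this file (illustration only, not part of the committed file), assuming nothing beyond the AtomicDEVS and CoupledDEVS classes above and their documented methods: it shows how ports, submodels and couplings are typically declared.

    from pypdevs.DEVS import AtomicDEVS, CoupledDEVS

    class Generator(AtomicDEVS):
        def __init__(self):
            AtomicDEVS.__init__(self, "Generator")
            self.state = 0
            self.outport = self.addOutPort("out")
        def timeAdvance(self):
            # Next internal transition after 1.0 time units
            return 1.0
        def outputFnc(self):
            # Output the current counter value on the output port
            return {self.outport: [self.state]}
        def intTransition(self):
            # Increment the counter after every output
            return self.state + 1

    class Collector(AtomicDEVS):
        def __init__(self):
            AtomicDEVS.__init__(self, "Collector")
            self.state = []
            self.inport = self.addInPort("in")
        def extTransition(self, inputs):
            # Append all received events to the state
            return self.state + inputs[self.inport]

    class Experiment(CoupledDEVS):
        def __init__(self):
            CoupledDEVS.__init__(self, "Experiment")
            gen = self.addSubModel(Generator())
            col = self.addSubModel(Collector())
            self.connectPorts(gen.outport, col.inport)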

252
src/pypdevs/MPIRedirect.py Normal file

@@ -0,0 +1,252 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class containing a kind of RMI implementation over MPI.
"""
oneways = frozenset(["simulate",
"receiveControl",
"receive",
"finishSimulation",
"notifyWait",
"notifyRun",
"prepare",
"receiveAntiMessages",
"migrationUnlock",
"notifyMigration",
"requestMigrationLock",
"setGVT"])
import pypdevs.middleware as middleware
class MPIFaker(object):
"""
A dummy implementation of MPI4Py if none is found
"""
# Don't follow coding style here, as we need to be compatible with the mpi4py interface
@staticmethod
def Get_size():
"""
Return the size of the MPI world. Always 1, since it is only used in cases where local simulation is done.
:returns: int -- number of MPI processes running
"""
return 1
@staticmethod
def Get_rank():
"""
Return the rank of the current process in the MPI world. Always 0, since it is only used in cases where local simulation is done.
:returns: int -- rank of the current process
"""
return 0
try:
from mpi4py import MPI
COMM_WORLD = MPI.COMM_WORLD
except ImportError:
# MPI4Py not found, fall back to the dummy implementation
COMM_WORLD = MPIFaker()
import threading
from pypdevs.logger import *
def cleaning():
"""
Clean up the list of all waiting asynchronous connections
    Should be run on a separate thread and will simply wait on the connection status to be 'complete'. This is necessary for the MPI specification.
"""
import pypdevs.accurate_time as time
while 1:
try:
# This is atomic (at least where it matters)
MPI.Request.Wait(MPIRedirect.lst.pop())
except IndexError:
# List is empty
time.sleep(1)
except:
# Can happen during shutdown, though it won't be recognized as 'AttributeError'
pass
class MPIRedirect(object):
"""
    Redirects all calls on an instance of this class to the server for which it was created, using MPI (or the dummy implementation).
    For speed, it contains an optimisation for calls that turn out to be local (it will simply start a thread then). This complete
    implementation is based on so-called 'magic functions' from Python.
"""
# Reserve 50 slots, this is (hopefully) way too much, though the backend would crash if we run out of these...
# Honestly, if you have 50 connections for which you are waiting, you will have worse problems than running out of IDs
waiting = [None] * 50
# Don't use range itself, as this doesn't work in Python3
free_ids = [i for i in range(50)]
noproxy = frozenset(["__getnewargs__",
"__getinitargs__",
"__str__",
"__repr__"])
local = None
lst = []
if COMM_WORLD.Get_size() > 1:
thrd = threading.Thread(target=cleaning, args=[])
thrd.daemon = True
thrd.start()
def __init__(self, rank):
"""
Constructor.
:param rank: the rank of the server to redirect the call to
"""
self.rank = int(rank)
self.oneway = oneways
def __getinitargs__(self):
"""
For pickling
:returns: list containing the rank
"""
return [self.rank]
def __getstate__(self):
"""
For pickling
:returns: dictionary containing the rank and the oneway list
"""
return {"rank": self.rank, "oneway": self.oneway}
def __setstate__(self, state):
"""
For pickling
:param state: the dictionary provided by the *__getstate__* method
"""
self.rank = state["rank"]
self.oneway = state["oneway"]
def __getattr__(self, name):
"""
Determine whether or not we should redirect the call to the local or the remote server
:param name: the name of the function to call
:returns: function -- function to be actually called to perform the action
"""
if name in MPIRedirect.noproxy:
raise AttributeError(name)
def newcall(*args, **kwargs):
"""
A call to a remote location
"""
return MPIRedirect.remoteCall(self, name, *args, **kwargs)
return newcall
def remoteCall(self, method, *args, **kwargs):
"""
Make the remote call
:param method: method name to call (as a string)
:returns: return value of the called method; always None in case it is a one-way call
"""
# Unique tag, but at least 2 (0 reserved for exit, 1 is reserved for calls)
wait = str(method) not in self.oneway
if wait:
call_id = MPIRedirect.free_ids.pop()
else:
# Mention that we are not waiting for a reply
call_id = None
data = [call_id, method, args, kwargs]
if wait:
MPIRedirect.waiting[call_id] = event = threading.Event()
MPIRedirect.lst.append(COMM_WORLD.isend(data, dest=self.rank, tag=1))
if wait:
event.wait()
response = MPIRedirect.waiting[call_id]
# Clear the object from memory
MPIRedirect.waiting[call_id] = None
MPIRedirect.free_ids.append(call_id)
return response
class LocalRedirect(object):
"""
Local redirector class
"""
def localCall(self, method, *args, **kwargs):
"""
Actually perform the local call
:param method: the name of the method
:returns: the return value of the function, None if it is a oneway call
"""
func = getattr(self.server, method)
if str(method) in self.oneway:
threading.Thread(target=func, args=args, kwargs=kwargs).start()
else:
return func(*args, **kwargs)
def __init__(self, server):
"""
Constructor.
:param server: the local server
"""
self.server = server
self.oneway = oneways
def __getattr__(self, name):
"""
Determine whether or not we should redirect the call to the local or the remote server
:param name: the name of the function to call
:returns: function -- function to be actually called to perform the action
"""
if name in MPIRedirect.noproxy:
raise AttributeError(name)
def localcall(*args, **kwargs):
"""
A call to a local location
"""
return LocalRedirect.localCall(self, name, *args, **kwargs)
return localcall
def __getinitargs__(self):
"""
For pickling
        :returns: list containing the server
"""
return [self.server]
def __getstate__(self):
"""
For pickling
        :returns: dictionary containing the oneway list
"""
return {"oneway": self.oneway}
def __setstate__(self, state):
"""
For pickling
:param state: the dictionary provided by the *__getstate__* method
"""
self.oneway = state["oneway"]
# No need to save the server, as it is impossible to restore it anyway
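
An illustrative sketch of how these proxies are used (illustration only; DummyServer is a hypothetical stand-in for a simulation kernel server and is not part of the commit):

    from pypdevs.MPIRedirect import LocalRedirect

    class DummyServer(object):
        def getName(self):
            return "kernel-0"
        def simulate(self):
            # 'simulate' is listed in 'oneways', so the proxy runs it on a
            # separate thread and returns immediately
            pass

    proxy = LocalRedirect(DummyServer())
    print(proxy.getName())   # synchronous call: returns "kernel-0"
    proxy.simulate()         # one-way call: returns None without waiting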

19
src/pypdevs/__init__.py Normal file

@@ -0,0 +1,19 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyPDEVS source code, create atomic and/or coupled models and simulate
them using the Simulator class.
"""


@@ -0,0 +1,11 @@
import time as python_time
import sys
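# Thin wrapper around the standard library clock: time.clock() is used on
# Windows and time.time() elsewhere (time.clock() typically offers a much
# finer resolution on Windows).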
def time():
if sys.platform == "win32":
return python_time.clock()
else:
return python_time.time()
def sleep(t):
python_time.sleep(t)


@@ -0,0 +1,89 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions to visualize various kinds of data in a Cell DEVS way, that is: by creating a matrix containing single values. This matrix can then be processed by e.g. gnuplot to create a heatmap. Note that it is limited to 2D maps, as these are the most frequent and simplest to conceptually grasp.
These functions are supposed to be used later on in development for the Activity-Aware part.
"""
def visualizeLocations(kernel):
"""
Visualize the locations in a Cell DEVS way
:param kernel: a basesimulator object, to fetch the location of every model
"""
location_map = [[0] * kernel.y_size for _ in range(kernel.x_size)]
for i, loc in enumerate(kernel.destinations):
try:
model = kernel.model_ids[i]
if isinstance(loc, int):
                location_map[model.x][model.y] = loc
            else:
                location_map[model.x][model.y] = kernel.name
except AttributeError:
pass
visualizeMatrix(location_map, "%i", "locations-%f" % max(0, kernel.gvt))
def visualizeActivity(sim):
"""
Visualize the activity in a Cell DEVS way
:param sim: the simulator object, to access the model and their activity
"""
activities = []
cached = {}
import pypdevs.middleware as middleware
for i in range(len(sim.server.proxies)):
proxy = sim.controller.getProxy(i)
cached.update(proxy.getTotalActivity((float('inf'), float('inf'))))
for aDEVS in sim.model.component_set:
model_id = aDEVS.model_id
activities.append([cached[model_id], aDEVS])
if sim.x_size > 0 and sim.y_size > 0:
activity_map = [[0.0] * sim.y_size for i in range(sim.x_size)]
for entry in activities:
try:
activity_map[entry[1].x][entry[1].y] = entry[0]
except AttributeError:
pass
visualizeMatrix(activity_map, "%.6f", "activity")
else:
activities.sort(key=lambda i: i[1].getModelFullName())
for entry in activities:
print("%30s -- %.6f" % (entry[1].getModelFullName(), entry[0]))
def visualizeMatrix(matrix, formatstring, filename):
"""
Perform the actual visualisation in a matrix style
:param matrix: the 2D matrix to visualize, should be a list of lists
:param formatstring: the string to use to format the values, most likely something like "%f"
:param filename: file to write the matrix to. Can be both a string to create a new file with that name, or an opened file handle.
"""
if isinstance(filename, str):
outfile = open(filename, 'w')
openfile = False
else:
outfile = filename
openfile = True
formatstring = formatstring + " "
for x in matrix:
for y in x:
outfile.write(formatstring % y)
outfile.write("\n")
if not openfile:
outfile.close()
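
A small illustration of visualizeMatrix (not part of the committed file): it writes one whitespace-separated row per line, which can then be rendered as a heatmap, e.g. with gnuplot's "plot 'activity.txt' matrix with image".

    matrix = [[0.0, 1.5, 3.0],
              [2.0, 0.5, 1.0]]
    visualizeMatrix(matrix, "%.2f", "activity.txt")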


@@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,66 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AutoAllocator(object):
"""
Allocate all models in a static manner, simply trying to divide the number of models equally.
Our 'heuristic' is to allocate in chunks as defined in the root coupled model.
"""
def allocate(self, models, edges, nr_nodes, total_activities):
"""
Calculate allocations for the nodes, using the information provided.
        :param models: the models to allocate
:param edges: the edges between the models
:param nr_nodes: the number of nodes to allocate over. Simply an upper bound!
:param total_activities: activity tracking information from each model
:returns: allocation that was found
"""
allocation = {}
allocated_topmost = {}
current_node = 0
total_models = len(models)
for model in models:
# Not yet allocated, so allocate it somewhere
child = model
searchmodel = model
while searchmodel.parent is not None:
child = searchmodel
searchmodel = searchmodel.parent
# searchmodel is now the root model
            # child is its 1st descendant, on which we will allocate
try:
node = allocated_topmost[child]
except KeyError:
current_node = (current_node + 1) % nr_nodes
allocated_topmost[child] = current_node
node = current_node
allocation[model.model_id] = node
return allocation
def getTerminationTime(self):
"""
Returns the time it takes for the allocator to make an 'educated guess' of the advised allocation.
This time will not be used exactly, but as soon as the GVT passes over it. While this is not exactly
necessary, it avoids the overhead of putting such a test in frequently used code.
:returns: float -- the time at which to perform the allocations (and save them)
"""
# No need for any run time information
return 0.0
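
A sketch of the resulting allocation (illustration only; FakeModel is a hypothetical stand-in providing just the attributes the allocator reads): every atomic model ends up on the node assigned to its topmost ancestor below the root, so models in the same top-level block stay together.

    class FakeModel(object):
        def __init__(self, model_id, parent=None):
            self.model_id = model_id
            self.parent = parent

    root = FakeModel(0)
    block_a = FakeModel(1, root)
    block_b = FakeModel(2, root)
    atomics = [FakeModel(10, block_a), FakeModel(11, block_a),
               FakeModel(12, block_b)]
    allocation = AutoAllocator().allocate(atomics, edges={}, nr_nodes=2,
                                          total_activities={})
    # {10: 1, 11: 1, 12: 0}: both models under block_a share a node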


@@ -0,0 +1,145 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
class GreedyAllocator(object):
"""
Allocate all models in a greedy manner: make the most heavy link local and extend from there on until an average load is reached.
"""
def allocate(self, models, edges, nr_nodes, total_activities):
"""
Calculate allocations for the nodes, using the information provided.
        :param models: the models to allocate
:param edges: the edges between the models
:param nr_nodes: the number of nodes to allocate over. Simply an upper bound!
:param total_activities: activity tracking information from each model
:returns: allocation that was found
"""
# Run over all edges to create the nodes and link in their edges
nodes = {}
remaining_edges = set()
to_alloc = set()
for source in edges:
for destination in edges[source]:
# A connection from 'source' to 'destination'
edge = edges[source][destination]
nodes.setdefault(source, []).append((edge, destination))
nodes.setdefault(destination, []).append((edge, source))
remaining_edges.add((edge, source, destination))
to_alloc.add(destination)
to_alloc.add(source)
# OK, nodes are constructed
# Allocate 1 node too much for spilling
nr_nodes += 1
# Find average activity (our target)
avg_activity = sum([total_activities[i] for i in total_activities]) / nr_nodes
# Get the strongest edge
alloc_node = 0
node_load = []
allocation = {}
allocation_rev = defaultdict(set)
while alloc_node < (nr_nodes - 1):
while remaining_edges:
max_edge = max(remaining_edges)
remaining_edges.remove(max_edge)
edge_weight, source, destination = max_edge
if source in to_alloc and destination in to_alloc:
break
else:
break
activity_source = total_activities[source.model_id]
activity_destination = total_activities[destination.model_id]
node_load.append(activity_source + activity_destination)
allocation[source.model_id] = alloc_node
allocation[destination.model_id] = alloc_node
allocation_rev[alloc_node].add(source)
allocation_rev[alloc_node].add(destination)
to_alloc.remove(source)
to_alloc.remove(destination)
            while node_load[alloc_node] < avg_activity:
edge_search = []
for edge in remaining_edges:
if ((edge[1] in allocation_rev[alloc_node] and
edge[2] in to_alloc) or
(edge[2] in allocation_rev[alloc_node] and
edge[1] in to_alloc)):
edge_search.append(edge)
if not edge_search:
break
# Allocate some more nodes
max_edge = max(edge_search)
remaining_edges.remove(max_edge)
edge_weight, source, destination = max_edge
# Ok, this is an unbound connection, so add it
if source in to_alloc:
to_alloc.remove(source)
allocation[source.model_id] = alloc_node
allocation_rev[alloc_node].add(source.model_id)
node_load[alloc_node] += total_activities[source.model_id]
if destination in to_alloc:
to_alloc.remove(destination)
allocation[destination.model_id] = alloc_node
allocation_rev[alloc_node].add(destination.model_id)
node_load[alloc_node] += total_activities[destination.model_id]
alloc_node += 1
# All unassigned nodes are for the spill node
# Undo our spilling node
while to_alloc:
changes = False
n = list(to_alloc)
for model in n:
options = set()
for oport in model.OPorts:
for oline, _ in oport.routing_outline:
location = oline.host_DEVS.location
if oline.host_DEVS.location is not None:
options.add((node_load[location], location))
for iport in model.IPorts:
                    for iline in iport.routing_inline:
location = iline.host_DEVS.location
if iline.host_DEVS.location is not None:
options.add((node_load[location], location))
if not options:
continue
# Get the best option
_, loc = min(options)
node_load[loc] += total_activities[model.model_id]
allocation[model.model_id] = loc
allocation_rev[loc].add(model.model_id)
                to_alloc.remove(model)
                changes = True
if not changes:
            # An iteration without changes means that we would loop forever
for m in to_alloc:
# Force an allocation to 0
allocation[m.model_id] = 0
# allocation_rev doesn't need to be updated
break
return allocation
def getTerminationTime(self):
"""
Returns the time it takes for the allocator to make an 'educated guess' of the advised allocation.
This time will not be used exactly, but as soon as the GVT passes over it. While this is not exactly
necessary, it avoids the overhead of putting such a test in frequently used code.
:returns: float -- the time at which to perform the allocations (and save them)
"""
return 10.0

1479
src/pypdevs/basesimulator.py Normal file

File diff suppressed because it is too large


@@ -0,0 +1,68 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A wrapper for AtomicDEVS models that are to be interpreted as Classic DEVS models
"""
class ClassicDEVSWrapper(object):
"""
Wraps around a normal AtomicDEVS model and intercepts the DEVS specific functions. All attribute read/writes need to be redirected to the model itself.
"""
def __init__(self, model):
"""
Constructor
:param model: the model to wrap around
"""
self.model = model
def __getattr__(self, attr):
"""
Fetches the attributes of the model. This is a 'magic' function.
:param attr: the attribute to fetch
        :returns: the fetched attribute
"""
return getattr(self.model, attr)
def __setattr__(self, attr, val):
"""
Sets the attribute of the model. This is a 'magic' function. Only the 'model' attribute is not proxied!
:param attr: the attribute to set
:param val: the value to set it to
"""
if attr == "model":
object.__setattr__(self, attr, val)
return setattr(self.model, attr, val)
def extTransition(self, inputs):
"""
Wrap around the extTransition function by changing the input dictionary
:param inputs: the input dictionary with lists as values
:returns: the new state, as the normal extTransition method would do
"""
return self.model.extTransition({i: inputs[i][0] for i in inputs})
def outputFnc(self):
"""
Wrap around the outputFnc function by changing the returned dictionary
:returns: the changed dictionary
"""
retval = self.model.outputFnc()
return {i: [retval[i]] for i in retval}
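
A short illustration of the wrapping (not part of the committed file; Passthrough is a hypothetical minimal model, not a real AtomicDEVS): the parallel kernel exchanges lists of events per port, while a Classic DEVS model sees and produces bare events.

    class Passthrough(object):
        def extTransition(self, inputs):
            return inputs          # receives {port: event}
        def outputFnc(self):
            return {"out": 42}     # produces {port: event}

    wrapped = ClassicDEVSWrapper(Passthrough())
    print(wrapped.extTransition({"in": [1]}))  # {'in': 1}
    print(wrapped.outputFnc())                 # {'out': [42]}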

73
src/pypdevs/colors.py Normal file

@@ -0,0 +1,73 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines the colors to be used when drawing the model,
should be colors that are understood by GraphViz.
If more nodes are used than colors are provided here,
they will be shown in white.
"""
colors = [
"red",
"green",
"blue",
"yellow",
"cyan",
"magenta",
"azure",
"violet",
"rose",
"orange",
"chartreuse",
"vermilion",
"amber",
"viridian",
"indigo",
"aliceblue",
"darkkhaki",
"darkgreen",
"darkviolet",
"deepskyblue",
"aquamarine",
"floralwhite",
"deeppink",
"dimgray",
"dodgerblue",
"firebrick",
"forestgreen",
"gold",
"goldenrod",
"greenyellow",
"lightblue",
"lawngreen",
"lavender",
"khaki",
"ivory",
"linen",
"maroon",
"lemonchiffon",
"orchid",
"salmon",
"seagreen",
"skyblue",
"sienna",
"wheat",
"turquoise",
"tomato",
"tan",
"steelblue",
"slategray",
"slateblue"
]

586
src/pypdevs/controller.py Normal file

@@ -0,0 +1,586 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Controller used as a specific simulation kernel
"""
from pypdevs.basesimulator import BaseSimulator
from pypdevs.logger import *
import threading
import pypdevs.accurate_time as time
import pypdevs.middleware as middleware
from pypdevs.DEVS import CoupledDEVS, AtomicDEVS
from pypdevs.util import DEVSException
from pypdevs.activityVisualisation import visualizeLocations
from pypdevs.realtime.threadingBackend import ThreadingBackend
from pypdevs.realtime.asynchronousComboGenerator import AsynchronousComboGenerator
class Controller(BaseSimulator):
"""
The controller class, which is a special kind of normal simulation kernel. This should always run on the node labeled 0.
    It contains some functions that are only required to run on a single node, such as GVT initiation.
"""
def __init__(self, name, model, server):
"""
Constructor
:param name: name of the controller
:param model: model to host at the kernel
:param server: the server to make requests on
"""
BaseSimulator.__init__(self, name, model, server)
self.waiting_lock = threading.Lock()
self.accumulator = {}
self.no_finish_ring = threading.Lock()
self.no_finish_ring.acquire()
self.location_cell_view = False
self.graph = None
self.allocations = None
self.running_irreversible = None
self.initial_allocator = None
self.prev_termination_time = 0.0
def __setstate__(self, retdict):
"""
For pickling
:param retdict: dictionary containing attributes and their value
"""
BaseSimulator.__setstate__(self, retdict)
self.waiting_lock = threading.Lock()
self.no_finish_ring = threading.Lock()
self.no_finish_ring.acquire()
def GVTdone(self):
"""
Notify this simulation kernel that the GVT calculation is finished
"""
self.wait_for_gvt.set()
def isFinished(self, running):
"""
Checks if all kernels have indicated that they have finished simulation.
If each kernel has indicated this, a final (expensive) check happens to
prevent premature termination.
        :param running: the number of kernels that are simulating
:returns: bool -- whether or not simulation is already finished
"""
# NOTE make sure that GVT algorithm is not running at the moment, otherwise we deadlock!
# it might be possible that the GVT algorithm starts immediately after the wait(), causing deadlock again
# Now we are sure that the GVT algorithm is not running when we start this
# It seems that we should be finished, so just ACK this with every simulation kernel before proceeding
# it might be possible that the kernel's 'notifyRun' command is still on the way, making the simulation
# stop too soon.
self.no_finish_ring.acquire()
msgcount = self.finishRing(0, 0, True)
if msgcount == -1:
# One of the nodes was still busy
self.no_finish_ring.release()
return False
else:
msgcount2 = self.finishRing(0, 0, True)
# If they are equal, we are done
ret = msgcount == msgcount2
if not ret:
self.no_finish_ring.release()
else:
self.waiting = 0
return ret
def waitFinish(self, running):
"""
Wait until the specified number of kernels have all told that simulation
finished.
        :param running: the number of kernels that are simulating
"""
while 1:
time.sleep(1)
# Make sure that no relocations are running
if self.isFinished(running):
# All simulation kernels have told us that they are idle at the moment
break
self.run_gvt = False
self.event_gvt.set()
self.gvt_thread.join()
def startGVTThread(self, gvt_interval):
"""
Start the GVT thread
:param gvt_interval: the interval between two successive GVT runs
"""
# We seem to be the controller
# Start up the GVT algorithm then
self.event_gvt = threading.Event()
self.run_gvt = True
self.gvt_thread = threading.Thread(target=Controller.threadGVT,
args=[self, gvt_interval])
self.gvt_thread.daemon = True
self.gvt_thread.start()
def threadGVT(self, freq):
"""
Run the GVT algorithm, this method should be called in its own thread,
because it will block
:param freq: the time to sleep between two GVT calculations
"""
# Wait for the simulation to have done something useful before we start
self.event_gvt.wait(freq)
# Maybe simulation already finished...
while self.run_gvt:
self.receiveControl([float('inf'),
float('inf'),
self.accumulator,
{}],
True)
# Wait until the lock is released elsewhere
print("Waiting for clear")
self.wait_for_gvt.wait()
self.wait_for_gvt.clear()
# Limit the GVT algorithm, otherwise this will flood the ring
print("Cleared")
self.event_gvt.wait(freq)
def getVCDVariables(self):
"""
Generate a list of all variables that exist in the current scope
:returns: list -- all VCD variables in the current scope
"""
variables = []
for d in self.total_model.component_set:
variables.extend(d.getVCDVariables())
return variables
def simulate_sync(self):
"""
Synchronous simulation call, identical to the normal call, with the exception that it will be a blocking call as only "simulate" is marked as oneway.
"""
BaseSimulator.simulate_sync(self)
self.no_finish_ring.acquire()
def simulate(self):
"""
Run the actual simulation on the controller. This will simply 'intercept' the call to the original simulate and perform location visualisation when necessary.
"""
self.checkForTemporaryIrreversible()
self.no_finish_ring.release()
if self.location_cell_view:
from pypdevs.activityVisualisation import visualizeLocations
visualizeLocations(self)
# Call superclass (the actual simulation)
BaseSimulator.simulate(self)
self.prev_termination_time = self.termination_time[0]
def getEventGraph(self):
"""
Fetch a graph containing all connections and the number of events between the nodes. This is only useful when an initial allocator is chosen.
:returns: dict -- maps each (source, destination) pair to the number of events passed between them
"""
return self.runAllocator()[0]
def getInitialAllocations(self):
"""
Get a list of all initial allocations. Will call the allocator to get the result.
:returns: list -- containing all nodes and the models they host
"""
return self.runAllocator()[1]
def runAllocator(self):
"""
Actually extract the graph of exchanged messages and run the allocator with this information.
Results are cached.
:returns: tuple -- the event graph and the allocations
"""
# Only run this code once
if self.graph is None and self.allocations is None:
# It seems this is the first time
if self.initial_allocator is None:
# No allocator was defined, or it has already performed its allocation, which resulted in nothing
self.graph = None
self.allocations = None
else:
from pypdevs.util import constructGraph, saveLocations
self.graph = constructGraph(self.model)
allocs = self.initial_allocator.allocate(self.model.component_set,
self.getEventGraph(),
self.kernels,
self.total_activities)
self.allocations = allocs
self.initial_allocator = None
saveLocations("locationsave.txt",
self.allocations,
self.model_ids)
return self.graph, self.allocations
def setCellLocationTracer(self, x, y, location_cell_view):
"""
Sets the Location tracer and all its configuration parameters
:param x: the horizontal size of the grid
:param y: the vertical size of the grid
:param location_cell_view: whether or not to enable it
"""
self.x_size = x
self.y_size = y
self.location_cell_view = location_cell_view
def setRelocator(self, relocator):
"""
Sets the relocator to the one provided by the user
:param relocator: the relocator to use
"""
self.relocator = relocator
# Perform run-time configuration
try:
self.relocator.setController(self)
except AttributeError:
pass
def setActivityTracking(self, at):
"""
Sets the use of activity tracking, which will simply output the activity of all models at the end of the simulation
:param at: whether or not to enable activity tracking
"""
self.activity_tracking = at
def setClassicDEVS(self, classic_DEVS):
"""
Sets the use of Classic DEVS instead of Parallel DEVS.
:param classic_DEVS: whether or not to use Classic DEVS
"""
# Do this once, to prevent checks for the classic DEVS formalism
if classic_DEVS:
# Methods, so CamelCase
self.coupledOutputGeneration = self.coupledOutputGenerationClassic
def setAllocator(self, initial_allocator):
"""
Sets the initial allocator to use.
:param initial_allocator: the initial allocator to use, or None to disable initial allocation
"""
self.initial_allocator = initial_allocator
if initial_allocator is not None:
# Methods, so CamelCase
self.atomicOutputGeneration_backup = self.atomicOutputGeneration
self.atomicOutputGeneration = self.atomicOutputGenerationEventTracing
def setDSDEVS(self, dsdevs):
"""
Whether or not to check for DSDEVS events
:param dsdevs: dsdevs boolean
"""
self.use_DSDEVS = dsdevs
def setRealtime(self, input_references):
"""
Sets the use of realtime simulation.
:param input_references: dictionary containing the string to port mapping
"""
self.realtime = True
self.realtime_port_references = input_references
def setTerminationCondition(self, termination_condition):
"""
Sets the termination condition of this simulation kernel.
As soon as the condition is valid, it will signal all nodes that they have to stop simulation as soon as they have progressed up to this simulation time.
:param termination_condition: a function that accepts two parameters: *time* and *model*. Function returns whether or not to halt simulation
"""
self.termination_condition = termination_condition
self.termination_time_check = False
def findAndPerformRelocations(self, gvt, activities, horizon):
"""
First requests the relocator for relocations to perform, and afterwards actually perform them.
:param gvt: the current GVT
:param activities: list containing all activities of all nodes
:param horizon: the horizon used in this activity tracking
"""
# Now start moving all models according to the provided relocation directives
relocate = self.relocator.getRelocations(gvt, activities, horizon)
#print("Filtered relocate: " + str(relocate))
if relocate:
self.performRelocationsInit(relocate)
def performRelocationsInit(self, relocate):
"""
Perform the relocations specified in the parameter. Split off from 'findAndPerformRelocations' to make it possible for other parts of the code
to perform relocations too.
:param relocate: dictionary containing the model_id as key and the value is the node to send it to
"""
relocate = {key: relocate[key]
for key in relocate
if self.model_ids[key].location != relocate[key] and
self.model_ids[key].relocatable}
if not relocate:
return
if self.running_irreversible is not None:
self.getProxy(self.running_irreversible).unsetIrreversible()
self.running_irreversible = None
while not self.no_finish_ring.acquire(False):
if not self.run_gvt:
self.GVTdone()
return
time.sleep(0)
kernels = {}
self.locked_kernels = set()
relocation_rules = {}
for model_id in relocate:
source = self.model_ids[model_id].location
destination = relocate[model_id]
if source == destination:
continue
kernels[source] = kernels.get(source, 0) + 1
kernels[destination] = kernels.get(destination, 0) + 1
if kernels[source] == 1:
# We are the first to lock it, so actually send the lock
self.getProxy(source).requestMigrationLock()
if kernels[destination] == 1:
# We are the first to lock it, so actually send the lock
self.getProxy(destination).requestMigrationLock()
relocation_rules.setdefault((source, destination), set()).add(model_id)
while relocation_rules:
# Busy loop until everything is done
# Don't use an iterator, as we will change the list
for source, destination in list(relocation_rules.keys()):
if (source in self.locked_kernels and
destination in self.locked_kernels):
models = relocation_rules[(source, destination)]
self.getProxy(source).migrateTo(destination, models)
del relocation_rules[(source, destination)]
kernels[source] -= len(models)
kernels[destination] -= len(models)
if kernels[source] == 0:
self.getProxy(source).migrationUnlock()
if kernels[destination] == 0:
self.getProxy(destination).migrationUnlock()
# OK, now check whether we need to visualize all locations or not
if self.location_cell_view:
visualizeLocations(self)
# Possibly some node is now hosting all models, so allow this node to become irreversible for some time.
self.checkForTemporaryIrreversible()
# Allow the finishring algorithm again
self.no_finish_ring.release()
def checkForTemporaryIrreversible(self):
"""
Checks if one node is hosting all the models. If this is the case, this node will gain 'temporary irreversibility',
allowing it to skip state saving and thus avoiding the main overhead associated with time warp.
"""
# Check whether or not everything is located at a single node now
if self.relocator.useLastStateOnly():
# If this is the case, we will be unable to know which state to save the activity for
# So disable it for now
# This has a slight negative performance impact, though it isn't worth fixing for the time being
return
if isinstance(self.destinations[0], int):
current_kernel = self.destinations[0]
else:
current_kernel = 0
for kernel in self.destinations:
if isinstance(kernel, int):
loc = kernel
else:
loc = 0
if loc != current_kernel:
break
else:
# We didn't break out of the loop, so a single node hosts all the models
self.getProxy(current_kernel).setIrreversible()
self.running_irreversible = current_kernel
def notifyLocked(self, remote):
"""
Notify this kernel that the model is locked
:param remote: the node that is locked
"""
self.locked_kernels.add(remote)
def dsRemovePort(self, port):
"""
Remove a port from the simulation
:param port: the port to remove
"""
for iport in port.inline:
iport.outline = [p for p in iport.outline if p != port]
for oport in port.outline:
oport.inline = [p for p in oport.inline if p != port]
self.dc_altered.add(port)
def dsDisconnectPorts(self, p1, p2):
"""
Disconnect two ports
:param p1: source port
:param p2: target port
"""
self.dc_altered.add(p1)
def dsConnectPorts(self, p1, p2):
"""
Connect two ports
:param p1: source port
:param p2: target port
"""
self.dc_altered.add(p1)
def dsUnscheduleModel(self, model):
"""
Dynamic Structure change: remove an existing model
:param model: the model to remove
"""
if isinstance(model, CoupledDEVS):
for m in model.component_set:
self.dsUnscheduleModel(m)
for port in model.IPorts:
self.dsRemovePort(port)
for port in model.OPorts:
self.dsRemovePort(port)
elif isinstance(model, AtomicDEVS):
self.model.component_set.remove(model)
self.model.models.remove(model)
# The model is removed, so remove it from the scheduler
self.model.scheduler.unschedule(model)
self.model_ids[model.model_id] = None
self.destinations[model.model_id] = None
self.model.local_model_ids.remove(model.model_id)
for port in model.IPorts:
self.dsRemovePort(port)
for port in model.OPorts:
self.dsRemovePort(port)
else:
raise DEVSException("Unknown model to schedule: %s" % model)
def dsScheduleModel(self, model):
"""
Dynamic Structure change: create a new model
:param model: the model to add
"""
if isinstance(model, CoupledDEVS):
model.full_name = model.parent.full_name + "." + model.getModelName()
for m in model.component_set:
self.dsScheduleModel(m)
for p in model.IPorts:
self.dc_altered.add(p)
for p in model.OPorts:
self.dc_altered.add(p)
elif isinstance(model, AtomicDEVS):
model.model_id = len(self.model_ids)
model.full_name = model.parent.full_name + "." + model.getModelName()
model.location = self.name
self.model_ids.append(model)
self.destinations.append(model)
self.model.component_set.append(model)
self.model.models.append(model)
self.model.local_model_ids.add(model.model_id)
self.atomicInit(model, self.current_clock)
p = model.parent
model.select_hierarchy = [model]
while p is not None:
model.select_hierarchy = [p] + model.select_hierarchy
p = p.parent
if model.time_next[0] == self.current_clock[0]:
# If scheduled for 'now', update the age manually
model.time_next = (model.time_next[0], self.current_clock[1])
# It is a new model, so add it to the scheduler too
self.model.scheduler.schedule(model)
for p in model.IPorts:
self.dc_altered.add(p)
for p in model.OPorts:
self.dc_altered.add(p)
else:
raise DEVSException("Unknown model to schedule: %s" % model)
def setRealTime(self, subsystem, generator_file, ports, scale, listeners, args=[]):
"""
Set the use of realtime simulation
:param subsystem: defines the subsystem to use
:param generator_file: filename to use for generating external inputs
:param ports: input port references
:param scale: the scale factor for realtime simulation
:param listeners: the ports on which we should listen for output
:param args: additional arguments for the realtime backend
"""
self.realtime = True
self.threading_backend = ThreadingBackend(subsystem, args)
self.rt_zerotime = time.time()
# 'async' became a reserved keyword in Python 3.7, so avoid it as a variable name
async_gen = AsynchronousComboGenerator(generator_file, self.threading_backend)
self.asynchronous_generator = async_gen
self.realtime_starttime = time.time()
self.portmap = ports
self.model.listeners = listeners
self.realtime_scale = scale
def gameLoop(self):
"""
Perform all computations up to the current time. Only applicable for the game loop realtime backend.
"""
self.threading_backend.step()
def realtimeInterrupt(self, string):
"""
Create an interrupt from other Python code instead of using stdin or the file
:param string: the value to inject
"""
self.threading_backend.interrupt(string)
def stateChange(self, model_id, variable, value):
"""
Notification function for when a variable's value is altered. It will notify the node that is responsible for simulation of this model AND also notify the tracers of the event.
:param model_id: the model_id of the model whose variable was changed
:param variable: the name of the variable that was changed (as a string)
:param value: the new value of the variable
"""
# Call the node that hosts this model and order it to recompute timeAdvance
proxy = self.getProxy(self.model_ids[model_id].location)
proxy.recomputeTA(model_id, self.prev_termination_time)
self.tracers.tracesUser(self.prev_termination_time,
self.model_ids[model_id],
variable,
value)
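# Illustrative sketch (not part of the original file): a termination condition with the
# signature documented in setTerminationCondition above. The function name and the bound
# of 100.0 are hypothetical; the 'time' argument is assumed to be the (time, age) pair
# used throughout this file, and the condition returns True to halt simulation.
#
# def terminate_after(time, model):
#     return time[0] >= 100.0
#
# controller.setTerminationCondition(terminate_after)   # 'controller' is hypothetical here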

21
src/pypdevs/infinity.py Normal file
View file

@ -0,0 +1,21 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A shortcut for infinity
Mainly here for legacy purposes
"""
INFINITY = float('inf')

108
src/pypdevs/logger.py Normal file
View file

@ -0,0 +1,108 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logger for Syslog
"""
logger = None
location = None
queue = []
import threading
loglock = threading.Lock()
def setLogger(loc, address, loglevel):
"""
Sets the logger object
:param loc: location of the server, to prepend to every logged message
:param address: the address of the syslog server in the form of (ip-address, port)
:param loglevel: the level of logging to perform, should be one specified in the logging module
"""
if loglevel is None:
return
global logger
if logger is not None:
# A logger is already set, so ignore this one
return
import logging
import logging.handlers
handler = logging.handlers.SysLogHandler(address, facility=19)
local_logger = logging.getLogger('PyPDEVS-logging')
local_logger.addHandler(handler)
local_logger.setLevel(loglevel)
global location
location = loc
# Now make the logger 'public'
logger = local_logger
def log(level, msg, logger):
"""
Do the actual logging at the specified level, but save it in case no logger exists yet
:param level: string representation of the function to call on the logger
:param msg: the message to log
:param logger: the logger object to use, or None if no logger has been configured yet
:returns: True -- to allow it as an #assert statement
"""
with loglock:
global location
global queue
if len(msg) > 80:
msg = msg[:79]
if logger is not None:
# Flush the queue first
for level1, msg1 in queue:
getattr(logger, level1)("%s -- %s" % (location, msg1))
queue = []
getattr(logger, level)("%s -- %s" % (location, msg))
else:
queue.append((level, msg))
return True
def debug(msg):
"""
Debug logging statement
:param msg: the message to print
:returns: True -- to allow it as an #assert statement
"""
return log("debug", msg, logger)
def info(msg):
"""
Informational logging statement
:param msg: the message to print
:returns: True -- to allow it as an #assert statement
"""
return log("info", msg, logger)
def warn(msg):
"""
Warning logging statement
:param msg: the message to print
:returns: True -- to allow it as an #assert statement
"""
return log("warn", msg, logger)
def error(msg):
"""
Error logging statement
:param msg: the message to print
:returns: True -- to allow it as an #assert statement
"""
return log("error", msg, logger)

44
src/pypdevs/message.py Normal file
View file

@ -0,0 +1,44 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Network messages used in the distributed simulation
"""
class NetworkMessage(object):
"""
Network messages used in the distributed simulation, simply a data class.
"""
def __init__(self, timestamp, content, uuid, color, destination):
"""
Constructor
:param timestamp: timestamp of the message
:param content: content of the message
:param uuid: UUID of the message
:param color: color of the message for Mattern's algorithm
:param destination: the model_id of the destination model
"""
self.timestamp = timestamp
self.content = content
self.uuid = uuid
self.color = color
self.destination = destination
def __lt__(self, other):
"""
Comparison of different NetworkMessages, necessary for Python3
"""
return self.timestamp < other.timestamp

View file

@ -0,0 +1,189 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scheduler for external input messages
"""
from heapq import heappop, heappush, heapify
from pypdevs.logger import *
class MessageScheduler(object):
"""
An efficient implementation of a message scheduler for the inputQueue;
it supports very fast invalidations (O(1)) and fast retrieval of the first
element (O(log(n)) in the average case).
"""
def __init__(self):
"""
Constructor.
"""
# List of processed messages
self.processed = []
# Heap of the to be processed messages
self.heap = []
# All invalidated messages, simply adding a message's UUID will invalidate
# the message. The counter that it keeps is for multiple invalidations
self.invalids = set()
def __getstate__(self):
"""
For pickling
"""
retdict = {}
unpicklable = frozenset(["instancemethod", "lock", "_Event"])
for i in dir(self):
if getattr(self, i).__class__.__name__ in unpicklable:
# unpicklable, so don't copy it
continue
elif str(i).startswith("__"):
continue
else:
retdict[str(i)] = getattr(self, i)
return retdict
def insert(self, extraction, model_list):
"""
Insert several messages that were created elsewhere and merge them in.
:param extraction: the output of the extract method on the other message scheduler
:param model_list: models that are inserted and for which extraction happened
"""
msgs, invalids = extraction
# A simple update suffices, as these messages have a unique ID
self.invalids |= invalids
for msg in msgs:
moddata = {}
for entry in msg.content:
inport = model_list[entry[0]].ports[entry[1]]
moddata[inport] = msg.content[entry]
# Overwrite the original message
msg.content = moddata
self.schedule(msg)
def extract(self, model_ids):
"""
Extract messages from the message scheduler for when a model gets removed from this kernel.
:param model_ids: iterable of model_ids of models that will be removed from this node
:returns: tuple -- extraction that needs to be passed to the insert method of another scheduler
"""
new_heap = []
extracted = []
for msg in self.heap:
for port in msg.content:
if port.host_DEVS.model_id in model_ids:
msg.content = {(i.host_DEVS.model_id, i.port_id):
msg.content[i]
for i in msg.content}
extracted.append(msg)
else:
new_heap.append(msg)
# Break, as this was simply done for a python 2 and python 3 compliant version
break
heapify(new_heap)
self.heap = new_heap
return (extracted, self.invalids)
def schedule(self, msg):
"""
Schedule a message for processing
:param msg: the message to schedule
"""
try:
self.invalids.remove(msg.uuid)
except KeyError:
heappush(self.heap, msg)
def massUnschedule(self, uuids):
"""
Unschedule several messages so that they will no longer be processed.
:param uuids: iterable of UUIDs that need to be removed
"""
self.invalids = self.invalids.union(uuids)
def readFirst(self):
"""
Returns the first (valid) message. Not necessarily O(1), as it could be
the case that a lot of invalid messages are still to be deleted.
"""
self.cleanFirst()
return self.heap[0]
def removeFirst(self):
"""
Notify that the first (valid) message is processed; it is moved to the processed list for possible rollback.
"""
self.cleanFirst()
self.processed.append(heappop(self.heap))
def purgeFirst(self):
"""
Notify that the first (valid) message must be removed without keeping it.
"""
self.cleanFirst()
heappop(self.heap)
def cleanFirst(self):
"""
Clean all invalid messages at the front of the list. Method MUST be called
before any accesses should happen to the first element, otherwise this
first element might be a message that was just invalidated
"""
try:
while 1:
self.invalids.remove(self.heap[0].uuid)
# If it got removed, it means that the message was indeed invalidated, so we can simply pop it
heappop(self.heap)
except (KeyError, IndexError):
# Seems that the UUID was not invalidated, so we are done
# OR
# Reached the end of the heap and all were invalid
pass
def revert(self, time):
"""
Revert the input queue to the specified time; this also cleans up the list of processed elements.
:param time: the time to which the reversion should happen
"""
try:
i = 0
while self.processed[i].timestamp < time:
i += 1
for msg in self.processed[i:]:
# All processed messages were valid, so no need for the more expensive check
# Should an invalidation for a processed message have just arrived, it will
# be processed AFTER this reversion, thus using the normal unschedule() function
heappush(self.heap, msg)
self.processed = self.processed[:i]
except IndexError:
# All elements are smaller
pass
def cleanup(self, time):
"""
Clean up the processed list, also removes all invalid elements
:param time: time up to which cleanups are allowed to happen
"""
# We can be absolutely certain that ONLY elements from the processed list should be deleted
self.processed = [i for i in self.processed if i.timestamp >= time]
# Clean up the dictionary too, as otherwise it will start to contain a massive amount of entries, consuming both memory and increasing the amortized worst case
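# Illustrative sketch (not part of the original file) of how this scheduler is driven by
# the kernel. NetworkMessage is taken from pypdevs.message; the timestamp tuples and the
# UUID below are hypothetical example values.
#
# from pypdevs.message import NetworkMessage
# sched = MessageScheduler()
# msg = NetworkMessage((1.0, 1), {}, uuid=42, color=False, destination=0)
# sched.schedule(msg)
# first = sched.readFirst()     # peek at the earliest valid message
# sched.removeFirst()           # mark it as processed (kept for possible rollback)
# sched.revert((0.0, 0))        # roll back: processed messages are rescheduled
# sched.massUnschedule([42])    # invalidate the message again by UUID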

69
src/pypdevs/middleware.py Normal file
View file

@ -0,0 +1,69 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware detection and setup code
"""
import sys
def startupMiddleware():
"""
Do the actual detection and startup, also defines all necessary globals
:returns: tuple -- current server rank and total world size
"""
if "MPI" in globals():
# Force local simulation
return 0, 1
# Try loading MPI
global COMM_WORLD
global MPI
try:
from mpi4py import MPI
COMM_WORLD = MPI.COMM_WORLD
except ImportError:
# No MPI4Py found, so force local MPI simulation
from pypdevs.MPIRedirect import MPIFaker
COMM_WORLD = MPIFaker()
# Now we should take care of the starting of the server
rank = COMM_WORLD.Get_rank()
if rank != 0:
# We should stop immediately, to prevent multiple constructions of the model
# This is a 'good' stop, so return with a zero
from pypdevs.server import Server
server = Server(int(rank), COMM_WORLD.Get_size())
sys.exit(0)
else:
# We should still shutdown every simulation kernel at exit by having the controller send these messages
# Use the atexit code at the end
if COMM_WORLD.Get_size() > 1:
import atexit
atexit.register(cleanupMPI)
return 0, COMM_WORLD.Get_size()
def cleanupMPI():
"""
Shut down the MPI backend by sending a termination message to all listening nodes
"""
for i in range(COMM_WORLD.Get_size()):
if i == COMM_WORLD.Get_rank():
req = COMM_WORLD.isend(0, dest=i, tag=0)
else:
COMM_WORLD.send(0, dest=i, tag=0)
if COMM_WORLD.Get_size() > 1:
MPI.Request.wait(req)
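# Illustrative sketch (not part of the original file): typical use at program start-up.
# On MPI ranks other than 0 startupMiddleware() never returns, as it starts a Server and
# exits, so only the controller (rank 0) continues past this call.
#
# rank, size = startupMiddleware()   # rank is always 0 for the code that continues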

237
src/pypdevs/minimal.py Normal file
View file

@ -0,0 +1,237 @@
# Copyright 2015 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The minimal PythonPDEVS simulation kernel. It only supports simple Parallel DEVS simulation, without any fancy configuration options.
While it behaves exactly the same as the normal simulation kernel with default options, it is a lot faster because it skips all optional features.
"""
from collections import defaultdict
from pypdevs.DEVS import CoupledDEVS, AtomicDEVS, RootDEVS
"""
# Uncomment this part to make a completely stand-alone simulation kernel
class BaseDEVS(object):
def __init__(self, name):
self.name = name
self.IPorts = []
self.OPorts = []
self.ports = []
self.parent = None
self.time_last = (0.0, 0)
self.time_next = (0.0, 1)
self.my_input = {}
def addPort(self, name, is_input):
name = name if name is not None else "port%s" % len(self.ports)
port = Port(is_input=is_input, name=name)
if is_input:
self.IPorts.append(port)
else:
self.OPorts.append(port)
port.port_id = len(self.ports)
self.ports.append(port)
port.host_DEVS = self
return port
def addInPort(self, name=None):
return self.addPort(name, True)
def addOutPort(self, name=None):
return self.addPort(name, False)
def getModelName(self):
return self.name
def getModelFullName(self):
return self.full_name
class AtomicDEVS(BaseDEVS):
ID = 0
def __init__(self, name):
BaseDEVS.__init__(self, name)
self.elapsed = 0.0
self.state = None
self.model_id = AtomicDEVS.ID
AtomicDEVS.ID += 1
def extTransition(self, inputs):
return self.state
def intTransition(self):
return self.state
def confTransition(self, inputs):
self.state = self.intTransition()
return self.extTransition(inputs)
def timeAdvance(self):
return float('inf')
def outputFnc(self):
return {}
class CoupledDEVS(BaseDEVS):
def __init__(self, name):
BaseDEVS.__init__(self, name)
self.component_set = []
def addSubModel(self, model):
model.parent = self
self.component_set.append(model)
return model
def connectPorts(self, p1, p2):
p1.outline.append(p2)
p2.inline.append(p1)
class RootDEVS(object):
def __init__(self, components, scheduler):
self.component_set = components
self.time_next = float('inf')
self.scheduler = scheduler(self.component_set, 1e-6, len(self.component_set))
class Port(object):
def __init__(self, is_input, name=None):
self.inline = []
self.outline = []
self.host_DEVS = None
self.name = name
def getPortname(self):
return self.name
"""
def directConnect(component_set):
"""
Perform a trimmed down version of the direct connection algorithm.
It does not support transfer functions, but all the rest is the same.
:param component_set: the iterable to direct connect
:returns: the direct connected component_set
"""
new_list = []
for i in component_set:
if isinstance(i, CoupledDEVS):
component_set.extend(i.component_set)
else:
# Found an atomic model
new_list.append(i)
component_set = new_list
# All and only all atomic models are now direct children of this model
for i in component_set:
# Remap the output ports
for outport in i.OPorts:
# The new contents of the line
outport.routing_outline = set()
worklist = list(outport.outline)
for outline in worklist:
# If it is a coupled model, we must expand this model
if isinstance(outline.host_DEVS, CoupledDEVS):
worklist.extend(outline.outline)
else:
outport.routing_outline.add(outline)
outport.routing_outline = list(outport.routing_outline)
return component_set
class Simulator(object):
"""
Minimal simulation kernel, offering only setTerminationTime and simulate.
Use this Simulator instead of the normal one to use the minimal kernel.
While it has far fewer features, its performance is much higher.
The polymorphic scheduler is also used by default.
"""
def __init__(self, model):
"""
Constructor
:param model: the model to simulate
"""
if isinstance(model, CoupledDEVS):
component_set = directConnect(model.component_set)
ids = 0
for m in component_set:
m.time_last = (-m.elapsed, 0)
m.time_next = (-m.elapsed + m.timeAdvance(), 1)
m.model_id = ids
ids += 1
self.model = RootDEVS(component_set, component_set, None)
elif isinstance(model, AtomicDEVS):
for p in model.OPorts:
p.routing_outline = []
model.time_last = (-model.elapsed, 0)
model.time_next = (model.time_last[0] + model.timeAdvance(), 1)
model.model_id = 0
self.model = RootDEVS([model], [model], None)
self.termination_time = float('inf')
def setTerminationTime(self, time):
"""
Set the termination time of the simulation.
:param time: simulation time at which simulation should terminate
"""
self.termination_time = time
def simulate(self):
"""
Perform the simulation
"""
from pypdevs.schedulers.schedulerAuto import SchedulerAuto
scheduler = SchedulerAuto(self.model.component_set, 1e-6, len(self.model.component_set))
tn = scheduler.readFirst()
tt = self.termination_time
while tt > tn[0]:
# Generate outputs
transitioning = defaultdict(int)
for c in scheduler.getImminent(tn):
transitioning[c] |= 1
outbag = c.outputFnc()
for outport in outbag:
p = outbag[outport]
for inport in outport.routing_outline:
inport.host_DEVS.my_input.setdefault(inport, []).extend(p)
transitioning[inport.host_DEVS] |= 2
# Perform transitions
for aDEVS, ttype in transitioning.items():
if ttype == 1:
aDEVS.state = aDEVS.intTransition()
elif ttype == 2:
aDEVS.elapsed = tn[0] - aDEVS.time_last[0]
aDEVS.state = aDEVS.extTransition(aDEVS.my_input)
elif ttype == 3:
aDEVS.elapsed = 0.
aDEVS.state = aDEVS.confTransition(aDEVS.my_input)
aDEVS.time_next = (tn[0] + aDEVS.timeAdvance(), 1 if tn[0] > aDEVS.time_last[0] else tn[1] + 1)
aDEVS.time_last = tn
aDEVS.my_input = {}
# Do reschedules
scheduler.massReschedule(transitioning)
tn = scheduler.readFirst()
def __getattr__(self, attr):
"""
Wrapper to inform users that they are using the minimal kernel when they want to use an unsupported configuration option.
"""
if attr.startswith("set"):
raise Exception("You are using the minimal simulation kernel, which does not support any configuration except for the termination time. Please switch to the normal simulation kernel to use this option.")
else:
raise AttributeError()
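# Minimal usage sketch (illustrative, not part of the original file): a trivial atomic
# model driven by the minimal kernel. The model, its port and the termination time are
# example values only.
if __name__ == "__main__":
    class Counter(AtomicDEVS):
        def __init__(self):
            AtomicDEVS.__init__(self, "counter")
            self.state = 0
            self.outport = self.addOutPort("out")

        def timeAdvance(self):
            return 1.0

        def intTransition(self):
            return self.state + 1

        def outputFnc(self):
            return {self.outport: [self.state]}

    sim = Simulator(Counter())
    sim.setTerminationTime(10.0)
    sim.simulate()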

View file

@ -0,0 +1,100 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module to offer 'really deterministic' (pseudo-)random number generation in a Distributed Time Warp implementation.
For local simulation, using the random library from Python as usual is perfectly ok.
"""
import random
class RandomGenerator(object):
"""
Base class, which implements a random number interface for the 'uniform' and 'random' Python standard library functions.
.. note:: The generated random numbers are **not** equal to those generated by direct calls to the library functions, as we also use a random number to initialize the seed in the next iteration.
"""
def __init__(self, seed):
"""
Constructor
:param seed: the seed to start with, this will simply be passed to the *random* library at every function call
"""
#NOTE: This is implemented using only a seed (and actually, only a number), instead of using the 'getState()' and 'setState(state)'
# functions provided by the library. This was done to allow much simpler comparison (for memoization), hashing (as we
# have overridden the comparison) and copying (for custom state saving).
self.seed = seed
def __eq__(self, other):
"""
Compare two instances of random number generators.
Needed for memoization.
:param other: the instance to compare with
:returns: bool -- do these random number generators return the same sequence?
"""
return type(self) == type(other) and self.seed == other.seed
def __hash__(self):
"""
Hash this random number generator.
Needed as the comparison method was changed!
:returns: hash
"""
return self.seed
def copy(self):
"""
A copy method to be used when defining custom state saving methods. It will return a complete copy of this random number
generator, which will generate exactly the same sequence of numbers.
"""
return RandomGenerator(self.seed)
def __wrapFunction(self, func, args):
"""
Internal wrapper for most functions, allows easy addition of new functions should the need arise. It updates the internal state and
guarantees determinism even when reversions happen.
:param func: the function to call on the *random* module (a string)
:param args: the arguments to pass (a list)
:returns: random -- the generated value
"""
random.seed(self.seed)
val = getattr(random, func)(*args)
self.seed = random.random()
return val
def uniform(self, a, b):
"""
Call the uniform function of the *random* library.
:param a: lower bound of generated value
:param b: upper bound of generated value
:returns: float -- the generated value
"""
return self.__wrapFunction("uniform", [a, b])
def random(self):
"""
Call the random function of the *random* library.
:returns: float -- a random value between 0 and 1
"""
return self.__wrapFunction("random", [])

View file

@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,82 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import select
import sys
import threading
import pypdevs.accurate_time as time
from pypdevs.util import DEVSException
class AsynchronousComboGenerator(object):
"""
The asynchronous combo generator: it generates events from file input
The name no longer represents what it actually is, as previously it also generated input from stdin (denoting the 'combo').
It does NOT use the default *interrupt()* calls for the threading backend, as this would require the generator to run
on a different thread. The generator should be called at every iteration and its *getNextTime()* value should be taken into
account by every *wait()* call.
"""
def __init__(self, filename, backend):
"""
Constructor.
:param filename: the name of the input file to use for file input. None for no file input.
:param backend: subsystem to use for threading
.. note:: *filename* parameter should not be a file handle
"""
self.backend = backend
if filename is not None:
self.infile = open(filename, 'r')
else:
self.infile = None
self.next_scheduled = float('inf')
self.file_event = None
# Call this here already for time 0, to schedule the first event
self.checkInterrupt(0)
def checkInterrupt(self, current_time):
"""
Checks whether an interrupt should happen at this time; if so, it also reschedules the next one.
This method must be called before the internal interrupt is fetched, as otherwise it will not be taken into account.
:param current_time: the current simulation time to check for interrupts
"""
if self.infile is not None:
# First check for if the scheduled message happened
if (self.next_scheduled - current_time) <= 0:
if self.backend.setInterrupt(self.file_event):
self.next_scheduled = float('inf')
self.file_event = None
# Now check for the next one
if self.next_scheduled == float('inf'):
# We don't have a scheduled event, so fetch one
line = self.infile.readline()
if line == "":
self.infile.close()
self.infile = None
else:
event = line.split(" ", 1)
if len(event) != 2:
raise DEVSException(
"Inproperly formatted input in file: %s" % event)
self.next_scheduled = float(event[0])
self.file_event = event[1][:-1]
def getNextTime(self):
"""
Return the time of the next event from this generator
"""
return self.next_scheduled
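# Illustrative sketch (not part of the original file): the input file is expected to
# contain one event per line in the form "<time> <event string>", for example
#
#     0.5 inport some_value
#     2.0 inport another_value
#
# A hypothetical set-up with the threading backend (file name and times are examples):
#
# from pypdevs.realtime.threadingBackend import ThreadingBackend
# backend = ThreadingBackend("python", [])
# gen = AsynchronousComboGenerator("input.txt", backend)
# gen.checkInterrupt(0.7)        # fires the 0.5 event and schedules the one at 2.0
# next_time = gen.getNextTime()  # -> 2.0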

View file

@ -0,0 +1,90 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
class ThreadingBackend(object):
"""
Wrapper around the actual threading backend. It will also handle interrupts and the passing of them to the calling thread.
"""
def __init__(self, subsystem, args):
"""
Constructor
:param subsystem: string specifying the subsystem to use: python, tkinter or loop
:param args: all additional arguments that should be passed to the subsystem's constructor (must be a list)
"""
self.interrupted_value = None
self.value_lock = threading.Lock()
if subsystem == "python":
from pypdevs.realtime.threadingPython import ThreadingPython
self.subsystem = ThreadingPython(*args)
elif subsystem == "tkinter":
from pypdevs.realtime.threadingTkInter import ThreadingTkInter
self.subsystem = ThreadingTkInter(*args)
elif subsystem == "loop":
from pypdevs.realtime.threadingGameLoop import ThreadingGameLoop
self.subsystem = ThreadingGameLoop(*args)
else:
raise Exception("Realtime subsystem not found: " + str(subsystem))
def wait(self, time, func):
"""
A non-blocking call, which will call the *func* parameter after *time* seconds. It will use the provided backend to do this.
:param time: time to wait in seconds, a float is possible
:param func: the function to call after the time has passed
"""
self.subsystem.wait(time, func)
def interrupt(self, value):
"""
Interrupt a running wait call.
:param value: the value that interrupts
"""
self.interrupted_value = value
self.subsystem.interrupt()
def setInterrupt(self, value):
"""
Sets the value of the interrupt. This should not be used manually and is only required to prevent the asynchronous combo generator from making *interrupt()* calls.
:param value: value with which the interrupt variable should be set
"""
with self.value_lock:
if self.interrupted_value is None:
self.interrupted_value = value
return True
else:
# The interrupt was already set, indicating a collision!
return False
def getInterrupt(self):
"""
Return the value of the interrupt and clear it internally.
:returns: the interrupt
"""
with self.value_lock:
val = self.interrupted_value
self.interrupted_value = None
return val
def step(self):
"""
Perform a step in the backend; only supported for the game loop backend.
"""
self.subsystem.step()
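# Minimal usage sketch (illustrative, not part of the original file): wait asynchronously
# with the pure-Python subsystem and deliver an interrupt. The callback, the interrupt
# value and the delays are example values only.
if __name__ == "__main__":
    import pypdevs.accurate_time as time

    def fired():
        print("wait expired or was interrupted")

    backend = ThreadingBackend("python", [])
    backend.wait(0.5, fired)
    backend.interrupt("inport value")   # wakes the waiting thread early
    print(backend.getInterrupt())       # -> "inport value" (and clears it)
    time.sleep(1)                       # give the worker thread time to call fired()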

View file

@ -0,0 +1,51 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pypdevs.accurate_time as time
from threading import Lock
class ThreadingGameLoop(object):
"""
Game loop subsystem for realtime simulation. Time will only progress when a *step* call is made.
"""
def __init__(self):
"""
Constructor
"""
self.next_event = float('inf')
def step(self):
"""
Perform a step in the simulation: if the scheduled time has passed, the registered function is called.
"""
if time.time() >= self.next_event:
self.next_event = float('inf')
getattr(self, "func")()
def wait(self, delay, func):
"""
Wait for the specified time, or faster if interrupted
:param delay: time to wait, in seconds
:param func: the function to call
"""
self.func = func
self.next_event = time.time() + delay
def interrupt(self):
"""
Interrupt the waiting thread
"""
self.next_event = 0
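# Minimal usage sketch (illustrative, not part of the original file): the game loop
# backend only makes progress when step() is called, typically once per frame of the
# host application's main loop. The delay and callback are example values only.
if __name__ == "__main__":
    def fired():
        print("event fired")

    backend = ThreadingGameLoop()
    backend.wait(0.1, fired)
    while backend.next_event != float('inf'):
        backend.step()       # normally called from the host game loop
        time.sleep(0.01)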

View file

@ -0,0 +1,56 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Event, Thread, Lock
import pypdevs.accurate_time as time
class ThreadingPython(object):
"""
Simple Python threads subsystem
"""
def __init__(self):
"""
Constructor
"""
self.evt = Event()
self.evt_lock = Lock()
def wait(self, delay, func):
"""
Wait for the specified time, or faster if interrupted
:param delay: time to wait
:param func: the function to call
"""
#NOTE this call has a granularity of 5ms in Python <= 2.7.x in the worst case, so beware!
# the granularity seems to be much better in Python >= 3.x
p = Thread(target=ThreadingPython.callFunc, args=[self, delay, func])
p.daemon = True
p.start()
def interrupt(self):
"""
Interrupt the waiting thread
"""
self.evt.set()
def callFunc(self, delay, func):
"""
Function to call on a separate thread: it will block for the specified time and call the function afterwards
"""
with self.evt_lock:
self.evt.wait(delay)
func()
self.evt.clear()

View file

@ -0,0 +1,85 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def tkMainThreadPoller(tk, queue):
"""
The polling function to register with Tk at the start. This will do the actual scheduling in Tk.
:param tk: the Tk instance to use
:param queue: the queue to check
"""
global tkRunningID
while 1:
try:
time, func = queue.popleft()
tkRunningID = tk.after(time, func)
except TypeError:
# Was an invalidation call
try:
if tkRunningID is not None:
tk.after_cancel(tkRunningID)
except IndexError:
# Nothing to cancel
pass
tkRunningID = None
except IndexError:
break
tk.after(10, tkMainThreadPoller, tk, queue)
class ThreadingTkInter(object):
"""
Tk Inter subsystem for realtime simulation
"""
def __init__(self, tk):
"""
Constructor
:param tk: the Tk instance to use; the constructor registers a poller on it and schedules events through an internal queue
"""
self.runningID = None
self.last_infinity = False
import collections
queue = collections.deque()
self.queue = queue
tk.after(10, tkMainThreadPoller, tk, queue)
def unlock(self):
"""
Unlock the waiting thread
"""
# Don't get it normally, as it would seem like a method call
getattr(self, "func")()
def wait(self, t, func):
"""
Wait for the specified time, or faster if interrupted
:param t: time to wait
:param func: the function to call
"""
if t == float('inf'):
self.last_infinity = True
else:
self.last_infinity = False
self.func = func
self.queue.append((int(t*1000), self.unlock))
def interrupt(self):
"""
Interrupt the waiting thread
"""
if not self.last_infinity:
self.queue.append(None)
self.unlock()
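# Illustrative sketch (not part of the original file): the Tk subsystem must be created
# with a Tk instance whose main loop keeps running, so that the poller registered in the
# constructor can schedule the callbacks. 'some_callback' is a hypothetical function.
#
# import tkinter
# root = tkinter.Tk()
# backend = ThreadingTkInter(root)
# backend.wait(1.0, some_callback)
# root.mainloop()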

View file

@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,111 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.relocators.boundaryRelocator import BoundaryRelocator
from heapq import heappop, heappush, heapify
class BasicBoundaryRelocator(BoundaryRelocator):
"""
Basic implementation of a boundary relocator
"""
def __init__(self, swappiness):
"""
Constructor
:param swappiness: the swappiness factor; a node is considered overloaded when its activity exceeds swappiness times the average activity
"""
BoundaryRelocator.__init__(self)
self.swappiness = swappiness
def setController(self, controller):
"""
Configures the controller of this relocator
:param controller: the controller
"""
BoundaryRelocator.setController(self, controller)
def getRelocations(self, gvt, activities, horizon):
"""
Return all pending relocations
:param gvt: current GVT
:param activities: activities being passed on the GVT ring
:param horizon: the time over which the activities were gathered
:returns: all relocations that should be executed
"""
# Clear all 'semi-global' variables
self.relocate = {}
self.model_activities = {}
self.node_activities = [i[1] for i in activities]
avg_activity = sum(self.node_activities) / len(self.node_activities)
reverts = set()
iterlist = [(activity, node)
for node, activity in enumerate(self.node_activities)
if activity > self.swappiness * avg_activity]
heapify(iterlist)
if sum(self.locations) == 0:
self.locations = [model.location for model in self.model_ids]
self.boundaries = [{} for _ in range(self.kernels)]
self.constructBoundaries(self.model_ids)
while iterlist:
# Keep going as long as there are nodes that are overloaded
srcactivity, node = heappop(iterlist)
# Might have changed in the meantime, though NEVER decreased
srcactivity = self.node_activities[node]
# Now 'node' contains the node that has the most activity of all, so try pushing something away
boundaries = self.boundaries[node]
destactivity, mindest = \
min([(self.node_activities[destination], destination)
for destination in boundaries
if boundaries[destination]])
boundary = boundaries[mindest]
source_deviation = srcactivity - avg_activity
destination_deviation = destactivity - avg_activity
original_heuristic = abs(source_deviation) + \
abs(destination_deviation)
move = None
for option in boundary:
# Swapping the model would give us the following new 'heuristic'
model_activity = self.fetchModelActivity(option)
new_heuristic = abs(source_deviation - model_activity) + \
abs(destination_deviation + model_activity)
if new_heuristic < original_heuristic:
# Remember the activity of the model we intend to move, as 'model_activity' is overwritten by later options
move = option.model_id
move_activity = model_activity
original_heuristic = new_heuristic
if move is not None:
# Will migrate model 'move' to 'mindest'
self.scheduleMove(move, mindest)
if srcactivity - move_activity > avg_activity:
heappush(iterlist, (srcactivity - move_activity, node))
if destactivity + move_activity > avg_activity:
# The destination now also became overloaded, so push from this node as well
heappush(iterlist, (destactivity + move_activity, mindest))
return self.relocate
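# Worked example of the heuristic above (illustrative numbers only): with an average
# activity of 10, a source node at activity 16 and a candidate destination at 6, the
# initial imbalance is |16 - 10| + |6 - 10| = 10. Moving a boundary model with
# activity 3 would change this to |6 - 3| + |-4 + 3| = 4, so that move is scheduled;
# a model with activity 8 would give |6 - 8| + |-4 + 8| = 6, which is better than 10
# but worse than 4, so the model with activity 3 is preferred.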
def useLastStateOnly(self):
"""
Determines whether or not the activities of all steps should be accumulated, or only a single state should be used.
:returns: boolean -- True if the relocator works with a single state
"""
return False

View file

@ -0,0 +1,152 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for a relocator that supports boundary construction and maintenance
"""
class BoundaryRelocator(object):
"""
Main class
"""
def __init__(self):
"""
Constructor
"""
pass
def setController(self, controller):
"""
Set the controller of this relocator
:param controller: the controller object which can be used to fetch all required information about the model
"""
self.server = controller.server
self.model_ids = controller.model_ids
self.kernels = controller.kernels
# All location queries should happen on this CACHE
# This is NOT a live version of the locations and is only a temporary
# version for testing some possible relocations.
# However, this version SHOULD be stable, that is: it is never updated again
self.locations = [model.location for model in self.model_ids]
# Create all boundaries for all nodes
self.boundaries = [{} for _ in range(controller.kernels)]
self.constructBoundaries(self.model_ids)
def fetchModelActivity(self, model):
"""
Get the activity of a specific model.
It will also cache the activity of all models at the same node to make subsequent calls much faster.
:param model: the model to fetch the activity of, can be remote
:returns: the activity of the model
"""
try:
# Try locally
return self.model_activities[model.model_id]
except KeyError:
# 'Cache miss'
proxy = self.server.getProxy(model.location)
self.model_activities.update(proxy.getCompleteActivity())
return self.model_activities[model.model_id]
def constructBoundaries(self, models):
"""
Construct the boundaries for the specified models
:param models: the models to be added to the boundary
"""
for model in models:
location = self.locations[model.model_id]
for iport in model.IPorts:
for port in iport.inline:
if self.locations[port.host_DEVS.model_id] != location:
self.boundaries[location].setdefault(
self.locations[port.host_DEVS.model_id],
set()).add(model)
for oport in model.OPorts:
for port, _ in oport.routing_outline:
if self.locations[port.host_DEVS.model_id] != location:
self.boundaries[location].setdefault(
self.locations[port.host_DEVS.model_id],
set()).add(model)
def removeBoundaries(self, models):
"""
Remove the boundaries provided by the specified models
:param models: the models to be removed from the boundaries list
"""
for model in models:
location = self.locations[model.model_id]
boundaries = self.boundaries[location]
# Only here for efficiency
ms = set([model])
for dest in boundaries:
boundaries[dest] -= ms
def scheduleMove(self, model_id, destination):
"""
Schedule the move of a model to another destination; this operation is reversible
:param model_id: the model_id of the model to move
:param destination: the destination of the model
"""
self.relocate[model_id] = destination
model = self.model_ids[model_id]
source = self.locations[model_id]
update = set([model])
self.removeBoundaries(update)
for iport in model.IPorts:
for port in iport.inline:
update.add(port.host_DEVS)
for oport in model.OPorts:
for port, _ in oport.routing_outline:
update.add(port.host_DEVS)
# Now update contains all the models that should be updated
# Perform the update 'in cache'
self.locations[model_id] = destination
self.removeBoundaries(update)
self.constructBoundaries(update)
activity = self.fetchModelActivity(model)
self.node_activities[source] -= activity
self.node_activities[destination] += activity
def getRelocations(self, gvt, activities, horizon):
"""
Return all pending relocations
:param gvt: current GVT
:param activities: activities being passed on the GVT ring
:param horizon: the activity horizon
:returns: all relocations that should be executed
"""
# This is only a base 'abstract' class
raise NotImplementedError()
def useLastStateOnly(self):
"""
Determines whether or not the activities of all steps should be accumulated, or only a single state should be used.
:returns: boolean -- True if the relocator works with a single state
"""
raise NotImplementedError()
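# Illustrative sketch (hypothetical, not from the original module): a concrete
# relocator built on this base class only has to turn the cached bookkeeping
# into the dictionary returned by getRelocations().  With three kernels and
# made-up models a, b and c, the cached data might look like:
#
#   locations  = [0, 0, 2]            # model_id -> kernel
#   boundaries = [{2: set([a, b])},   # kernel 0 talks to kernel 2 via a and b
#                 {},
#                 {0: set([c])}]
#   relocate   = {1: 2}               # after scheduleMove(1, 2)
#
# A subclass would typically call scheduleMove() for every model it wants to
# move and return self.relocate from getRelocations().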

View file

@ -0,0 +1,74 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Relocator for user-provided relocation directives
"""
class ManualRelocator(object):
"""
    Relocator that executes user-provided relocation directives as soon as the GVT passes their scheduled time
"""
def __init__(self):
"""
Initialize the relocator
"""
self.directives = []
def setController(self, controller):
"""
        Sets the controller; this relocator does not need it
        :param controller: the controller object
"""
pass
def getRelocations(self, gvt, activities, horizon):
"""
Fetch the relocations that are pending for the current GVT
:param gvt: current GVT
:param activities: the activities being passed on the GVT ring
:param horizon: the activity horizon that was used
:returns: dictionary containing all relocations
"""
relocate = {}
for index, directive in enumerate(self.directives):
if directive[0] < gvt:
relocate[directive[1]] = directive[2]
else:
self.directives = self.directives[index:]
break
else:
self.directives = []
return relocate
def addDirective(self, time, model, destination):
"""
        Add a relocation directive; it will be scheduled and executed as soon as the GVT passes the provided time.
:param time: the time at which this should happen
:param model: the model that has to be moved (its model_id)
:param destination: the destination kernel to move it to
"""
self.directives.append([time, model, destination])
self.directives.sort()
def useLastStateOnly(self):
"""
Determines whether or not the activities of all steps should be accumulated, or only a single state should be used.
:returns: boolean -- True if the relocator works with a single state
"""
# Set to false to allow activity tracking plots
return False
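# Illustrative usage sketch (hypothetical model ids and destinations, not from
# the original module): directives are registered up front and drained as the
# GVT advances.
#
#   relocator = ManualRelocator()
#   relocator.addDirective(time=50, model=3, destination=1)
#   relocator.addDirective(time=20, model=7, destination=2)
#
#   relocator.getRelocations(gvt=30, activities=None, horizon=None)
#   # -> {7: 2}; the directive at time 50 stays queued
#   relocator.getRelocations(gvt=60, activities=None, horizon=None)
#   # -> {3: 1}; the directive list is now empty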

View file

@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,212 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Activity Heap is based on a heap, though it allows for reschedules.
To allow reschedules to happen, a model is accompanied by a flag to
indicate whether or not it is still valid.
As soon as a model is rescheduled, the flag of the previously scheduled
time is set and another entry is added. This causes the heap to become *dirty*,
requiring a check for the flag as soon as the first element is requested.
Due to the possibility for a dirty heap, the heap will be cleaned up as
soon as the number of invalid elements becomes too high.
This cleanup method has O(n) complexity and is therefore only
run when the heap becomes far too dirty.
Another problem is that it might consume more memory than other schedulers,
due to invalid elements being kept in memory.
However, the actual model and states are not duplicated as they are references.
The additional memory requirement should not be a problem in most situations.
The 'activity' part from the name stems from the fact that only models where
the *time_next* attribute is smaller than infinity will be scheduled.
Since these elements are not added to the heap, they aren't taken into account
in the complexity. This allows for severe optimisations in situations where
a lot of models can be scheduled for infinity.
Of all provided schedulers, this one is the most mature due to it being the
oldest and also the default scheduler. It is also applicable in every situation
and it offers sufficient performance in most cases.
This scheduler is ideal in situations where (nearly) no reschedules happen
and where most models transition at a different time.
It results in slow behaviour in situations requiring lots of rescheduling,
and thus lots of dirty elements.
This method is also applied in the VLE simulator and is the common approach
to heap schedulers that require invalidation. It varies from the scheduler in
ADEVS due to the heap from the heapq library being used, which doesn't offer
functions to restructure the heap.
Reimplementing these methods in pure Python would be unnecessarily slow.
"""
from heapq import heappush, heappop, heapify
from pypdevs.logger import *
class SchedulerAH(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.id_fetch = [None] * total_models
for model in models:
if model.time_next[0] != float('inf'):
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
True,
model]
heappush(self.heap, self.id_fetch[model.model_id])
else:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
False,
model]
self.invalids = 0
self.max_invalids = len(models)*2
self.epsilon = epsilon
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
#assert debug("Scheduling " + str(model))
# Create the entry, as we have accepted the model
elem = [model.time_next, model.model_id, False, model]
try:
self.id_fetch[model.model_id] = elem
except IndexError:
# A completely new model
self.id_fetch.append(elem)
self.max_invalids += 2
# Check if it requires to be scheduled
if model.time_next[0] != float('inf'):
self.id_fetch[model.model_id][2] = True
heappush(self.heap, self.id_fetch[model.model_id])
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
#assert debug("Unscheduling " + str(model))
        if model.time_next[0] != float('inf'):
            self.invalids += 1
        # Update the reference still in the heap
self.id_fetch[model.model_id][2] = False
# Remove the reference in our id_fetch
self.id_fetch[model.model_id] = None
self.max_invalids -= 2
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
#NOTE rather dirty, though a lot faster for huge models
#assert debug("Mass rescheduling")
inf = float('inf')
for model in reschedule_set:
if model.model_id is None:
continue
event = self.id_fetch[model.model_id]
if event[2]:
if model.time_next == event[0]:
continue
elif event[0][0] != inf:
self.invalids += 1
event[2] = False
if model.time_next[0] != inf:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
True,
model]
heappush(self.heap, self.id_fetch[model.model_id])
#assert debug("Optimizing heap")
if self.invalids >= self.max_invalids:
#assert info("Heap compaction in progress")
self.heap = [i for i in self.heap if i[2] and (i[0][0] != inf)]
heapify(self.heap)
self.invalids = 0
#assert info("Heap compaction complete")
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
#assert debug("Reading first element from heap")
self.cleanFirst()
return self.heap[0][0]
def cleanFirst(self):
"""
Clean up the invalid elements in front of the list
"""
#assert debug("Cleaning list")
try:
while not self.heap[0][2]:
heappop(self.heap)
self.invalids -= 1
except IndexError:
            # Nothing left, so it is as clean as can be
#assert debug("None in list")
pass
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with a specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
#assert debug("Asking all imminent models")
imm_children = []
t, age = time
try:
# Age must be exactly the same
first = self.heap[0]
while (abs(first[0][0] - t) < self.epsilon) and (first[0][1] == age):
# Check if the found event is actually still active
if(first[2]):
# Active, so event is imminent
imm_children.append(first[3])
first[2] = False
else:
# Wasn't active, but we will have to pop this to get the next
# So we can lower the number of invalids
self.invalids -= 1
# Advance the while loop
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children
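# Illustrative usage sketch (hypothetical, not from the original module):
# 'Dummy' is a stand-in for an AtomicDEVS model; the scheduler only reads the
# model_id and time_next attributes.
#
#   class Dummy(object):
#       def __init__(self, model_id, time_next):
#           self.model_id = model_id
#           self.time_next = time_next
#
#   a, b = Dummy(0, (1.0, 1)), Dummy(1, (float('inf'), 1))
#   sched = SchedulerAH([a, b], 1e-6, 2)
#   sched.readFirst()            # -> (1.0, 1)
#   sched.getImminent((1.0, 1))  # -> [a]
#   a.time_next = (2.5, 1)
#   sched.massReschedule([a])    # pushes a fresh entry; the stale one is
#                                # flagged invalid and cleaned up lazily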

View file

@ -0,0 +1,107 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Automatically polymorphic scheduler. It adapts to your scheduling requests at runtime, though at a slight overhead due to the indirection and statistics gathering. If you already know which scheduler is optimal for your model, choose that one directly. If the access pattern varies throughout the simulation, this scheduler is ideal for you: it will choose between the Heapset and Minimal List schedulers.
.. warning:: Barely tested, certainly not with distribution and relocation! **Use with caution!**
"""
from pypdevs.schedulers.schedulerHS import SchedulerHS
from pypdevs.schedulers.schedulerML import SchedulerML
class SchedulerAuto(object):
"""
The polymorphic scheduler class
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: the models to schedule
:param epsilon: the allowed deviation
"""
self.epsilon = epsilon
self.models = list(models)
self.total_models = total_models
self.scheduler_type = SchedulerHS
self.subscheduler = SchedulerHS(self.models, self.epsilon, total_models)
# Statistics
self.total_schedules = 0
self.colliding_schedules = 0
def swapSchedulerTo(self, scheduler):
"""
Swap the current subscheduler to the provided one. If the scheduler is already in use, no change happens.
:param scheduler: the *class* to switch to
"""
if scheduler == self.scheduler_type:
return
self.scheduler_type = scheduler
self.subscheduler = scheduler(self.models, self.epsilon, self.total_models)
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
self.models.append(model)
return self.subscheduler.schedule(model)
def unschedule(self, model):
"""
Unschedule a model
        :param model: the model to unschedule
"""
self.models.remove(model)
return self.subscheduler.unschedule(model)
def massReschedule(self, reschedule_set):
"""
Reschedule all models
:param reschedule_set: the set of models to reschedule
"""
self.colliding_schedules += len(reschedule_set)
self.total_schedules += 1
if self.total_schedules > 100:
if self.colliding_schedules > 15.0 * len(self.models):
                # On average, more than 15% of the models were rescheduled in every iteration
self.swapSchedulerTo(SchedulerML)
elif self.colliding_schedules < 500:
self.swapSchedulerTo(SchedulerHS)
self.colliding_schedules = 0
self.total_schedules = 0
return self.subscheduler.massReschedule(reschedule_set)
def readFirst(self):
"""
Fetch the time of the first model
:returns: (time, age) -- time of the first scheduled model
"""
return self.subscheduler.readFirst()
def getImminent(self, time):
"""
Returns the imminent models for the provided time
:param time: time to check for
"""
return self.subscheduler.getImminent(time)
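# Illustrative note (hypothetical numbers, not from the original module): the
# heuristic in massReschedule() is evaluated roughly every 100 calls.  With
# 1000 models, more than 15 * 1000 = 15000 rescheduled models over those calls
# (on average more than 15% of the models per call) switches to the Minimal
# List scheduler; fewer than 500 rescheduled models in total switches back to
# the Heapset scheduler; anything in between keeps the current subscheduler.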

View file

@ -0,0 +1,210 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Activity Heap is based on a heap, though it allows for reschedules.
To allow reschedules to happen, a model is accompanied by a flag to
indicate whether or not it is still valid.
As soon as a model is rescheduled, the flag of the previously scheduled
time is set and another entry is added. This causes the heap to become *dirty*,
requiring a check for the flag as soon as the first element is requested.
Due to the possibility for a dirty heap, the heap will be cleaned up as
soon as the number of invalid elements becomes too high.
This cleanup method has O(n) complexity and is therefore only
run when the heap becomes far too dirty.
Another problem is that it might consume more memory than other schedulers,
due to invalid elements being kept in memory.
However, the actual model and states are not duplicated as they are references.
The additional memory requirement should not be a problem in most situations.
The 'activity' part from the name stems from the fact that only models where
the *time_next* attribute is smaller than infinity will be scheduled.
Since these elements are not added to the heap, they aren't taken into account
in the complexity. This allows for severe optimisations in situations where
a lot of models can be scheduled for infinity.
Of all provided schedulers, this one is the most mature due to it being the
oldest and also the default scheduler. It is also applicable in every situation
and it offers sufficient performance in most cases.
This scheduler is ideal in situations where (nearly) no reschedules happen
and where most models transition at a different time.
It results in slow behaviour in situations requiring lots of rescheduling,
and thus lots of dirty elements.
This method is also applied in the VLE simulator and is the common approach
to heap schedulers that require invalidation. It varies from the scheduler in
ADEVS due to the heap from the heapq library being used, which doesn't offer
functions to restructure the heap.
Reimplementing these methods in pure Python would be unnecessarily slow.
"""
from heapq import heappush, heappop, heapify
from pypdevs.logger import *
class SchedulerChibi(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.id_fetch = [None] * total_models
inf = float('inf')
for model in models:
if model.time_next != inf:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
True,
model]
heappush(self.heap, self.id_fetch[model.model_id])
else:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
False,
model]
self.invalids = 0
self.max_invalids = len(models)*2
self.epsilon = epsilon
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
#assert debug("Scheduling " + str(model))
# Create the entry, as we have accepted the model
elem = [model.time_next, model.model_id, False, model]
try:
self.id_fetch[model.model_id] = elem
except IndexError:
# A completely new model
self.id_fetch.append(elem)
self.max_invalids += 2
# Check if it requires to be scheduled
if model.time_next != float('inf'):
self.id_fetch[model.model_id][2] = True
heappush(self.heap, self.id_fetch[model.model_id])
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
#assert debug("Unscheduling " + str(model))
if model.time_next != float('inf'):
self.invalids += 1
        # Update the reference still in the heap
self.id_fetch[model.model_id][2] = False
# Remove the reference in our id_fetch
self.id_fetch[model.model_id] = None
self.max_invalids -= 2
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
#NOTE rather dirty, though a lot faster for huge models
#assert debug("Mass rescheduling")
inf = float('inf')
for model in reschedule_set:
event = self.id_fetch[model.model_id]
if event[2]:
if model.time_next == event[0]:
continue
elif event[0] != inf:
self.invalids += 1
event[2] = False
if model.time_next != inf:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
True,
model]
heappush(self.heap, self.id_fetch[model.model_id])
#assert debug("Optimizing heap")
if self.invalids >= self.max_invalids:
#assert info("Heap compaction in progress")
self.heap = [i for i in self.heap if i[2] and (i[0] != inf)]
heapify(self.heap)
self.invalids = 0
#assert info("Heap compaction complete")
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
#assert debug("Reading first element from heap")
self.cleanFirst()
return self.heap[0][0]
def cleanFirst(self):
"""
Clean up the invalid elements in front of the list
"""
#assert debug("Cleaning list")
try:
while not self.heap[0][2]:
heappop(self.heap)
self.invalids -= 1
except IndexError:
# Nothing left, so it as clean as can be
#assert debug("None in list")
pass
def getImminent(self, t):
"""
Returns a list of all models that transition at the provided time, with a specified epsilon deviation allowed.
        :param t: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
#assert debug("Asking all imminent models")
imm_children = []
try:
# Age must be exactly the same
first = self.heap[0]
while (abs(first[0] - t) < self.epsilon):
# Check if the found event is actually still active
if(first[2]):
# Active, so event is imminent
imm_children.append(first[3])
first[2] = False
else:
# Wasn't active, but we will have to pop this to get the next
# So we can lower the number of invalids
self.invalids -= 1
# Advance the while loop
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children

View file

@ -0,0 +1,98 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Minimal List scheduler is the simplest scheduler available,
though it has extremely bad performance in most cases.
It simply keeps a list of all models. As soon as a reschedule happens,
the list is checked for the minimal value, which is stored.
When the imminent models are requested, the lowest value that was found
is used to immediately return [],
or it iterates the complete list in search of models that qualify.
"""
class SchedulerChibiList(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
# Make a copy!
self.models = list(models)
self.minval = float('inf')
self.epsilon = epsilon
self.massReschedule([])
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
self.models.append(model)
if model.time_next < self.minval:
self.minval = model.time_next
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
self.models.remove(model)
if model.time_next == self.minval:
            self.minval = float('inf')
for m in self.models:
if m.time_next < self.minval:
self.minval = m.time_next
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
self.minval = float('inf')
for m in self.models:
if m.time_next < self.minval:
self.minval = m.time_next
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
return self.minval
def getImminent(self, t):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
        :param t: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
imm_children = []
for model in self.models:
if abs(model.time_next - t) < self.epsilon:
imm_children.append(model)
return imm_children

View file

@ -0,0 +1,179 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Dirty Heap is based on a heap, though it allows for reschedules.
To allow reschedules to happen, a model is accompanied by a flag to indicate
whether or not it is still valid. As soon as a model is rescheduled,
the flag of the previously scheduled time is set and another entry is added.
This causes the heap to become *dirty*, requiring a check for the flag
as soon as the first element is requested.
Another problem is that it might consume more memory than other schedulers,
due to invalid elements being kept in memory. However, the actual model and
states are not duplicated as they are references.
The additional memory requirement should not be a problem in most situations.
The 'activity' part from the name stems from the fact that only models where
the *time_next* attribute is smaller than infinity will be scheduled.
Since these elements are not added to the heap, they aren't taken into account
in the complexity. This allows for severe optimisations in situations where
a lot of models can be scheduled for infinity.
Of all provided schedulers, this one is the most mature due to it being the
oldest and also the default scheduler. It is also applicable in every situation
and it offers sufficient performance in most cases.
This scheduler is ideal in situations where (nearly) no reschedules happen and
where most models transition at a different time.
It results in slow behaviour in situations requiring lots of rescheduling,
and thus lots of dirty elements.
This method is also applied in the VLE simulator and is the common approach to
heap schedulers that require invalidation. It varies from the scheduler in ADEVS
due to the heap from the heapq library being used, which doesn't offer functions
to restructure the heap.
Reimplementing these methods in pure Python would be unnecessarily slow.
"""
from heapq import heappush, heappop
from pypdevs.logger import debug
class SchedulerDH(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.id_fetch = [None] * total_models
for model in models:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
False,
model]
self.schedule(model)
self.epsilon = epsilon
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
#assert debug("Scheduling " + str(model))
# Create the entry, as we have accepted the model
elem = [model.time_next, model.model_id, False, model]
try:
self.id_fetch[model.model_id] = elem
except IndexError:
# A completely new model
self.id_fetch.append(elem)
# Check if it requires to be scheduled
if model.time_next[0] != float('inf'):
self.id_fetch[model.model_id][2] = True
heappush(self.heap, self.id_fetch[model.model_id])
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
#assert debug("Unscheduling " + str(model))
        # Update the reference still in the heap
self.id_fetch[model.model_id][2] = False
# Remove the reference in our id_fetch
self.id_fetch[model.model_id] = None
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
#NOTE rather dirty, though a lot faster for huge models
#assert debug("Mass rescheduling")
inf = float('inf')
for model in reschedule_set:
event = self.id_fetch[model.model_id]
if event[2]:
if model.time_next == event[0]:
continue
event[2] = False
if model.time_next[0] != inf:
self.id_fetch[model.model_id] = [model.time_next,
model.model_id,
True,
model]
heappush(self.heap, self.id_fetch[model.model_id])
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
#assert debug("Reading first element from heap")
self.cleanFirst()
return self.heap[0][0]
def cleanFirst(self):
"""
Clean up the invalid elements in front of the list
"""
#assert debug("Cleaning list")
try:
while not self.heap[0][2]:
heappop(self.heap)
except IndexError:
            # Nothing left, so it is as clean as can be
#assert debug("None in list")
pass
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with a specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
#assert debug("Asking all imminent models")
imm_children = []
t, age = time
try:
# Age must be exactly the same
first = self.heap[0]
while (abs(first[0][0] - t) < self.epsilon) and (first[0][1] == age):
# Check if the found event is actually still active
if(first[2]):
# Active, so event is imminent
imm_children.append(first[3])
first[2] = False
# Advance the while loop
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children

View file

@ -0,0 +1,102 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. warning:: Do **not** use this scheduler!
This scheduler will only work if all models are scheduled at exactly the same time, or are not scheduled at all (scheduling at infinity is allowed though).
"""
from heapq import heappush, heappop
from pypdevs.logger import *
class SchedulerDT(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.ready = set()
self.infinite = float('inf')
for m in models:
if m.time_next[0] != self.infinite:
self.ready.add(m)
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
if model.time_next[0] != self.infinite:
self.ready.add(model)
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
try:
self.ready.remove(model)
except KeyError:
pass
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
for model in reschedule_set:
try:
if model.time_next[0] != self.infinite:
self.ready.add(model)
else:
self.ready.remove(model)
except KeyError:
pass
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
val = self.ready.pop()
self.ready.add(val)
return val.time_next
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
t, age = time
try:
val = self.ready.pop()
self.ready.add(val)
cpy = self.ready
self.ready = set()
return cpy
except KeyError:
return []

View file

@ -0,0 +1,146 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Heapset scheduler is based on a small heap, combined with two dictionaries.
The heap will contain only the timestamps of events that should happen. One of the dictionaries will contain the actual models that transition at the specified time. The second dictionary then contains a reverse relation: it maps the models to their time_next. This reverse relation is necessary to know the *old* time_next value of the model, because as soon as the model has its time_next changed, its previously scheduled time will be unknown. This 'previous time' is **not** equal to the *timeLast*, as it might be possible that the model's wait time was interrupted.
For a schedule, the model is added to the dictionary at the specified time_next. In case it is the first element at this location in the dictionary, we also add the timestamp to the heap. This way, the heap only contains *unique* timestamps and thus the actual complexity is reduced to the number of *different* timestamps. Furthermore, the reverse relation is also updated.
Unscheduling is done similarly by simply removing the element from the dictionary.
Rescheduling is a slight optimisation of unscheduling, followed by scheduling.
This scheduler does still schedule models that are inactive (their time_next is infinity), though this does not influence the complexity. The complexity is not affected due to infinity being a single element in the heap that is always present. Since a heap has O(log(n)) complexity, this one additional element does not have a serious impact.
The main advantage over the Activity Heap is that it never gets dirty and thus doesn't require periodical cleanup. The only part that gets dirty is the actual heap, which only contains small tuples. Duplicates of these will also be reduced to a single element, thus memory consumption should not be a problem in most cases.
This scheduler is ideal in situations where most transitions happen at exactly the same time, as we can then profit from the internal structure and simply return the mapped elements. It results in sufficient efficiency in most other cases, mainly due to the code base being a lot smaller than that of the Activity Heap.
"""
from heapq import heappush, heappop
from pypdevs.logger import *
class SchedulerHS(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.reverse = [None] * total_models
self.mapped = {}
self.infinite = (float('inf'), 1)
# Init the basic 'inactive' entry here, to prevent scheduling in the heap itself
self.mapped[self.infinite] = set()
self.epsilon = epsilon
for model in models:
self.schedule(model)
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
try:
self.mapped[model.time_next].add(model)
except KeyError:
self.mapped[model.time_next] = set([model])
heappush(self.heap, model.time_next)
try:
self.reverse[model.model_id] = model.time_next
except IndexError:
self.reverse.append(model.time_next)
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
try:
self.mapped[self.reverse[model.model_id]].remove(model)
except KeyError:
pass
self.reverse[model.model_id] = None
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
        #NOTE the usage of exceptions is a lot better for the PyPy JIT and nets a noticeable speedup
# as the JIT generates guard statements for an 'if'
for model in reschedule_set:
model_id = model.model_id
if model_id is None:
continue
try:
self.mapped[self.reverse[model_id]].remove(model)
except KeyError:
# Element simply not present, so don't need to unschedule it
pass
self.reverse[model_id] = tn = model.time_next
try:
self.mapped[tn].add(model)
except KeyError:
# Create a tuple with a single entry and use it to initialize the mapped entry
self.mapped[tn] = set((model, ))
heappush(self.heap, tn)
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
first = self.heap[0]
while len(self.mapped[first]) == 0:
del self.mapped[first]
heappop(self.heap)
first = self.heap[0]
return first
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
t, age = time
imm_children = set()
try:
first = self.heap[0]
if (abs(first[0] - t) < self.epsilon) and (first[1] == age):
#NOTE this would change the original set, though this doesn't matter as it is no longer used
imm_children = self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
while (abs(first[0] - t) < self.epsilon) and (first[1] == age):
imm_children |= self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children
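# Illustrative usage sketch (hypothetical, not from the original module):
# models transitioning at the same time collapse onto a single heap entry;
# 'Dummy' is a stand-in for an AtomicDEVS model.
#
#   class Dummy(object):
#       def __init__(self, model_id, time_next):
#           self.model_id = model_id
#           self.time_next = time_next
#
#   models = [Dummy(i, (5.0, 1)) for i in range(1000)]
#   sched = SchedulerHS(models, 1e-6, 1000)
#   len(sched.heap)              # -> 1: a single entry for (5.0, 1); the
#                                #    'infinite' entry is pre-mapped, not pushed
#   sched.getImminent((5.0, 1))  # -> the set of all 1000 models at once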

View file

@ -0,0 +1,100 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Minimal List scheduler is the simplest scheduler available,
though it has extremely bad performance in most cases.
It simply keeps a list of all models. As soon as a reschedule happens,
the list is checked for the minimal value, which is stored.
When the imminent models are requested, the lowest value that was found
is used to immediately return [],
or it iterates the complete list in search of models that qualify.
"""
class SchedulerML(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
# Make a copy!
self.models = list(models)
self.minval = (float('inf'), float('inf'))
self.epsilon = epsilon
self.massReschedule([])
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
self.models.append(model)
if model.time_next < self.minval:
self.minval = model.time_next
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
self.models.remove(model)
if model.time_next == self.minval:
self.minval = (float('inf'), float('inf'))
for m in self.models:
if m.time_next < self.minval:
self.minval = m.time_next
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
self.minval = (float('inf'), float('inf'))
for m in self.models:
if m.time_next < self.minval:
self.minval = m.time_next
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
return self.minval
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
imm_children = []
t, age = time
for model in self.models:
if (abs(model.time_next[0] - t) < self.epsilon and
model.time_next[1] == age):
imm_children.append(model)
return imm_children
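# Illustrative note (not from the original module): every massReschedule() is
# a full O(n) scan for the new minimum and every getImminent() is another O(n)
# scan over the list, so this scheduler only pays off when most models are
# rescheduled every step anyway; that is exactly the situation the polymorphic
# scheduler (SchedulerAuto) switches to it for.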

View file

@ -0,0 +1,149 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The No Age scheduler is based on the Heapset scheduler, though it does not take age into account.
.. warning:: This scheduler does not take the age into account, making it **unusable** in simulations where the *timeAdvance* function can return (exactly) 0. If unsure, do **not** use this scheduler, but the more general Heapset scheduler.
The heap will contain only the timestamps of events that should happen. One of the dictionaries will contain the actual models that transition at the specified time. The second dictionary then contains a reverse relation: it maps the models to their time_next. This reverse relation is necessary to know the *old* time_next value of the model, because as soon as the model has its time_next changed, its previously scheduled time will be unknown. This 'previous time' is **not** equal to the *timeLast*, as it might be possible that the model's wait time was interrupted.
For a schedule, the model is added to the dictionary at the specified time_next. In case it is the first element at this location in the dictionary, we also add the timestamp to the heap. This way, the heap only contains *unique* timestamps and thus the actual complexity is reduced to the number of *different* timestamps. Furthermore, the reverse relation is also updated.
Unscheduling is done similarly by simply removing the element from the dictionary.
Rescheduling is a slight optimisation of unscheduling, followed by scheduling.
This scheduler does still schedule models that are inactive (their time_next is infinity), though this does not influence the complexity. The complexity is not affected due to infinity being a single element in the heap that is always present. Since a heap has O(log(n)) complexity, this one additional element does not have a serious impact.
The main advantage over the Activity Heap is that it never gets dirty and thus doesn't require periodical cleanup. The only part that gets dirty is the actual heap, which only contains small tuples. Duplicates of these will also be reduced to a single element, thus memory consumption should not be a problem in most cases.
This scheduler is ideal in situations where most transitions happen at exactly the same time, as we can then profit from the internal structure and simply return the mapped elements. It results in sufficient efficiency in most other cases, mainly due to the code base being a lot smaller than that of the Activity Heap.
"""
from heapq import heappush, heappop
from pypdevs.logger import *
class SchedulerNA(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, total_models):
"""
Constructor
:param models: all models in the simulation
"""
self.heap = []
self.reverse = [None] * total_models
self.mapped = {}
self.infinite = float('inf')
# Init the basic 'inactive' entry here, to prevent scheduling in the heap itself
self.mapped[self.infinite] = set()
self.epsilon = epsilon
for m in models:
self.schedule(m)
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
try:
self.mapped[model.time_next[0]].add(model)
except KeyError:
self.mapped[model.time_next[0]] = set([model])
heappush(self.heap, model.time_next[0])
try:
self.reverse[model.model_id] = model.time_next[0]
except IndexError:
self.reverse.append(model.time_next[0])
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
try:
self.mapped[self.reverse[model.model_id]].remove(model)
except KeyError:
pass
self.reverse[model.model_id] = None
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
        #NOTE the usage of exceptions is a lot better for the PyPy JIT and nets a noticeable speedup
# as the JIT generates guard statements for an 'if'
for model in reschedule_set:
model_id = model.model_id
if model_id is None:
continue
try:
self.mapped[self.reverse[model_id]].remove(model)
except KeyError:
# Element simply not present, so don't need to unschedule it
pass
self.reverse[model_id] = tn = model.time_next[0]
try:
self.mapped[tn].add(model)
except KeyError:
# Create a tuple with a single entry and use it to initialize the mapped entry
self.mapped[tn] = set((model, ))
heappush(self.heap, tn)
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
first = self.heap[0]
while len(self.mapped[first]) == 0:
del self.mapped[first]
heappop(self.heap)
first = self.heap[0]
        # The age was stripped off
return (first, 1)
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
t, age = time
imm_children = set()
try:
first = self.heap[0]
if (abs(first - t) < self.epsilon):
#NOTE this would change the original set, though this doesn't matter as it is no longer used
imm_children = self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
while (abs(first - t) < self.epsilon):
imm_children |= self.mapped.pop(first)
heappop(self.heap)
first = self.heap[0]
except IndexError:
pass
return imm_children
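# Illustrative note (not from the original module): why the age matters.  When
# timeAdvance() returns exactly 0, a model is rescheduled at the same simulated
# time with an incremented age, e.g. from (5.0, 1) to (5.0, 2).  Because this
# scheduler keys the heap and the 'mapped' dictionary on the time component
# only, it cannot distinguish the two ages and the second transition can end
# up being handled in the wrong round.  The Heapset scheduler keeps the full
# (time, age) tuple and avoids this.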

View file

@ -0,0 +1,90 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Sorted List scheduler is the simplest scheduler available, though it has extremely bad performance in several situations.
It simply keeps a list of all models, which is sorted on time_next. There is no real internal representation beyond this list: every (re)schedule simply re-sorts it, and when the imminent models are requested, the qualifying elements are read from the front.
"""
from pypdevs.logger import *
class SchedulerSL(object):
"""
Scheduler class itself
"""
def __init__(self, models, epsilon, totalModels):
"""
Constructor
:param models: all models in the simulation
"""
self.models = list(models)
self.epsilon = epsilon
def schedule(self, model):
"""
Schedule a model
:param model: the model to schedule
"""
self.models.append(model)
self.models.sort(key=lambda i: i.time_next)
def unschedule(self, model):
"""
Unschedule a model
:param model: model to unschedule
"""
self.models.remove(model)
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided.
Equivalent to calling unschedule(model); schedule(model) on every element in the iterable.
:param reschedule_set: iterable containing all models to reschedule
"""
self.models.sort(key=lambda i: i.time_next)
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
return self.models[0].time_next
def getImminent(self, time):
"""
Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed.
:param time: timestamp to check for models
        .. warning:: For efficiency, this method only checks the **first** elements, so invoking this function with a timestamp higher than the value provided by the *readFirst* method will **always** return an empty set.
"""
imm_children = []
t, age = time
try:
# Age must be exactly the same
count = 0
while (abs(self.models[count].time_next[0] - t) < self.epsilon and
self.models[count].time_next[1] == age):
# Don't pop, as we want to keep all models in the list
imm_children.append(self.models[count])
count += 1
except IndexError:
pass
return imm_children

310
src/pypdevs/server.py Normal file
View file

@ -0,0 +1,310 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server for DEVS simulation
"""
from pypdevs.basesimulator import BaseSimulator
from pypdevs.controller import Controller
import pypdevs.middleware as middleware
from pypdevs.threadpool import ThreadPool
import threading
import sys
from pypdevs.util import *
from pypdevs.logger import *
class Server(object):
"""
A server to host MPI, will delegate all of its calls to the active simulation kernel.
"""
# Don't forward some of the internally provided functions, but simply raise an AttributeError
noforward = frozenset(["__str__",
"__getstate__",
"__setstate__",
"__repr__"])
def __init__(self, name, total_size):
"""
Constructor
:param name: the name of the server, used for addressing (in MPI terms, this is the rank)
:param total_size: the total size of the network in which the model lives
"""
self.name = name
self.kernel = None
self.size = total_size
self.proxies = [MPIRedirect(i) for i in range(total_size)]
from pypdevs.MPIRedirect import LocalRedirect
self.proxies[name] = LocalRedirect(self)
self.queued_messages = []
self.queued_time = None
if total_size > 1:
self.threadpool = ThreadPool(2)
self.bootMPI()
def getProxy(self, rank):
"""
Get a proxy to a specified rank.
This rank is allowed to be the local server, in which case a local shortcut is created.
:param rank: the rank to return a proxy to, should be an int
:returns: proxy to the server, either of type MPIRedirect or LocalRedirect
"""
return self.proxies[rank]
def checkLoadCheckpoint(self, name, gvt):
"""
Reconstruct the server from a checkpoint.
:param name: name of the checkpoint
:param gvt: the GVT to restore to
:returns: bool -- whether or not the checkpoint was successfully loaded
"""
rank = self.name
#assert debug("Accessing file " + str("%s_%s_%s.pdc" % (name, gvt, rank)))
try:
            infile = open("%s_%s_%s.pdc" % (name, gvt, rank), 'r')
            pickle.load(infile)
            infile.close()
            return True
except KeyboardInterrupt:
# If the user interrupts, still reraise
raise
except Exception as e:
# Something went wrong
print("Error found: " + str(e))
return False
def loadCheckpoint(self, name, gvt):
"""
Reconstruct the server from a checkpoint.
:param name: name of the checkpoint
:param gvt: the GVT to restore to
"""
rank = self.name
#assert debug("Accessing file " + str("%s_%s_%s.pdc" % (name, gvt, rank)))
infile = open("%s_%s_%s.pdc" % (name, gvt, rank), 'r')
self.kernel = pickle.load(infile)
self.kernel.server = self
from pypdevs.MPIRedirect import LocalRedirect
self.proxies[self.name] = LocalRedirect(self)
infile.close()
#assert debug("Closing file")
self.kernel.loadCheckpoint()
def setPickledData(self, pickled_data):
"""
Set the pickled representation of the model.
For use on the controller itself, as this doesn't need to unpickle the model.
:param pickled_data: the pickled model
"""
self.kernel.pickled_model = pickled_data
def prepare(self, scheduler):
"""
Prepare the server to receive the complete model over MPI
:param scheduler: the scheduler to use
"""
data = middleware.COMM_WORLD.bcast(None, root=0)
if data is not None:
self.saveAndProcessModel(data, scheduler)
middleware.COMM_WORLD.barrier()
def saveAndProcessModel(self, pickled_model, scheduler):
"""
Receive the model and set it on the server, but also saves it for further reinitialisation.
:param pickled_model: pickled representation of the model
:param scheduler: the scheduler to use
"""
self.sendModel(pickle.loads(pickled_model), scheduler)
self.kernel.pickled_model = pickled_model
def getName(self):
"""
        Returns the name of the server.
        This is of little practical use, since the server has already been addressed by its name, but it can double as a ping function.
"""
# Actually more of a ping function...
return self.name
# All calls to this server are likely to be forwarded to the currently
# active simulation kernel, so provide an easy forwarder
def __getattr__(self, name):
"""
Remote calls happen on the server object, though it is different from the simulation kernel itself. Therefore, forward the actual function call to the correct kernel.
:param name: the name of the method to call
:returns: requested attribute
"""
# For accesses that are actually meant for the currently running kernel
if name in Server.noforward:
raise AttributeError()
return getattr(self.kernel, name)
def processMPI(self, data, comm, remote):
"""
        Process an incoming MPI message and reply to it if necessary
:param data: the data that was received
:param comm: the MPI COMM object
:param remote: the location from where the message was received
"""
# Receiving a new request
resend_tag = data[0]
function = data[1]
args = data[2]
kwargs = data[3]
result = getattr(self, function)(*args, **kwargs)
if resend_tag is not None:
if result is None:
result = 0
comm.send(result, dest=remote, tag=resend_tag)
def listenMPI(self):
"""
        Listen for incoming MPI messages and process them as soon as they are received
"""
comm = middleware.COMM_WORLD
status = middleware.MPI.Status()
while 1:
#assert debug("[" + str(comm.Get_rank()) + "]Listening to remote " + str(middleware.MPI.ANY_SOURCE) + " -- " + str(middleware.MPI.ANY_TAG))
# First check if a message is present, otherwise we would have to do busy polling
data = comm.recv(source=middleware.MPI.ANY_SOURCE,
tag=middleware.MPI.ANY_TAG, status=status)
tag = status.Get_tag()
#assert debug("Got data from " + str(status.Get_source()) + " (" + str(status.Get_tag()) + "): " + str(data))
if tag == 0:
# Flush all waiters, as we will never receive an answer when we close the receiver...
self.finishWaitingPool()
break
elif tag == 1:
# NOTE Go back to listening ASAP, so do the processing on another thread
if data[1] == "receive" or data[1] == "receiveAntiMessages":
self.threadpool.addTask(Server.processMPI,
self,
list(data),
comm,
status.Get_source())
else:
# Normal 'control' commands are immediately executed, as they would otherwise have the potential to deadlock the node
threading.Thread(target=Server.processMPI,
args=[self,
list(data),
comm,
status.Get_source()]
).start()
else:
# Receiving an answer to a previous request
try:
event = MPIRedirect.waiting[tag]
MPIRedirect.waiting[tag] = data
event.set()
except KeyError:
# Probably processed elsewhere already, just skip
pass
except AttributeError:
# Key was already set elsewhere
pass
def finishWaitingPool(self):
"""
Stop the complete MPI request queue from blocking, used when stopping simulation is necessary while requests are still outstanding.
"""
for i in MPIRedirect.waiting:
try:
i.set()
except AttributeError:
# It was not a lock...
pass
except KeyError:
# It was deleted in the meantime
pass
def bootMPI(self):
"""
Boot the MPI receivers when necessary, on an other thread to prevent blocking
"""
if self.size > 1:
listener = threading.Thread(target=Server.listenMPI, args=[self])
# Make sure that this is a daemon on the controller, as otherwise this thread will prevent the atexit from stopping
# Though on every other node this should NOT be a daemon, as this is the only part still running
if middleware.COMM_WORLD.Get_rank() == 0:
listener.daemon = True
listener.start()
def sendModel(self, data, scheduler):
"""
Receive a complete model and set it.
:param data: a tuple containing the model, the model_ids dictionary, and a flag for whether or not the model was flattened to allow pickling
:param scheduler: the scheduler to use
"""
model, model_ids, flattened = data
if self.name == 0:
self.kernel = Controller(self.name, model, self)
else:
self.kernel = BaseSimulator(self.name, model, self)
self.kernel.sendModel(model, model_ids, scheduler, flattened)
def finish(self):
"""
Stop the currently running simulation
"""
sim = self.kernel
with sim.simlock:
# Shut down all threads on the topmost simulator
sim.finished = True
sim.should_run.set()
self.finishWaitingPool()
# Wait until they are done
sim.sim_finish.wait()
def queueMessage(self, time, model_id, action):
"""
Queue a delayed action instead of sending it immediately, to make it possible to batch them.
Will raise an exception if previous messages from a different time were not yet flushed!
This flushing is not done automatically, as otherwise the data would be received at a further timestep
which causes problems with the GVT algorithm.
:param time: the time at which the action happens
:param model_id: the model_id that executed the action
:param action: the action to execute (as a string)
"""
if self.queued_time is None:
self.queued_time = time
elif time != self.queued_time:
raise DEVSException("Queued message at wrong time! Probably forgot a flush")
self.queued_messages.append([model_id, action])
def flushQueuedMessages(self):
"""
Flush all queued messages to the controller. This will block until all of them are queued.
It is required to flush all messages right after all of them happened and this should happen within the critical section!
"""
if self.queued_time is not None:
self.getProxy(0).massDelayedActions(self.queued_time,
self.queued_messages)
self.queued_messages = []
self.queued_time = None
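# Illustrative sketch (not part of PyPDEVS): the intended usage of the two
# methods above is to queue every delayed action that happened at one and the
# same simulation time and then flush once, inside the critical section. The
# 'server' object and the (model_id, action) pairs are assumptions of the example.
def _example_batch_delayed_actions(server, time, actions):
    # actions: iterable of (model_id, action_string) pairs, all at 'time'
    for model_id, action in actions:
        server.queueMessage(time, model_id, action)
    # A single flush sends the whole batch to the controller at once
    server.flushQueuedMessages()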

729
src/pypdevs/simconfig.py Normal file
View file

@ -0,0 +1,729 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with the specific aim of providing a simpler configuration interface for the simulator.
"""
import pypdevs.middleware as middleware
from pypdevs.util import DEVSException
from pypdevs.DEVS import CoupledDEVS, AtomicDEVS
def local(sim):
"""
Test whether or not the simulation is done locally
:param sim: the simulator with the locations
:returns: bool -- whether or not the simulation is local
"""
if len(sim.locations) == 0:
raise DEVSException("There are no Atomic DEVS models present in your provided model")
return sim.server.size == 1
class SimulatorConfiguration(object):
"""
All necessary simulator configuration options are provided. The necessary checks will be made and the simulator will be adapted accordingly.
"""
def __init__(self, sim):
"""
Constructor
:param sim: the simulator to alter with actions on this configurator
"""
self.simulator = sim
def setClassicDEVS(self, classicDEVS=True):
"""
Use Classic DEVS instead of Parallel DEVS. This option does not affect the use of Dynamic Structure DEVS or realtime simulation. Not usable with distributed simulation.
:param classicDEVS: whether or not to use Classic DEVS
"""
if not local(self.simulator) and classicDEVS:
raise DEVSException("Classic DEVS simulations cannot be distributed!")
self.simulator.classicDEVS = classicDEVS
def setMemoization(self, memo=True):
"""
Use memoization to prevent repeated int/ext/confTransition calls when a reversion was performed.
:param memo: enable or not
"""
# Local simulation will never profit from memoization, so ignore it
if not local(self.simulator):
self.simulator.memoization = memo
def setDSDEVS(self, dsdevs=True):
"""
Whether or not to enable Dynamic Structure DEVS simulation. If this is set to True, the modelTransition method will be called on all transitioned models.
If this is False, the modelTransition method will not be called, even if one is defined! Enabling this incurs a (slight) slowdown in the simulation,
due to the additional function calls and checks that have to be made. Currently only available in local simulation.
:param dsdevs: enable or not
"""
if local(self.simulator):
self.simulator.dsdevs = dsdevs
elif dsdevs:
raise DEVSException("Dynamic Structure DEVS is currently only available in local simulation!")
def setAllowLocalReinit(self, allowed=True):
"""
Allow a model to be reinitialized in local simulation.
This is not the case by default, as it would be required to save a copy of the model in memory during setup. Generating such a copy can be time consuming and the additional memory consumption could be unacceptable.
Distributed simulation is unaffected, since this always requires the creation of a copy.
If this is False and reinitialisation is done in a local simulation, an exception will be thrown.
.. warning:: The state that is accessible after the simulation will **NOT** be updated if this configuration parameter is used. If you want to have fully up to date states, you should also set the *setFetchAllAfterSimulation()* configuration parameter.
:param allowed: whether or not to allow reinitialization
"""
#TODO check whether or not simulation has already happened...
if not isinstance(allowed, bool):
raise DEVSException("The allow local reinit call requires a boolean as parameter")
self.simulator.allow_local_reinit = allowed
def setManualRelocator(self):
"""
Sets the use of the manual relocator (the default). This mode allows the user to add manual *relocation directives*.
"""
self.setActivityRelocatorCustom("manualRelocator", "ManualRelocator")
def setRelocationDirective(self, time, model, destination):
"""
Creates a relocation directive, stating that a relocation of a certain model should happen at or after the specified time (depending on when the GVT progresses over this time).
If multiple directives exist for the same model, the one with the highest time will be executed.
:param time: time after which the relocation should happen
:param model: the model to relocate at the specified time. Can either be its ID, or an AtomicDEVS or CoupledDEVS model. Note that providing a CoupledDEVS model is simply a shortcut for relocating the COMPLETE subtree elsewhere, as this does not stop at kernel boundaries.
:param destination: the location to where the model should be moved
"""
if not isinstance(destination, int) and not isinstance(destination, str):
raise DEVSException("Relocation directive destination should be an integer or string")
destination = int(destination)
if destination not in range(self.simulator.server.size):
raise DEVSException("Relocation directive got an unknown destination, got: %s, expected one of %s" % (destination, range(self.simulator.server.size)))
from pypdevs.relocators.manualRelocator import ManualRelocator
if not isinstance(self.simulator.activity_relocator, ManualRelocator):
raise DEVSException("Relocation directives can only be set when using a manual relocator (the default)\nYou seem to have changed the relocator, so please revert it back by calling the 'setManualRelocator()' first!")
if isinstance(model, int):
self.simulator.activity_relocator.addDirective(time=time,
model=model,
destination=destination)
elif isinstance(model, AtomicDEVS):
self.simulator.activity_relocator.addDirective(time=time,
model=model.model_id,
destination=destination)
elif isinstance(model, CoupledDEVS):
for m in model.component_set:
self.simulator.setRelocationDirective(time, m, destination)
def setRelocationDirectives(self, directives):
"""
Sets multiple relocation directives simultaneously, easier for batch processing. Behaviour is equal to running setRelocationDirective on every element of the iterable.
:param directives: an iterable containing all directives, in the form [time, model, destination]
"""
for directive in directives:
self.setRelocationDirective(directive[0], directive[1], directive[2])
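# Illustrative sketch (hypothetical model names): relocation directives are set
# on the Simulator object, which forwards every set*/register* call to this
# configuration class. Node numbers must be valid MPI ranks.
def _example_relocation_directives(sim, generator, processors):
    # Move the generator to node 1 once the GVT passes 50.0, and the complete
    # 'processors' CoupledDEVS subtree to node 2 after 100.0
    sim.setRelocationDirectives([
        [50.0, generator, 1],
        [100.0, processors, 2],
    ])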
def setSchedulerCustom(self, filename, scheduler_name, locations=None):
"""
Use a custom scheduler
:param filename: filename of the file containing the scheduler class
:param scheduler_name: class name of the scheduler contained in the file
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
if not isinstance(filename, str):
raise DEVSException("Custom scheduler filename should be a string")
if not isinstance(scheduler_name, str):
raise DEVSException("Custom scheduler classname should be a string")
if locations is None:
# Set global scheduler, so overwrite all previous configs
self.simulator.scheduler_type = (filename, scheduler_name)
self.simulator.scheduler_locations = {}
else:
# Only for a subset of models, but keep the default scheduler
for location in locations:
self.simulator.scheduler_locations[location] = (filename, scheduler_name)
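# Illustrative sketch: a custom scheduler can be installed globally or only on
# selected nodes. The file and class names below are hypothetical and must
# refer to an existing scheduler implementation on the Python path.
def _example_custom_scheduler(sim):
    # Use 'MyScheduler' from my_scheduler.py on every node...
    sim.setSchedulerCustom("my_scheduler", "MyScheduler")
    # ...or only on nodes 1 and 2, keeping the default scheduler elsewhere
    sim.setSchedulerCustom("my_scheduler", "MyScheduler", locations=[1, 2])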
def setSchedulerActivityHeap(self, locations=None):
"""
Use the basic activity heap scheduler, this is the default.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerAH", "SchedulerAH", locations)
def setSchedulerPolymorphic(self, locations=None):
"""
Use a polymorphic scheduler, which chooses at run time between the HeapSet scheduler or the Minimal List scheduler. Slight overhead due to indirection and statistics gathering.
.. warning:: Still unstable, don't use!
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerAuto", "SchedulerAuto", locations)
def setSchedulerDirtyHeap(self, locations=None):
"""
Use the basic activity heap scheduler, but without periodic cleanup. The same scheduler as the one used in VLE.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerDH", "SchedulerDH", locations)
def setSchedulerDiscreteTime(self, locations=None):
"""
Use a basic 'discrete time' style scheduler. If a model is scheduled, it has to be at the same time as all other scheduled models. It is not strictly discrete time, as variable step sizes are allowed, as long as ALL models agree on them.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
.. warning:: Only use in local simulation!
"""
if not local(self.simulator):
raise DEVSException("Do not use this scheduler for distributed simulation")
self.setSchedulerCustom("schedulerDT", "SchedulerDT", locations)
def setSchedulerSortedList(self, locations=None):
"""
Use an extremely simple scheduler that simply sorts the list of all models. Useful if lots of invalidations happen and nearly all models are active.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerSL", "SchedulerSL", locations)
def setSchedulerMinimalList(self, locations=None):
"""
Use a simple scheduler that keeps a list of all models and traverses it each time in search of the first one. Slight variation of the sorted list scheduler.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerML", "SchedulerML", locations)
def setSchedulerNoAge(self, locations=None):
"""
.. warning:: do not use this scheduler if the time advance can be equal to 0. This scheduler strips off the age from every scheduled model, which means that ages do not influence the scheduling.
Use a stripped scheduler that does not take the age into account. It is equivalent in design to the HeapSet scheduler,
but uses basic floats instead of tuples.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerNA", "SchedulerNA", locations)
def setSchedulerHeapSet(self, locations=None):
"""
Use a scheduler built on 3 different data structures. It is still experimental, though it can provide noticeable performance boosts.
:param locations: if it is an iterable, the scheduler will only be applied to these locations. If it is None, all nodes will be affected.
"""
self.setSchedulerCustom("schedulerHS", "SchedulerHS", locations)
def setShowProgress(self, progress=True):
"""
Shows progress in ASCII in case a termination_time is given
:param progress: whether or not to show progress
"""
self.simulator.progress = progress
def setTerminationModel(self, model):
"""
Marks a specific AtomicDEVS model as being used in a termination condition. This is never needed in case no termination_condition is used. It will _force_ the model to run at the controller, ignoring the location that was provided in the model itself. Furthermore, it will prevent the model from migrating elsewhere.
:param model: an AtomicDEVS model that needs to run on the controller and shouldn't be allowed to migrate
"""
if self.simulator.setup:
raise DEVSException("Termination models cannot be changed after the first simulation was already ran!")
if isinstance(model, AtomicDEVS):
self.simulator.termination_models.add(model.model_id)
elif isinstance(model, int):
# A model_id in itself is passed, so just add this
self.simulator.termination_models.add(model)
else:
raise DEVSException("Only AtomicDEVS models can be used in termination conditions!")
def registerState(self, variable, model):
"""
Registers the state of a certain model to an attribute of the simulator AFTER simulation has finished
:param variable: name of the attribute to assign to
:param model: the AtomicDEVS model or its model id to fetch the state from
"""
if isinstance(model, AtomicDEVS):
model = model.model_id
self.simulator.callbacks.append((variable, model))
def setDrawModel(self, draw_model=True, output_file="model.dot", hide_edge_labels=False):
"""
Whether or not to draw the model and its distribution before simulation starts.
:param draw_model: whether or not to draw the model
:param output_file: file to output to
:param hide_edge_labels: whether or not to hide the labels of the connections, this speeds up the model drawing and allows for reasonably sized diagrams
"""
if self.simulator.setup:
raise DEVSException("Model can only be drawn at the first simulation run due to the model being optimized before simulation")
self.simulator.draw_model = draw_model
self.simulator.draw_model_file = output_file
self.simulator.hide_edge_labels = hide_edge_labels
def setFetchAllAfterSimulation(self, fetch=True):
"""
Update the complete model by fetching all states from all remote locations. This is different from 'registerState', as it will fetch everything and it will modify the original model instead of adding an attribute to the Simulator object
:param fetch: whether or not to fetch all states from all models
"""
self.simulator.fetch_all = fetch
def setActivityTrackingVisualisation(self, visualize, x = 0, y = 0):
"""
Set the simulation to visualize the results from activity tracking. An x and y parameter can be given to visualize it in a cell style.
:param visualize: whether or not to visualize it
:param x: the horizontal size of the grid (optional)
:param y: the vertical size of the grid (optional)
"""
if not isinstance(visualize, bool):
raise DEVSException("Activity Tracking visualisation requires a boolean")
if visualize and ((x > 0 and y <= 0) or (y > 0 and x <= 0)):
raise DEVSException("Activity Tracking cell view requires both a positive x and y parameter for the maximal size of the grid")
self.simulator.activity_visualisation = visualize
self.simulator.activity_tracking = visualize
self.simulator.x_size = int(x)
self.simulator.y_size = int(y)
def setLocationCellMap(self, locationmap, x = 0, y = 0):
"""
Set the simulation to produce a Cell DEVS-like output file of the current model locations. This file will be regenerated as soon as some relocations are processed.
:param locationmap: whether or not to generate this file
:param x: the horizontal size of the grid
:param y: the vertical size of the grid
"""
if locationmap and (x <= 0 or y <= 0):
raise DEVSException("Location cell view requires a positive x and y parameter for the maximal size of the grid")
self.simulator.location_cell_view = locationmap
self.simulator.x_size = int(x)
self.simulator.y_size = int(y)
def setTerminationCondition(self, condition):
"""
Sets the termination condition for the simulation. Setting this will remove a previous termination time and condition. This condition will be executed on the controller
:param condition: a function to call that returns a boolean whether or not to halt simulation
"""
self.simulator.termination_condition = condition
self.simulator.termination_time = float('inf')
def setTerminationTime(self, time):
"""
Sets the termination time for the simulation. Setting this will remove a previous termination time and condition.
:param time: time at which simulation should be halted
"""
if not isinstance(time, float) and not isinstance(time, int):
raise DEVSException("Simulation termination time should be either an integer or a float")
if time < 0:
raise DEVSException("Simulation termination time cannot be negative")
self.simulator.termination_condition = None
# Convert to float, as otherwise we would have to do this conversion implicitly at every iteration
self.simulator.termination_time = float(time)
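# Illustrative sketch: a termination time and a termination condition are
# mutually exclusive, setting one clears the other. The condition signature
# (simulation time and model) and the 'collector' attribute are assumptions
# of this example.
def _example_termination(sim, model):
    # Simple case: stop at simulated time 100
    sim.setTerminationTime(100.0)
    # Alternative: stop as soon as the collector has seen 50 events; a model
    # used in the condition should be pinned to the controller
    def stop_when_done(time, m):
        return m.collector.state.received >= 50
    sim.setTerminationCondition(stop_when_done)
    sim.setTerminationModel(model.collector)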
def setVerbose(self, filename=None):
"""
Sets the use of a verbose tracer.
Calling this function multiple times will register a tracer for each of them (thus output to multiple files is possible, though more inefficient than simply (manually) copying the file at the end).
:param filename: string representing the filename to write the trace to, None means stdout
"""
if not isinstance(filename, str) and filename is not None:
raise DEVSException("Verbose filename should either be None or a string")
self.setCustomTracer("tracerVerbose", "TracerVerbose", [filename])
def setRemoveTracers(self):
"""
Removes all currently registered tracers, might be useful in reinitialised simulation.
"""
self.simulator.tracers = []
self.simulator.removeTracers()
def setCell(self, x_size = None, y_size = None, cell_file = "celltrace", multifile = False):
"""
Sets the use of a Cell DEVS tracer for the simulation
:param x_size: the horizontal length of the grid
:param y_size: the vertical length of the grid
:param cell_file: the file to save the generated trace to
:param multifile: if True, each timestep will be saved to a separate file (nice for visualisations)
"""
if x_size is None or y_size is None:
raise DEVSException("Cell Tracing requires both an x and y size")
if x_size < 1 or y_size < 1:
raise DEVSException("Cell Tracing sizes should be positive")
self.setCustomTracer("tracerCell", "TracerCell", [cell_file, int(x_size), int(y_size), multifile])
def setXML(self, filename):
"""
Sets the use of a XML tracer.
Calling this function multiple times will register a tracer for each of them (thus output to multiple files is possible, though more inefficient than simply (manually) copying the file at the end).
:param filename: string representing the filename to write the trace to
"""
if not isinstance(filename, str):
raise DEVSException("XML filename should be a string")
self.setCustomTracer("tracerXML", "TracerXML", [filename])
def setVCD(self, filename):
"""
Sets the use of a VCD tracer.
Calling this function multiple times will register a tracer for each of them (thus output to multiple files is possible, though more inefficient than simply (manually) copying the file at the end).
:param filename: string representing the filename to write the trace to
"""
if not isinstance(filename, str):
raise DEVSException("VCD filename should be a string")
self.setCustomTracer("tracerVCD", "TracerVCD", [filename])
def setCustomTracer(self, tracerfile, tracerclass, args):
"""
Sets the use of a custom tracer, loaded at run time.
Calling this function multiple times will register a tracer for each of them (thus output to multiple files is possible, though more inefficient than simply (manually) copying the file at the end).
:param tracerfile: the file containing the tracerclass
:param tracerclass: the class to instantiate
:param args: arguments to be passed to the tracerclass's constructor
"""
self.simulator.tracers.append((tracerfile, tracerclass, args))
def setLogging(self, destination, level):
"""
Sets the logging destination for the syslog server.
:param destination: A tuple/list containing an address, port pair defining the location of the syslog server. Set to None to prevent modification
:param level: the level at which logging should happen. This can either be a logging level from the logging module, or it can be a string specifying this level. Accepted strings are: 'debug', 'info', 'warning', 'warn', 'error', 'critical'
"""
if self.simulator.nested:
raise DEVSException("Logging in nested simulation is not allowed, the logging settings of the parent are used!")
if (not isinstance(destination, tuple) and
not isinstance(destination, list) and
(destination is not None)):
raise DEVSException("Logging destination should be a tuple or a list containing an IP address, followed by a port number")
if isinstance(level, str):
import logging
# Convert to the correct location
level = level.lower()
loglevels = {"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARN,
"warn": logging.WARN,
"error": logging.ERROR,
"critical": logging.CRITICAL}
try:
level = loglevels[level]
except KeyError:
raise DEVSException("Logging level %s not recognized" % level)
if destination is not None:
self.simulator.address = destination
self.simulator.loglevel = level
def setGVTInterval(self, gvt_int):
"""
Sets the interval in seconds between 2 GVT calculations. This is the time between the ending of the previous run and the start of the next run, to prevent overlapping calculations.
.. note:: Parameter should be at least 1 to prevent an overload of GVT calculations
:param gvt_int: interval in seconds (float or integer)
"""
if not isinstance(gvt_int, float) and not isinstance(gvt_int, int):
raise DEVSException("GVT interval should be an integer or a float")
if gvt_int < 1:
raise DEVSException("GVT interval should be larger than or equal to one")
self.simulator.gvt_interval = gvt_int
def setCheckpointing(self, name, checkpoint_interval):
"""
.. warning:: name parameter will be used as a filename, so avoid special characters
Sets the interval between 2 checkpoints in terms of GVT calculations. This option generates PDC files starting with 'name'. This is only possible when using MPI.
:param name: name to prepend to each checkpoint file
:param checkpoint_interval: number of GVT runs that are required to trigger a checkpoint. For example 3 means that a checkpoint will be created after each third GVT calculation
"""
if not isinstance(checkpoint_interval, int):
raise DEVSException("Checkpoint interval should be an integer")
if not isinstance(name, str):
raise DEVSException("Checkpoint name should be a string")
if checkpoint_interval < 1:
raise DEVSException("Checkpoint interval should be larger than or equal to one")
if self.simulator.realtime:
raise DEVSException("Checkpointing is not possible under realtime simulation")
self.simulator.checkpoint_interval = checkpoint_interval
self.simulator.checkpoint_name = name
def setStateSaving(self, state_saving):
"""
Sets the type of state saving to use, this will have a high impact on performance. It is made customizable as some more general techniques will be much slower, though necessary in certain models.
:param state_saving: Either an ID of the option, or (recommended) a string specifying the method, see options below.
.. glossary::
deepcopy
use the deepcopy module
pickle0
use the (c)pickle module with pickling protocol 0
pickleH
use the (c)pickle module with the highest available protocol
pickle
use the (c)pickle module
copy
use the copy module (only safe for flat states)
assign
simply assign states (only safe for primitive states)
none
equivalent to assign (only safe for primitive states)
custom
define a custom 'copy' function in every state and use this
marshal
use the marshal module (only safe for states containing only marshallable types)
"""
if not isinstance(state_saving, int) and not isinstance(state_saving, str):
raise DEVSException("State saving should be done using an integer or a string")
if isinstance(state_saving, str):
options = {"deepcopy": 0,
"pickle0": 1,
"pickleH": 2,
"pickle": 2,
"copy": 3,
"assign": 4,
"none": 4,
"custom": 5,
"marshal": 6}
try:
state_saving = options[state_saving]
except KeyError:
raise DEVSException("State saving option %s not recognized" % state_saving)
self.simulator.state_saving = state_saving
def setMessageCopy(self, copy_method):
"""
Sets the type of message copying to use, this will have an impact on performance. It is made customizable as some more general techniques will be much slower.
:param copy_method: Either an ID of the option, or (recommended) a string specifying the method, see options below
.. glossary::
pickle
use the (c)pickle module
custom
define a custom 'copy' function in every message and use this
none
do not use any copying at all; this is unsafe, though most other DEVS simulators only offer this
"""
if not isinstance(copy_method, int) and not isinstance(copy_method, str):
raise DEVSException("Message copy method should be done using an integer or a string")
if isinstance(copy_method, str):
options = {"pickle": 0, "custom": 1, "none": 2}
try:
copy_method = options[copy_method]
except KeyError:
raise DEVSException("Message copy option %s not recognized" % copy_method)
self.simulator.msg_copy = copy_method
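# Illustrative sketch: both options accept the string names documented above.
# The combination below trades safety for speed and is only one possible choice.
def _example_copy_settings(sim):
    # Deep-copy states for maximum safety (the default is pickling)...
    sim.setStateSaving("deepcopy")
    # ...or rely on a user-provided copy() method on every state
    # sim.setStateSaving("custom")
    # Skip message copying completely; only safe if events are never mutated
    sim.setMessageCopy("none")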
def setRealTime(self, realtime = True, scale=1.0):
"""
Sets the use of realtime instead of 'as fast as possible'.
:param realtime: whether or not to use realtime simulation
:param scale: the scale for scaled real time, every delay will be multiplied with this number
"""
if not local(self.simulator):
raise DEVSException("Real time simulation is only possible in local simulation!")
self.simulator.realtime = realtime
self.simulator.realtime_scale = scale
def setRealTimeInputFile(self, generator_file):
"""
Sets the realtime input file to use. If realtime is not yet set, this will auto-enable it.
:param generator_file: the file to use, should be a string, NOT a file handle. None is acceptable if no file should be used.
"""
if not self.simulator.realtime:
self.setRealTime(True)
if not isinstance(generator_file, str) and generator_file is not None:
raise DEVSException("Realtime generator should be a string or None")
self.simulator.generator_file = generator_file
def setRealTimePlatformThreads(self):
"""
Sets the realtime platform to Python Threads. If realtime is not yet set, this will auto-enable it.
"""
if not self.simulator.realtime:
self.setRealTime(True)
self.simulator.subsystem = "python"
self.simulator.realtime_extra = []
def setRealTimePlatformTk(self, tk):
"""
.. note:: this clearly requires Tk to be present.
Sets the realtime platform to Tk events. If realtime is not yet set, this will auto-enable it.
:param tk: the Tk instance whose event loop will be used to schedule simulation events
"""
if not self.simulator.realtime:
self.setRealTime(True)
self.simulator.subsystem = "tkinter"
self.simulator.realtime_extra = [tk]
def setRealTimePlatformGameLoop(self):
"""
Sets the realtime platform to Game Loop: the user is then responsible for invoking the simulator's realtime_loop_call method at regular intervals. If realtime is not yet set, this will auto-enable it.
"""
if not self.simulator.realtime:
self.setRealTime(True)
self.simulator.subsystem = "loop"
self.simulator.realtime_extra = []
def setRealTimePorts(self, ports):
"""
Sets the dictionary of ports that can be used to put input on. If realtime is not yet set, this will auto-enable it.
:param ports: dictionary with strings as keys, ports as values
"""
if not self.simulator.realtime:
self.setRealTime(True)
if not isinstance(ports, dict):
raise DEVSException("Realtime input port references should be a dictionary")
self.simulator.realtime_port_references = ports
def setModelState(self, model, new_state):
"""
Reinitialize the state of a certain model
Calling this method will cause a recomputation of the timeAdvance for this model. Its results will be used relative to the time of the last transition.
:param model: model whose state to change
:param new_state: state to assign to the model
"""
if not isinstance(model, int):
model = model.model_id
self.simulator.modifyState(model, new_state)
def setModelStateAttr(self, model, attr, value):
"""
Reinitialize an attribute of the state of a certain model
Calling this method will cause a recomputation of the timeAdvance for this model. Its results will be used relative to the time of the last transition.
:param model: model whose state to change
:param attr: string representation of the attribute to change
:param value: value to assign
"""
if not isinstance(model, int):
model = model.model_id
self.simulator.modifyStateAttr(model, attr, value)
def setModelAttribute(self, model, attr, value):
"""
Reinitialize an attribute of the model itself
Calling this method will cause a recomputation of the timeAdvance for this model. Its results will be used relative to the time of the last transition.
:param model: model whose attribute to set
:param attr: string representation of the attribute to change
:param value: value to assign
"""
if not isinstance(model, int):
model = model.model_id
self.simulator.modifyAttributes(model, attr, value)
def setActivityRelocatorCustom(self, filename, classname, *args):
"""
Sets the use of a custom relocator
:param filename: filename containing the relocator
:param classname: classname of the relocator
:param args: all other args are passed to the constructor
"""
try:
exec("from pypdevs.relocators.%s import %s" % (filename, classname))
except:
exec("from %s import %s" % (filename, classname))
self.simulator.activity_relocator = eval("%s(*args)" % classname)
def setActivityRelocatorBasicBoundary(self, swappiness):
"""
Sets the use of the *activity* relocator called *'Basic Boundary'*.
:param swappiness: how big the deviation from the average should be before scheduling relocations
"""
if swappiness < 1.0:
raise DEVSException("Basic Boundary Activity Relocator should have a swappiness >= 1.0")
self.setActivityRelocatorCustom("basicBoundaryRelocator",
"BasicBoundaryRelocator",
swappiness)
def setGreedyAllocator(self):
"""
Sets the use of the greedy allocator that is contained in the standard PyPDEVS distribution.
"""
from pypdevs.allocators.greedyAllocator import GreedyAllocator
self.setInitialAllocator(GreedyAllocator())
def setAutoAllocator(self):
"""
Sets the use of an initial allocator that simply distributes the root models.
This is a static allocator, meaning that no event activity will be generated.
"""
from pypdevs.allocators.autoAllocator import AutoAllocator
self.setInitialAllocator(AutoAllocator())
def setInitialAllocator(self, allocator):
"""
Sets the use of an initial allocator instead of the manual allocation. Can be set to None to use manual allocation (default).
:param allocator: the allocator to use for assigning the initial locations
"""
self.simulator.allocator = allocator
def setListenPorts(self, port, function):
"""
Sets a listener on a DEVS port. When an event arrives at that output port, the provided function will be called with the bag (as if that function were the extTransition!).
Listening to ports is only allowed in realtime simulation! Remember to return from the calling function as soon as possible to minimize delays.
Only a single listener is supported per port.
:param port: the port to listen to, can be anything.
:param function: the function to call when the event arrives. It should take a single parameter (the event bag).
"""
if not self.simulator.realtime:
raise DEVSException("Need to be in realtime simulation")
self.simulator.listeners[port] = function
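# Illustrative sketch (hypothetical model and port names): a typical realtime
# configuration combines a platform, named input ports and, optionally, a
# listener on an output port. All calls are forwarded from the Simulator
# object to this configuration class.
def _example_realtime_setup(sim, model):
    sim.setRealTime(True, scale=1.0)
    sim.setRealTimePlatformThreads()
    # Map the string "in" to a real input port, so interrupts of the form
    # "in <value>" can be injected while the simulation is running
    sim.setRealTimePorts({"in": model.generator.inport})
    # Get a callback with the event bag whenever this output port fires
    sim.setListenPorts(model.collector.outport, lambda bag: print(bag))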

820
src/pypdevs/simulator.py Normal file
View file

@ -0,0 +1,820 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Main simulator class to be used as an interface to the user
"""
import pypdevs.middleware as middleware
# Fetch the rank and size of this simulation run
# Don't get it ourselves (e.g. from MPI), as we are possibly not using MPI
nested = False
was_main = False
rank, size = middleware.startupMiddleware()
from pypdevs.util import *
from pypdevs.DEVS import *
from pypdevs.basesimulator import *
from pypdevs.server import *
from pypdevs.logger import *
import threading
import pypdevs.accurate_time as time
import os
# Try loading cPickle as it is faster, though otherwise fall back to normal pickling
try:
import cPickle as pickle
except ImportError:
import pickle
# For Python2 and Python3 compliance
try:
import Queue as Queue
except ImportError:
import queue as Queue
def local(sim):
"""
Test whether or not the simulation is done locally
:param sim: the simulator with the locations
:returns: bool -- whether or not the simulation is local
"""
if len(sim.locations) == 0:
raise DEVSException("There are no Atomic DEVS models present in your provided model")
return sim.server.size == 1
def loadCheckpoint(name):
"""
Load a previously created simulation from a saved checkpoint.
:param name: the name of the model to provide some distinction between different simulation models
:returns: either None if no recovery was possible, or the Simulator object after simulation
"""
listdir = os.listdir('.')
if str(name) + "_SIM.pdc" not in listdir:
# Simulator object not even found, don't bother continuing
#assert info("Not even a SIM file was found for the requested name, giving up already")
return
try:
infile = open("%s_SIM.pdc" % (name), 'rb')
simulator = pickle.load(infile)
infile.close()
except:
return
# Use an rsplit, as it is possible that the user defined name contains _ too
files = [f for f in listdir
if (f.endswith(".pdc") and
not f.endswith("SIM.pdc") and
f.rsplit('_', 2)[0] == name)]
if len(files) == 0:
return
#assert debug("Got matching files: " + str(files))
max_gvt = 0
nodes = middleware.COMM_WORLD.Get_size()
noncomplete_checkpoint = True
found_files = {}
found_gvts = []
for f in files:
gvt = float(f.split('_')[-2])
if gvt in found_files:
found_files[gvt].append(f)
else:
found_files[gvt] = [f]
found_gvts.append(gvt)
found_gvts.sort()
gvt = 0
# Construct a temporary server
from pypdevs.middleware import COMM_WORLD
server = Server(middleware.COMM_WORLD.Get_rank(),
middleware.COMM_WORLD.Get_size())
while len(found_gvts) > 0:
gvt = found_gvts[-1]
if len(found_files[gvt]) < nodes:
found_gvts.pop()
gvt = 0
else:
if gvt == 0:
return None
for rank in range(server.size):
if not server.getProxy(rank).checkLoadCheckpoint(name, gvt):
# One of the proxies denied, try next
found_gvts.pop()
gvt = 0
break
if gvt != 0:
# If we got here, we are done and can load it
break
if gvt == 0:
#raise DEVSException("All currently present pdc files are unusable, please remove them all to force a fresh simulation run!")
if COMM_WORLD.Get_size() > 1:
# We need to shut down the already started MPI server...
COMM_WORLD.isend(0, dest=0, tag=0)
return None
simulator.server = server
for rank in range(server.size):
server.getProxy(rank).loadCheckpoint(name, gvt)
#assert info("Recovering from time " + str(gvt))
simulator.loadCheckpoint()
return simulator
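# Illustrative sketch: the intended recovery pattern is to try to restore from
# a checkpoint first and only build a fresh Simulator when that fails. Note
# that loadCheckpoint resumes and finishes the simulation itself on success.
# The model class and checkpoint name are assumptions of this example.
def _example_resume_or_start(ModelClass, name="experiment"):
    sim = loadCheckpoint(name)
    if sim is None:
        # No usable checkpoint found: start from scratch
        sim = Simulator(ModelClass())
        sim.setCheckpointing(name, 1)
        sim.setTerminationTime(1000.0)
        sim.simulate()
    return sim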
class Simulator(object):
"""
Associates a hierarchical DEVS model with the simulation engine.
"""
def __init__(self, model):
"""
Constructor of the simulator.
:param model: a valid model (created with the provided functions)
"""
from pypdevs.simconfig import SimulatorConfiguration
self.config = SimulatorConfiguration(self)
from pypdevs.middleware import COMM_WORLD
# Simulator is always started at the controller
self.server = Server(0, size)
self.model = model
self.listeners = {}
global nested
global was_main
if nested:
was_main = False
else:
nested = True
was_main = True
# Initialize all options
self.init_done = False
self.run_gvt = True
self.callbacks = []
self.termination_models = set()
self.fetch_all = False
self.tracers = []
self.cell = False
self.x_size = None
self.y_size = None
self.cell_file = "celldevstrace.txt"
self.cell_multifile = False
self.termination_time = float('inf')
self.termination_condition = None
self.address = ('localhost', 514)
import logging
self.loglevel = logging.DEBUG
self.checkpoint_interval = -1
self.checkpoint_name = "(none)"
self.gvt_interval = 1
self.state_saving = 2
self.msg_copy = 0
self.realtime = False
self.realtime_port_references = {}
self.subsystem = "python"
self.generator_file = None
self.relocations = []
self.progress = False
self.draw_model = False
self.hide_edge_labels = False
self.setup = False
self.allow_local_reinit = False
self.modify_values = {}
self.modify_state_values = {}
self.activity_tracking = False
self.activity_visualisation = False
self.location_cell_view = False
self.sort_on_activity = False
from pypdevs.relocators.manualRelocator import ManualRelocator
self.activity_relocator = ManualRelocator()
self.dsdevs = False
self.memoization = False
self.classicDEVS = False
self.setSchedulerActivityHeap()
self.locations_file = None
self.allocator = None
self.realtime_extra = []
self.model_ids = []
self.locations = defaultdict(list)
self.model.finalize(name="",
model_counter=0,
model_ids=self.model_ids,
locations=self.locations,
select_hierarchy=[])
# Allow the model to provide some of its own configuration
self.model.simSettings(self)
def __getattr__(self, func):
"""
Get the specified attribute, all setters/registers will be redirected to the configuration code.
:param func: function that is called
:returns: requested attribute
"""
if func.startswith("set") or func.startswith("register"):
# Redirect to configuration backend
return getattr(self.config, func)
else:
# Just an unknown attribute
raise AttributeError(func)
def runStartup(self):
"""
Perform startup of the simulator right before simulation
"""
self.startAllocator()
# Controller is the main simulation server
# Remap every mandatory model to the controller
for model_id in self.termination_models:
model = self.model_ids[model_id]
self.locations[model.location].remove(model_id)
self.locations[0].append(model_id)
model.location = 0
model.relocatable = False
# Models without a location are assigned to the controller
# This is delayed until now, as auto allocation could still assign them elsewhere
for model_id in self.locations[None]:
# All these models are not yet initialized, so set them to the default
self.model_ids[model_id].location = 0
self.locations[0].extend(self.locations[None])
del self.locations[None]
self.controller = self.server.getProxy(0)
if self.draw_model:
#assert info("Drawing model hierarchy")
out = open(self.draw_model_file, 'w')
out.write("digraph G {\n")
self.drawModelHierarchy(out, self.model)
self.model.listeners = self.listeners
if isinstance(self.model, CoupledDEVS):
self.model.component_set = directConnect(self.model.component_set, self.listeners)
elif isinstance(self.model, AtomicDEVS):
for p in self.model.IPorts:
p.routing_inline = []
p.routing_outline = []
for p in self.model.OPorts:
p.routing_inline = []
p.routing_outline = []
else:
raise DEVSException("Unkown model being simulated")
if self.allocator is not None and self.allocator.getTerminationTime() == 0.0:
# It is a static allocator, so this can be done right now!
allocs = self.allocator.allocate(self.model.component_set,
None,
self.server.size,
None)
for model_id, location in allocs.items():
self.model_ids[model_id].location = location
saveLocations("locationsave.txt", allocs, self.model_ids)
self.allocator = None
if self.draw_model and self.allocator is None:
out = open(self.draw_model_file, 'a')
self.drawModelConnections(out, self.model, None)
out.write("}")
self.draw_model = False
nodes = len(self.locations.keys())
if None in self.locations:
nodes -= 1
if nodes < self.server.size:
# Less locations used than there exist
#assert warn("Not all initialized MPI nodes are being used in the model setup! Auto allocation could fix this.")
pass
elif nodes > self.server.size:
raise DEVSException(
"Not enough MPI nodes started for the distribution given " +
"in the model! Models requested at location %i, max node = %i"
% (max(self.locations.keys()), self.server.size - 1))
def drawModelHierarchy(self, outfile, model):
"""
Draws the hierarchy by creating a Graphviz Dot file. This merely creates the first part: the hierarchy of all models
:param outfile: a file handle to write text to
:param model: the model to draw
"""
from pypdevs.colors import colors
if isinstance(model, CoupledDEVS):
outfile.write(' subgraph "cluster%s" {\n' % (model.getModelFullName()))
outfile.write(' label = "%s"\n' % model.getModelName())
outfile.write(' color=black\n')
for m in model.component_set:
self.drawModelHierarchy(outfile, m)
outfile.write(' }\n')
elif isinstance(model, AtomicDEVS):
if model.location >= len(colors):
#assert warn("Not enough colors supplied in colors.py for this number of nodes! Defaulting to white")
color = "white"
else:
color = colors[model.location]
outfile.write((' "%s" [\n label = "%s\\nState: %s"\n' +
' color="%s"\n style=filled\n]\n')
% (model.getModelFullName(),
model.getModelName(),
model.state,
color))
def drawModelConnections(self, outfile, model, colors=None):
"""
Draws all necessary connections between the models
:param outfile: the file to output to
:param model: a CoupledDEVS model whose children should be drawn
:param colors: the colors to draw on the connections. Only used when an initial allocator is used.
"""
if colors is not None:
max_events = 0
for i in colors:
for j in colors[i]:
if colors[i][j] > max_events:
max_events = colors[i][j]
for source in model.component_set:
for source_port in source.OPorts:
for destination_port, _ in source_port.routing_outline:
destination = destination_port.host_DEVS
if colors is not None:
#TODO color is not yet perfect
try:
absolute_color = colors[source][destination]
relative_color = '"%s 1 1"' \
% (1 / (absolute_color / float(3 * max_events)))
except KeyError:
# Simply no message transfer
absolute_color = 0
relative_color = '"1 1 1"'
outfile.write(' "%s" -> "%s" '
% (source.getModelFullName(),
destination.getModelFullName()))
if self.hide_edge_labels and colors is None:
outfile.write(';\n')
elif self.hide_edge_labels and colors is not None:
outfile.write('[label="%s",color=%s];\n'
% (absolute_color, relative_color))
elif not self.hide_edge_labels and colors is None:
outfile.write('[label="%s -> %s"];\n'
% (source_port.getPortName(),
destination_port.getPortName()))
elif not self.hide_edge_labels and colors is not None:
outfile.write('[label="%s -> %s (%s)",color=%s];\n'
% (source_port.getPortName(),
destination_port.getPortName(),
absolute_color,
relative_color))
def checkpoint(self):
"""
Create a checkpoint of this object
"""
outfile = open(str(self.checkpoint_name) + "_SIM.pdc", 'wb')
if self.flattened:
self.model.flattenConnections()
pickle.dump(self, outfile)
if self.flattened:
self.model.unflattenConnections()
def loadCheckpoint(self):
"""
Alert the Simulator that it was restored from a checkpoint and thus can take some shortcuts
"""
self.controller = self.server.getProxy(0)
self.real_simulate()
def startAllocator(self):
"""
Set the use of an allocator if required, thus forcing all models to run at the controller
"""
if self.allocator is not None:
self.activity_tracking = True
# Make simulation local for event capturing
for model in self.model.component_set:
model.setLocation(0, force=True)
def loadLocationsFromFile(self, filename):
"""
Try to load a file containing the allocation of the nodes. If such a (valid) file is found, True is returned. Otherwise False is returned.
This can thus easily be used in a simulator experiment file as a condition for setting an allocator (e.g. check for an allocation file and if
none is found, create one by running the allocator first).
A partially valid file will not be used; a file does not need to specify an allocation for all models, those that aren't mentioned are simply
skipped and their default allocation is used (as specified in the model itself).
:param filename: the name of the file to use
:returns: bool -- success of the operation
"""
try:
f = open(filename, 'r')
locs = {}
for line in f:
split = line.split(" ", 2)
model_id = int(split[0])
location = int(split[1])
# Strip the trailing newline from the model name
modelname = split[2][:-1]
# Check for compliance first, otherwise the locations are loaded partially
if self.model_ids[model_id].getModelFullName() != modelname:
return False
else:
locs[model_id] = location
f.close()
# Everything seems to be fine, so do the actual allocations now
for model_id in locs:
self.model_ids[model_id].location = locs[model_id]
return True
except:
return False
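# Illustrative sketch of the experiment-file pattern described in the docstring
# above: reuse a saved allocation when a valid one exists, otherwise let an
# allocator compute one on this run (it writes locationsave.txt for later runs).
def _example_allocation(sim):
    if not sim.loadLocationsFromFile("locationsave.txt"):
        # No (valid) saved allocation yet: fall back to the greedy allocator
        sim.setGreedyAllocator()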
def reinit(self):
"""
Reinitialize the model, so that a new *simulate()* call will restart the simulation anew.
This is possible in both local and distributed simulation,
though it requires the *setAllowLocalReinit* method to be called first if you are running local simulation.
The additional method call is required as allowing reinitialisation requires the complete model to be saved twice (a reinit copy and a working copy).
**Do NOT call this method directly, but call it through the simconfig file**
"""
loclist = range(self.server.size)
proxylist = [self.server.getProxy(location) for location in loclist]
# Send to every kernel to clear the simulation memory
if not self.allow_local_reinit and len(proxylist) == 1:
raise DEVSException("Reinitialisation for local simulation is disabled by default, please enable it with the configuration method 'setAllowLocalReinit()'")
for i, proxy in enumerate(proxylist):
proxy.resetSimulation(self.scheduler_locations[i])
def modifyState(self, model_id, state):
"""
Modify the state of a specific model.
**Do NOT call this method directly, but call it through the simconfig interface**
:param model_id: the model_id of the model to modify the state from
:param state: the state to configure
"""
proxy = self.server.getProxy(self.model_ids[model_id].location)
proxy.setAttr(model_id, "state", state)
self.controller.stateChange(model_id, "model.state", state)
def modifyStateAttr(self, model_id, attr, value):
"""
Modify an attribute of the state of a specific model.
**Do NOT call this method directly, but call it through the simconfig interface**
:param model_id: the model_id of the model to modify the state from
:param attr: the name of the attribute of the state to modify
:param value: the value to set as attribute
"""
proxy = self.server.getProxy(self.model_ids[model_id].location)
proxy.setStateAttr(model_id, attr, value)
self.controller.stateChange(model_id, "model.state.%s" % attr, value)
def modifyAttributes(self, model_id, attr, value):
"""
Modify an attribute of a specific model.
**Do NOT call this method directly, but call it through the simconfig interface**
:param model_id: the model_id of the model to modify the state from
:param attr: the name of the attribute of the model to modify
:param value: the value to set as attribute
"""
for dst in range(self.server.size):
self.server.getProxy(dst).setAttr(model_id, attr, value)
self.controller.stateChange(model_id, "model.%s" % attr, value)
def simulate(self):
"""
Start simulation with the previously set options. Can be rerun afterwards to continue the simulation of
the model (or reinit it first and restart simulation), possibly after altering some aspects of the model with the provided methods.
"""
loclist = range(self.server.size)
proxylist = [self.server.getProxy(location) for location in loclist]
if not self.setup:
self.runStartup()
self.relocations.sort()
for directive in self.relocations:
if directive[1] in self.termination_models:
raise DEVSException("Termination model was found as a relocation directive!")
# self.locations is now untrusted, as it is possible for migration to happen!
self.locations = defaultdict(list)
# Broadcast the model, do this slightly more intelligently than by iterating over the list, by using provided functions and exploiting maximal parallelism
self.flattened = False
# Fill in all schedulers
for location in loclist:
if location not in self.scheduler_locations:
self.scheduler_locations[location] = self.scheduler_type
try:
# Try broadcasting as-is
broadcastModel((self.model,
self.model_ids,
self.flattened),
proxylist,
self.allow_local_reinit,
self.scheduler_locations)
self.flattened = False
except RuntimeError:
# Something went wrong, probably exceeded the maximum recursion depth while pickling
#assert warn("Normal sending not possible due to big recursion, trying auto flattening")
try:
# Try decoupling the ports from the actual models to limit recursion that our simulation framework induced
self.model.flattenConnections()
# Broadcast again, but now mention that the ports were altered
self.flattened = True
broadcastModel((self.model,
self.model_ids,
self.flattened),
proxylist,
self.allow_local_reinit,
self.scheduler_locations)
except RuntimeError as e:
# Even that didn't solve it, user error!
# Stop the nodes from waiting for a broadcast
broadcastCancel()
import sys
raise DEVSException("Could not send model to remote destination due to pickling error: " + str(e))
# Prevent further setups
self.setup = True
for proxy in proxylist:
proxy.setGlobals(tracers=self.tracers,
address=self.address,
loglevel=self.loglevel,
checkpoint_frequency=self.checkpoint_interval,
checkpoint_name = self.checkpoint_name,
kernels=len(loclist),
statesaver=self.state_saving,
memoization=self.memoization,
msg_copy=self.msg_copy)
# Set the verbosity on the controller only, otherwise each kernel
# would open the file itself, causing problems. Furthermore, all
# verbose output will be sent to the controller
self.controller.setAllocator(self.allocator)
self.controller.setRelocator(self.activity_relocator)
self.controller.setDSDEVS(self.dsdevs)
self.controller.setActivityTracking(self.activity_tracking)
self.controller.setClassicDEVS(self.classicDEVS)
self.controller.setCellLocationTracer(self.x_size,
self.y_size,
self.location_cell_view)
# Clear this up as we would reregister them otherwise
self.tracers = []
if self.realtime:
if len(loclist) > 1:
raise DEVSException("Real time simulation only possible locally")
self.controller.setRealTime(self.subsystem,
self.generator_file,
self.realtime_port_references,
self.realtime_scale,
self.listeners,
self.realtime_extra)
# Check whether global or local termination should be used
if self.termination_condition is not None:
# Only set the condition on the controller
proxy = self.server.getProxy(0)
proxy.setTerminationCondition(self.termination_condition)
else:
# Global termination time
for proxy in proxylist:
proxy.setTerminationTime((self.termination_time, float('inf')))
if self.checkpoint_interval > 0:
self.checkpoint()
self.real_simulate()
def removeTracers(self):
"""
Remove all currently registered tracers.
"""
loclist = range(self.server.size)
proxylist = [self.server.getProxy(location) for location in loclist]
for proxy in proxylist:
proxy.removeTracers()
def realtime_loop_call(self):
"""
Perform a single iteration in the loop for real time simulation
"""
self.controller.gameLoop()
def realtime_interrupt(self, string):
"""
Generate an interrupt for the realtime backend using a method call.
:param string: the value to interrupt with, should be of the form "port value"
"""
self.controller.realtimeInterrupt(string)
def showProgress(self, locations):
"""
Shows the progress of the simulation by polling all locations that are passed. Should run on a separate thread as this blocks!
:param locations: list of all locations to access
"""
# 80 is somewhat default...
consolewidth = 80
# Delete 4 for the prefix, 5 for the suffix
barwidth = consolewidth - 4 - 5
finishtime = self.termination_time
first = True
# Local simulation is always 'committed'
self.fillchar = "=" if len(locations) > 1 else "#"
gvt = 0.0
while 1:
# Several dirty checks for whether or not the simulation is done, if it is finished no more calls should be needed
# Keep doing this until the main thread exits, this should be a thread!
if self.checkpoint_interval > -1:
# Don't use an event while checkpointing, as this is unpicklable
time.sleep(1)
else:
self.progress_event.wait(1)
if not first:
for _ in locations:
sys.stdout.write("\033[F")
first = False
if self.progress_finished and self.fillchar != "E":
gvt = finishtime
elif self.progress_finished and self.fillchar == "E":
# Don't update the GVT variable
if len(locations) == 1:
# The gvt is actually kind of the node time
gvt = nodetime
else:
gvt = max(self.controller.getGVT(), 0)
gvt_percentage = int(gvt / finishtime * 100)
gvt_length = min(barwidth, gvt_percentage * barwidth // 100)
for node in locations:
if self.progress_finished:
nodetime = float('inf')
else:
nodetime = self.controller.getProxy(node).getTime()
if nodetime == float('inf'):
nodetime = finishtime
s = "%2d" % node
s += " |"
percentage = int(nodetime / finishtime * 100)
s += "#" * gvt_length
length = min(barwidth, percentage * barwidth // 100) - gvt_length
s += self.fillchar * length
s += " " * (barwidth - gvt_length - length)
if percentage == 100 and self.fillchar != "E":
s += "|DONE"
elif percentage == 100 and self.fillchar == "E":
s += "|FAIL"
else:
s += "| %2d" % percentage + "%"
print(s)
if self.progress_finished:
return
def real_simulate(self):
"""
The actual simulation part, this is identical for the 'start from scratch' and 'start from checkpoint' algorithm, thus it was split up
"""
locations = range(self.server.size)
try:
## Progress visualisation code
if self.progress:
if self.termination_time == float('inf'):
#assert warning("Progress visualisation is only possible if a termination time is used instead of a termination condition")
self.progress = False
#elif self.verbose and self.verbose_file is None:
# #assert warning("Progress visualisation is not allowed when printing verbose output")
# pass
# self.progress = False
else:
self.progress_finished = False
thread = threading.Thread(target=self.showProgress,
args=[locations])
if self.checkpoint_interval < 0:
self.progress_event = threading.Event()
thread.start()
# Local simulation can take a shortcut
if len(locations) == 1:
if self.checkpoint_interval > 0:
# If we use checkpointing, we will need a GVT thread running
self.controller.startGVTThread(self.gvt_interval)
# Simply do a blocking call, thus preventing the finish ring algorithm
#begin = time.time()
self.controller.getProxy(locations[0]).simulate_sync()
#print(time.time() - begin)
else:
self.controller.startGVTThread(self.gvt_interval)
for location in locations:
# Don't run all of these on a separate thread, as it returns no result
self.controller.getProxy(location).simulate()
# Here, the simulation is running and we wait for it to end...
self.controller.waitFinish(len(locations))
# It seems that all nodes have finished!
#assert debug("Finished waiting for all processors")
except DEVSException as e:
print(e)
# Should also exit on a DEVSException since this isn't really meant to happen
import sys
# Return an errorcode, as we ended abruptly
sys.exit(1)
except:
# Try to stop the progress bar thread if this exists, otherwise we hang forever
if self.progress:
self.fillchar = "E"
self.progress_finished = True
if self.checkpoint_interval > -1:
# With checkpointing running, we need to do this the hard way...
self.progress_event.set()
# Wait for it to end
thread.join()
# Reraise the initial exception, this code was only here to stop the progress bar :)
raise
# Perform all pending operations
#assert debug("Performing all delayed actions")
self.controller.performActions()
# Stop all running tracers
#assert debug("Stopping all tracers")
self.controller.stopTracers()
# Now the state is stable, fetch all registered states before shutting down
#assert debug("Fetching all requested states")
if len(self.callbacks) > 0:
for variable, model_id in self.callbacks:
# Ask the controller on which node this model currently runs; calls to the controller
# are very fast, as they run locally. Otherwise a complete location dictionary would
# have to be pickled, unpickled and filtered for local models, being O(n) instead
# of the current O(1), at the cost of more function calls
location = self.controller.getLocation(model_id)
proxy = self.controller.getProxy(location)
state = proxy.getState(model_id)
#assert debug("Setting state for " + str(variable))
setattr(self, variable, state)
if self.fetch_all:
#assert info("Downloading model from locations")
# We must download the state from each and every model
for model in self.model.component_set:
location = self.controller.getLocation(model.model_id)
proxy = self.controller.getProxy(location)
model.state = proxy.getState(model.model_id)
# Shut down every location
#assert debug("Shutting down servers")
for loc in locations:
proxy = self.controller.getProxy(loc)
# If this call is one-way, the simulation may be stopped before finish() has been called everywhere
proxy.finish()
self.progress_finished = True
# A progress bar was running without checkpointing: set the event to finish it
if self.progress and self.checkpoint_interval <= 0:
self.progress_event.set()
# Activity tracking is enabled, so visualize it in whatever way was configured
if self.activity_visualisation:
visualizeActivity(self)
# Check if the model was to be visualized
if self.draw_model:
# Possibly include event count visualisation
#colors = self.controller.runAllocator()
colors = self.controller.getEventGraph()
#assert info("Drawing model distribution")
out = open(self.draw_model_file, 'a')
self.drawModelConnections(out, self.model, colors)
out.write("}")
global was_main
if was_main:
global nested
nested = False

375
src/pypdevs/solver.py Normal file
View file

@ -0,0 +1,375 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The actual DEVS solvers containing the main DEVS implementation
"""
from collections import defaultdict
from pypdevs.DEVS import *
from pypdevs.util import *
from pypdevs.logger import *
from pypdevs.classicDEVSWrapper import ClassicDEVSWrapper
class Solver(object):
"""
A unified DEVS solver, containing all necessary functions
"""
def __init__(self, listeners = {}):
"""
Constructor
"""
self.activities = {}
self.dsdevs_dict = {}
self.listeners = listeners
def atomicOutputGenerationEventTracing(self, aDEVS, time):
"""
Wrapper for the AtomicDEVS output function, which will save event counts
:param aDEVS: the AtomicDEVS model that generates the output
:param time: the time at which the output must be generated
:returns: dict -- the generated output
"""
retval = Solver.atomicOutputGeneration(self, aDEVS, time)
for port in retval:
port.msg_count += len(retval[port])
return retval
def atomicOutputGeneration(self, aDEVS, time):
"""
AtomicDEVS function to generate output, invokes the outputFnc function of the model.
:param aDEVS: the AtomicDEVS model that generates the output
:param time: the time at which the output must be generated
:returns: dict -- the generated output
"""
aDEVS.my_output = aDEVS.outputFnc()
# Being here means that this model created output and thus triggered its internal transition;
# save this knowledge in the basesimulator for use in the actual transition step
self.transitioning[aDEVS] |= 1
return aDEVS.my_output
def massAtomicTransitions(self, trans, clock):
"""
AtomicDEVS function to perform all necessary transitions,
does so on a collection of models for performance.
:param trans: iterable containing all models and their requested transition
:param clock: the time at which the transition must happen
"""
t, age = clock
partialmod = []
for aDEVS in trans:
ttype = trans[aDEVS]
###########
## Memoization and activity tracking code
## Skipped in local simulation
if not self.temporary_irreversible:
# Memo part
if self.memoization and len(aDEVS.memo) >= 2:
found = False
prev = aDEVS.memo.pop()
memo = aDEVS.memo[-1]
if memo.time_last == clock and prev.loadState() == aDEVS.state:
if ttype == 1:
found = True
elif aDEVS.my_input == memo.my_input:
# Inputs should be equal too
if ttype == 3:
found = True
elif aDEVS.elapsed == memo.elapsed and ttype == 2:
found = True
if found:
aDEVS.state = memo.loadState()
aDEVS.time_last = clock
aDEVS.time_next = memo.time_next
# Just add the copy
aDEVS.old_states.append(memo)
if self.do_some_tracing:
# Completely skip all these calls if no tracing, saves us a lot of function calls
if ttype == 1:
self.tracers.tracesInternal(aDEVS)
elif ttype == 2:
self.tracers.tracesExternal(aDEVS)
elif ttype == 3:
self.tracers.tracesConfluent(aDEVS)
aDEVS.my_input = {}
if self.relocation_pending:
# Quit ASAP by throwing an exception
raise QuickStopException()
continue
else:
aDEVS.memo = []
activity_tracking_prevalue = aDEVS.preActivityCalculation()
elif self.activity_tracking:
activity_tracking_prevalue = aDEVS.preActivityCalculation()
###########
# Make a copy of the message before it is passed to the user
if self.msg_copy != 2:
# Prevent a pass statement, which still consumes some time in CPython
if self.msg_copy == 1:
# Using list comprehension inside of dictionary comprehension...
aDEVS.my_input = {key:
[i.copy() for i in aDEVS.my_input[key]]
for key in aDEVS.my_input}
elif self.msg_copy == 0:
# Dictionary comprehension
aDEVS.my_input = {key:
pickle.loads(pickle.dumps(aDEVS.my_input[key],
pickle.HIGHEST_PROTOCOL))
for key in aDEVS.my_input}
# NOTE ttype mappings: (EI)
# 1 -- Internal transition (01)
# 2 -- External transition (10)
# 3 -- Confluent transition (11)
if ttype == 1:
# Internal only
aDEVS.elapsed = None
aDEVS.state = aDEVS.intTransition()
elif ttype == 2:
# External only
aDEVS.elapsed = t - aDEVS.time_last[0]
aDEVS.state = aDEVS.extTransition(aDEVS.my_input)
elif ttype == 3:
# Confluent
aDEVS.elapsed = 0.
aDEVS.state = aDEVS.confTransition(aDEVS.my_input)
else:
raise DEVSException(
"Problem in transitioning dictionary: unknown element %s"
% ttype)
ta = aDEVS.timeAdvance()
aDEVS.time_last = clock
if ta < 0:
raise DEVSException("Negative time advance in atomic model '" + \
aDEVS.getModelFullName() + "' with value " + \
str(ta) + " at time " + str(t))
# Update the time; this is only done in time_next, as it will propagate to the basesimulator
aDEVS.time_next = (t + ta, 1 if ta else (age + 1))
# Save the state
if not self.temporary_irreversible:
partialmod.append(aDEVS)
# But only if there are multiple kernels, since otherwise there would be no other kernel to invoke a reversion
# This can save us lots of time for local simulation (however, all other code is written with parallelisation in mind...)
activity = aDEVS.postActivityCalculation(activity_tracking_prevalue)
aDEVS.old_states.append(self.state_saver(aDEVS.time_last,
aDEVS.time_next,
aDEVS.state,
activity,
aDEVS.my_input,
aDEVS.elapsed))
if self.relocation_pending:
# Quit ASAP by throwing an exception
for m in partialmod:
# Roll back these models to before the transitions
m.time_next = m.old_states[-1].time_next
m.time_last = m.old_states[-1].time_last
m.state = m.old_states[-1].loadState()
self.model.scheduler.massReschedule(trans)
self.server.flushQueuedMessages()
raise QuickStopException()
elif self.activity_tracking:
activity = aDEVS.postActivityCalculation(activity_tracking_prevalue)
self.total_activities[aDEVS.model_id] += activity
if self.do_some_tracing:
# Completely skip all these calls if no tracing, saves us a lot of function calls
if ttype == 1:
self.tracers.tracesInternal(aDEVS)
elif ttype == 2:
self.tracers.tracesExternal(aDEVS)
elif ttype == 3:
self.tracers.tracesConfluent(aDEVS)
# Clear the bag
aDEVS.my_input = {}
self.server.flushQueuedMessages()
def atomicInit(self, aDEVS, time):
"""
AtomicDEVS function to initialise the model
:param aDEVS: the model to initialise
"""
aDEVS.time_last = (time[0] - aDEVS.elapsed, 1)
ta = aDEVS.timeAdvance()
if ta < 0:
raise DEVSException("Negative time advance in atomic model '" + \
aDEVS.getModelFullName() + "' with value " + \
str(ta) + " at initialisation")
aDEVS.time_next = (aDEVS.time_last[0] + ta, 1)
# Save the state
if not self.irreversible:
aDEVS.old_states.append(self.state_saver(aDEVS.time_last,
aDEVS.time_next,
aDEVS.state,
0.0,
{},
0.0))
# All tracing features
self.tracers.tracesInit(aDEVS, time)
def coupledOutputGenerationClassic(self, time):
"""
CoupledDEVS function to generate the output, calls the atomicDEVS models where necessary. Output is routed too.
:param time: the time at which output should be generated
:returns: the models that should be rescheduled
"""
cDEVS = self.model
imminent = cDEVS.scheduler.getImminent(time)
if not imminent:
# For real time simulation, when a model is interrupted
return self.transitioning
reschedule = set(imminent)
for model in imminent:
model.time_next = (model.time_next[0], model.time_next[1] + 1)
# Return value are the models to reschedule
# self.transitioning are the models that must transition
if len(imminent) > 1:
# Perform all selects
imminent.sort()
pending = imminent
level = 1
while len(pending) > 1:
# Take the model each time, as we need to make sure that the selectHierarchy is valid everywhere
model = pending[0]
# Make a set first to remove duplicates
colliding = list(set([m.select_hierarchy[level] for m in pending]))
chosen = model.select_hierarchy[level-1].select(
sorted(colliding, key=lambda i:i.getModelFullName()))
pending = [m for m in pending
if m.select_hierarchy[level] == chosen]
level += 1
child = pending[0]
else:
child = imminent[0]
# Recorrect the timeNext of the model that will transition
child.time_next = (child.time_next[0], child.time_next[1] - 1)
outbag = child.my_output = ClassicDEVSWrapper(child).outputFnc()
self.transitioning[child] = 1
for outport in outbag:
for inport, z in outport.routing_outline:
payload = outbag[outport]
if z is not None:
payload = [z(pickle.loads(pickle.dumps(m))) for m in payload]
aDEVS = inport.host_DEVS
aDEVS.my_input[inport] = list(payload)
self.transitioning[aDEVS] = 2
reschedule.add(aDEVS)
# We have now generated the transitioning variable, though we need some small magic to have it work for classic DEVS
self.transitioning = {ClassicDEVSWrapper(m): self.transitioning[m]
for m in self.transitioning}
return reschedule
def coupledOutputGeneration(self, time):
"""
CoupledDEVS function to generate the output, calls the atomicDEVS models where necessary. Output is routed too.
:param time: the time at which output should be generated
:returns: the models that should be rescheduled
"""
cDEVS = self.model
remotes = {}
for child in cDEVS.scheduler.getImminent(time):
outbag = self.atomicOutputGeneration(child, time)
for outport in outbag:
payload = outbag[outport]
if not hasattr(outport, "routing_outline"):
raise Exception(outport)
for inport, z in outport.routing_outline:
aDEVS = inport.host_DEVS
if z is not None:
payload = [z(pickle.loads(pickle.dumps(m)))
for m in payload]
if aDEVS.model_id in self.model.local_model_ids:
# This setdefault call is responsible for our non-linear runtime in several situations...
aDEVS.my_input.setdefault(inport, []).extend(payload)
self.transitioning[aDEVS] |= 2
else:
remotes.setdefault(aDEVS.model_id,
{}).setdefault(inport.port_id,
[]).extend(payload)
for destination in remotes:
self.send(destination, time, remotes[destination])
return self.transitioning
def coupledInit(self):
"""
CoupledDEVS function to initialise the model, calls all its _local_ children too.
"""
cDEVS = self.model
time_next = (float('inf'), 1)
# This part isn't fast, but that doesn't matter: it is only called once, at initialisation,
# and every element has to be initialised anyway, so optimising here gains nothing
# Only local models should receive this initialisation from us
for d in self.local:
self.atomicInit(d, (0.0, 0))
time_next = min(time_next, d.time_next)
# NOTE do not immediately assign to the timeNext, as this is used in the GVT algorithm to see whether a node has finished
cDEVS.time_next = time_next
self.model.setScheduler(self.model.scheduler_type)
self.server.flushQueuedMessages()
def performDSDEVS(self, transitioning):
"""
Perform Dynamic Structure detection of the model
:param transitioning: iterable to be checked for a dynamic structure transition
"""
#TODO setting the server is very dirty
self.dc_altered = set()
for m in transitioning:
m.server = self
iterlist = [aDEVS.parent for aDEVS in transitioning
if aDEVS.modelTransition(self.dsdevs_dict)]
# Contains all models that were already checked, to prevent duplicate checking.
# This was not necessary for atomic models, as they are guaranteed to be called
# only once: they have no children to induce a structural change on them
checked = set()
while iterlist:
new_iterlist = []
for cDEVS in iterlist:
# Check for None first: assigning an attribute on None would raise before the check below
if cDEVS is None:
# Problematic
#assert warning("Root DEVS returned True in the modelTransition method; ignoring")
continue
if cDEVS in checked:
continue
cDEVS.server = self
checked.add(cDEVS)
if cDEVS.modelTransition(self.dsdevs_dict):
new_iterlist.append(cDEVS.parent)
# Don't update the iterlist while we are iterating over it
iterlist = new_iterlist
if self.dc_altered:
self.model.redoDirectConnection(self.dc_altered)
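The transitioning dictionary used throughout the solver encodes the pending transition kind as bit flags (1 = internal, 2 = external, 3 = confluent). A minimal sketch of how those flags combine, outside PyPDEVS itself and using a plain object as a stand-in for an AtomicDEVS instance:

from collections import defaultdict

transitioning = defaultdict(int)   # model -> pending transition kind
model = object()                   # stand-in for an AtomicDEVS instance
transitioning[model] |= 1          # output generation marks an internal transition
transitioning[model] |= 2          # routed input marks an external transition
assert transitioning[model] == 3   # both bits set: confluent transition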

236
src/pypdevs/statesavers.py Normal file
View file

@ -0,0 +1,236 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Different methods to save the state; this allows for more modularity than a single big switch statement in the main code.
Note that these classes are not subclasses of a more generic class, as this allows these classes to save data in a completely
different manner from each other. Furthermore, it avoids (slow) inheritance.
"""
from copy import deepcopy, copy
try:
import cPickle as pickle
except ImportError:
import pickle
import marshal
class DeepCopyState(object):
"""
Class to save the state using the Python 'deepcopy' library
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = deepcopy(state)
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return deepcopy(self.state)
class CopyState(object):
"""
Class to save the state using the Python 'copy' library
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = copy(state)
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return copy(self.state)
class AssignState(object):
"""
Class to save the state using a simple assignment; this is unsafe for most situations...
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = state
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class; note that with simple assignment no copy is made
:returns: state - the saved state object itself (not a copy)
"""
return self.state
class PickleZeroState(object):
"""
Class to save the state using the Python 'pickle' library, with the option to use the pickling protocol 0.
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = pickle.dumps(state, 0)
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return pickle.loads(self.state)
class PickleHighestState(object):
"""
Class to save the state using the Python 'pickle' library, with the option to use the highest available pickling protocol.
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = pickle.dumps(state, pickle.HIGHEST_PROTOCOL)
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return pickle.loads(self.state)
class CustomState(object):
"""
Class to save the state using a manually defined copy() function of the state. If no such method is provided, an error is raised.
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = state.copy()
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return self.state.copy()
class MarshalState(object):
"""
Class to save the state using the Python 'marshal' library.
"""
def __init__(self, time_last, time_next, state, activity, my_input, elapsed):
"""
Constructor
:param time_last: time_last to save
:param time_next: time_next to save
:param state: state to save
:param activity: the activity of the computation
:param my_input: the state input to save for memorisation
:param elapsed: the time elapsed
"""
self.time_last = time_last
self.time_next = time_next
self.activity = activity
self.state = marshal.dumps(state)
self.my_input = my_input
self.elapsed = elapsed
def loadState(self):
"""
Load the state from the class, this will make a copy
:returns: state - copy of the state that was saved
"""
return marshal.loads(self.state)
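All of the state savers above share the same informal protocol: they are constructed with (time_last, time_next, state, activity, my_input, elapsed) and hand a usable state back via loadState(). A minimal usage sketch, assuming the package is importable as pypdevs.statesavers:

from pypdevs.statesavers import PickleHighestState

state = {"count": 4}
saver = PickleHighestState((1.0, 1), (2.5, 1), state, 0.0, {}, 0.0)
restored = saver.loadState()
assert restored == state and restored is not state   # loadState returns a fresh copy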

View file

@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,43 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MyAllocator(object):
"""
Allocate all models at the start of the simulation. After this, model relocation is handed over to a relocator.
"""
def allocate(self, models, edges, nrnodes, totalActivities):
"""
Calculate allocations for the nodes, using the information provided.
:param models: the models to allocate
:param edges: the edges between the models
:param nrnodes: the number of nodes to allocate over. Simply an upper bound!
:param totalActivities: activity tracking information from each model
:returns: allocation that was found
"""
# Return something of the form: {0: 0, 1: 0, 2: 0, 3: 1}
# To allocate model_ids 0, 1 and 2 to node 0 and model_id 3 to node 1
return {0: 0, 1: 0, 2: 0, 3: 1}
def getTerminationTime(self):
"""
Returns the simulated time the allocator needs before it can make an 'educated guess' of the advised allocation.
The allocation will not happen at exactly this time, but as soon as the GVT passes it. While this is not strictly
necessary, it avoids the overhead of putting such a test in frequently used code.
:returns: float -- the time at which to perform the allocations (and save them)
"""
# No need for any run time information means 0.0
return 0.0
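As a hedged illustration of the allocate() contract above, a round-robin variant could simply spread models evenly over the available nodes (assuming, as elsewhere in PyPDEVS, that each model exposes a model_id attribute):

def allocate_round_robin(models, edges, nrnodes, totalActivities):
    # Ignore edge and activity information; just cycle through the nodes
    return {model.model_id: i % nrnodes for i, model in enumerate(models)}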

View file

@ -0,0 +1,55 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create a simulator with your model
model = Model()
sim = Simulator(model)
# Some of the most common options
# Enable verbose tracing
sim.setVerbose("output")
# End the simulation at simulation time 200
sim.setTerminationTime(200)
# Or use a termination condition to do the same
#def cond(model, time):
# return time >= 200
#sim.setTerminationCondition(cond)
# If you want to reinit it later
sim.setAllowLocalReinit(True)
# Finally simulate it
sim.simulate()
# Now possibly use the altered model by accessing the model attributes
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!! Only possible in local simulation, distributed simulation requires !!!
# !!! another configuration option to achieve this. !!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# You might want to rerun the simulation (for whatever reason)
# Just call the simulate method again, all configuration from before will be
# used again. Altering configuration options is possible (to some extent)
sim.simulate()
# Or if you want to alter a specific attribute
sim.setReinitState(model.generator, GeneratorState())
sim.setReinitStateAttr(model.generator, "generated", 4)
sim.setReinitAttributes(model.generator, "delay", 1)
# Now run it again
sim.simulate()

View file

@ -0,0 +1,58 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Relocator for user-provided relocation directives
"""
class MyRelocator(object):
"""
Main class
"""
def __init__(self):
"""
Initialize the relocator
"""
pass
def setController(self, controller):
"""
Sets the controller
"""
pass
def getRelocations(self, gvt, activities, horizon):
"""
Fetch the relocations that are pending for the current GVT
:param gvt: current GVT
:param activities: the activities being passed on the GVT ring
:returns: dictionary containing all relocations
"""
# Perform a relocation, for example move the model with ID 1 to node 2, and the model with ID 3 to node 0
# Remaps are allowed to happen to the current location, as they will simply be discarded by the actual relocator
relocate = {1: 2, 3: 0}
return relocate
def lastStateOnly(self):
"""
Should the sum of all activities within this horizon be used, or simply the activity from the last state?
This has no effect on performance, but defines which activities the relocator can read.
Use 'last state only' if you require an abstracted view of the activities at a single timestep (equal to the GVT).
Use 'all states' if you require all information to be merged, such as in activity tracking.
"""
# "all states"
return False
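A hedged sketch of an alternative getRelocations(): once the GVT passes a fixed threshold, pull every model mentioned in the activity report back to node 0. The exact structure of 'activities' depends on lastStateOnly(); it is treated here, as an assumption, as a mapping from model_id to accumulated activity.

def getRelocations_consolidate(gvt, activities, horizon):
    if gvt < 100.0:
        return {}                                    # no relocation directives yet
    return {model_id: 0 for model_id in activities}  # move everything to node 0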

View file

@ -0,0 +1,68 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Scheduler(object):
def __init__(self, models, epsilon, totalModels):
"""
Constructor
:param models: all models in the simulation
"""
# Do your initialisation and schedule all models that are passed in the 'models' parameter
# NOTE: make a copy of these lists if you want to modify them
pass
def schedule(self, model):
"""
Schedule a new model, that was NOT present in the scheduler before
:param model: the model to schedule
"""
pass
def unschedule(self, model):
"""
Unschedule a model, so remove it from the scheduler for good
:param model: model to unschedule
"""
pass
def massReschedule(self, reschedule_set):
"""
Reschedule all models provided; all of them should already be scheduled, and all should still be in the scheduler after the rescheduling.
:param reschedule_set: iterable containing all models to reschedule
"""
pass
def readFirst(self):
"""
Returns the time of the first model that has to transition
:returns: timestamp of the first model
"""
pass
def getImminent(self, time):
"""
Returns an iterable of all models that transition at the provided time, with the epsilon deviation (from the constructor) allowed.
For efficiency, this method should only check the **first** elements, so invoking it with a timestamp higher
than the value returned by the *readFirst* method will **always** return an empty iterable.
:param time: timestamp to check for models
:returns: iterable -- all models for that time
"""
pass
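A minimal, hedged sketch of the scheduler interface above, keeping a plain list sorted on time_next (assuming, as elsewhere in PyPDEVS, that models carry (time, age) tuples in their time_next attribute); a real scheduler would use a heap or calendar queue instead.

class SortedListScheduler(object):
    def __init__(self, models, epsilon, totalModels):
        self.epsilon = epsilon
        self.models = list(models)                 # copy, as the docstring advises

    def schedule(self, model):
        self.models.append(model)

    def unschedule(self, model):
        self.models.remove(model)

    def massReschedule(self, reschedule_set):
        pass                                       # nothing to do: we sort lazily

    def readFirst(self):
        self.models.sort(key=lambda m: m.time_next)
        return self.models[0].time_next

    def getImminent(self, time):
        self.models.sort(key=lambda m: m.time_next)
        return [m for m in self.models
                if abs(m.time_next[0] - time[0]) <= self.epsilon]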

View file

@ -0,0 +1,93 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util import runTraceAtController
import sys
class MyTracer(object):
"""
A custom tracer
"""
def __init__(self, uid, server, myOwnArg1, myOwnArg2):
"""
Constructor
:param uid: the UID of this tracer
:param server: the server object, to make remote calls
:param myOwnArg_: custom arguments for this tracer
"""
self.server = server
self.uid = uid
# Own processing
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
pass
def stopTracer(self):
"""
Stop the tracer
"""
pass
def trace(self, time, myCustomParam1, myCustomParam2):
"""
Actual tracing function, will do something that is irreversible. If this function is called,
it is guaranteed that the trace operation will *not* be rolled back.
:param time: time at which this trace happened
:param myCustomParam_: custom parameters
"""
pass
def traceInternal(self, aDEVS):
"""
Tracing done for the internal transition function
:param aDEVS: the model that transitioned
"""
# You should only vary the 'myCustomParam_' part
runTraceAtController(self.server, self.uid, aDEVS, [myCustomParam1, myCustomParam2])
def traceConfluent(self, aDEVS):
"""
Tracing done for the confluent transition function
:param aDEVS: the model that transitioned
"""
# You should only vary the 'myCustomParam_' part
runTraceAtController(self.server, self.uid, aDEVS, [myCustomParam1, myCustomParam2])
def traceExternal(self, aDEVS):
"""
Tracing done for the external transition function
:param aDEVS: the model that transitioned
"""
# You should only vary the 'myCustomParam_' part
runTraceAtController(self.server, self.uid, aDEVS, [myCustomParam1, myCustomParam2])
def traceInit(self, aDEVS):
"""
Tracing done for the initialisation
:param aDEVS: the model that was initialised
"""
# You should only vary the 'myCustomParam_' part
runTraceAtController(self.server, self.uid, aDEVS, [myCustomParam1, myCustomParam2])
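As a hedged, concrete instance of the template above: a tracer that only counts transitions. It relies on the same runTraceAtController import as the template and assumes, as the other tracers in this commit suggest, that runTraceAtController(server, uid, aDEVS, args) eventually calls trace(*args) on this tracer at the controller.

class CountingTracer(object):
    def __init__(self, uid, server):
        self.uid = uid
        self.server = server
        self.count = 0

    def startTracer(self, recover):
        if not recover:
            self.count = 0

    def stopTracer(self):
        print("traced %d transitions" % self.count)

    def trace(self, *args):
        self.count += 1                            # irreversible part, run at the controller

    def traceInternal(self, aDEVS):
        runTraceAtController(self.server, self.uid, aDEVS, [])

    def traceExternal(self, aDEVS):
        runTraceAtController(self.server, self.uid, aDEVS, [])

    def traceConfluent(self, aDEVS):
        runTraceAtController(self.server, self.uid, aDEVS, [])

    def traceInit(self, aDEVS):
        pass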

88
src/pypdevs/threadpool.py Normal file
View file

@ -0,0 +1,88 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A threadpool to process incoming messages over MPI with a fixed number of
(already running) threads.
Based on threadpool implementation found at http://stackoverflow.com/
questions/3033952/python-thread-pool-similar-to-the-multiprocessing-pool
"""
try:
import Queue as queue
except ImportError:
import queue
from threading import Thread
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
"""
Constructor
:param tasks: queue containing tasks to execute
"""
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
"""
Run the worker thread
"""
while 1:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
print(e)
finally:
self.tasks.task_done()
class ThreadPool(object):
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
"""
Constructor
:param num_threads: number of threads to start
"""
self.tasks = queue.Queue()
self.num_threads = num_threads
for _ in range(num_threads):
Worker(self.tasks)
def __setstate__(self, state):
"""
For pickling
"""
# Obj will be empty, accept it though
self.__init__(state)
def __getstate__(self):
"""
For pickling
"""
# A queue is unpicklable...
return self.num_threads
def addTask(self, func, *args, **kwargs):
"""
Add a task to the queue
:param func: function to execute
"""
self.tasks.put((func, args, kwargs))
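A small usage sketch of the pool above (assuming the module is importable as pypdevs.threadpool): addTask() queues a callable, and joining the underlying queue waits for all queued work to be marked done by the workers.

from pypdevs.threadpool import ThreadPool

def work(i):
    return i * i                                   # placeholder task

pool = ThreadPool(4)                               # starts four daemon worker threads
for i in range(10):
    pool.addTask(work, i)
pool.tasks.join()                                  # block until every task_done() call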

130
src/pypdevs/tracer.py Normal file
View file

@ -0,0 +1,130 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Tracers(object):
"""
Interface for all tracers
"""
def __init__(self):
"""
Constructor
"""
self.tracers = []
self.tracers_init = []
self.uid = 0
def registerTracer(self, tracer, server, recover):
"""
Register a tracer, so that it will also receive all transitions.
:param tracer: tuple of the form (file, classname, [args])
:param server: the server object to be able to make remote calls
:param recover: whether or not this is a recovered registration (used during checkpointing)
"""
try:
exec("from pypdevs.tracers.%s import %s" % tracer[0:2])
except:
exec("from %s import %s" % tracer[0:2])
self.tracers.append(eval("%s(%i, server, *%s)" % (tracer[1],
self.uid,
tracer[2])))
self.tracers_init.append(tracer)
self.uid += 1
self.tracers[-1].startTracer(recover)
def hasTracers(self):
"""
Checks whether or not there are any registered tracers
:returns: bool
"""
return len(self.tracers) > 0
def getByID(self, uid):
"""
Gets a tracer by its UID
:param uid: the UID of the tracer to return
:returns: tracer
"""
return self.tracers[uid]
def stopTracers(self):
"""
Stop all registered tracers
"""
for tracer in self.tracers:
tracer.stopTracer()
def tracesUser(self, time, aDEVS, variable, value):
"""
Perform all tracing actions for a user imposed modification. This is NOT supported by default DEVS, so we don't require tracers to handle this either.
:param time: the time at which the modification happened; this will be the termination time of the previous simulation run and **not** the time at which the timeAdvance was recomputed!
:param aDEVS: the atomic DEVS model that was altered
:param variable: the variable that was altered (as a string)
:param value: the new value of the variable
"""
for tracer in self.tracers:
try:
tracer.traceUser(time, aDEVS, variable, value)
except AttributeError:
# Some tracers choose to ignore this event
pass
def tracesInit(self, aDEVS, t):
"""
Perform all tracing actions for an initialisation
:param aDEVS: the model that was initialised
:param t: the time at which the initialisation should be logged
"""
if aDEVS.full_name is None:
return
for tracer in self.tracers:
tracer.traceInit(aDEVS, t)
def tracesInternal(self, aDEVS):
"""
Perform all tracing actions for an internal transition
:param aDEVS: the model that transitioned
"""
if aDEVS.full_name is None:
return
for tracer in self.tracers:
tracer.traceInternal(aDEVS)
def tracesExternal(self, aDEVS):
"""
Perform all tracing actions for an external transition
:param aDEVS: the model that transitioned
"""
if aDEVS.full_name is None:
return
for tracer in self.tracers:
tracer.traceExternal(aDEVS)
def tracesConfluent(self, aDEVS):
"""
Perform all tracing actions for a confluent transition
:param aDEVS: the model that transitioned
"""
if aDEVS.full_name is None:
return
for tracer in self.tracers:
tracer.traceConfluent(aDEVS)
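A hedged registration sketch for the registry above: tracer descriptors are (module, classname, [constructor args]) tuples. Registering the verbose tracer defined later in this commit could look as follows, assuming its module is named tracerVerbose (the filename is not shown in this diff) and using a stand-in server object; a filename of None sends output to stdout.

class FakeServer(object):                          # stand-in for the kernel's server object
    def getName(self):
        return 0                                   # pretend to be the controller node

tracers = Tracers()
tracers.registerTracer(("tracerVerbose", "TracerVerbose", [None]), FakeServer(), False)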

View file

@ -0,0 +1,15 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,162 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController, toStr
from pypdevs.activityVisualisation import visualizeMatrix
import sys
class TracerCell(object):
"""
A tracer for Cell-DEVS style tracing output
"""
def __init__(self, uid, server, filename, x_size, y_size, multifile):
"""
Constructor
:param uid: the UID of this tracer
:param server: the server to make remote calls on
:param filename: filename to save to
:param x_size: the x size of the grid
:param y_size: the y size of the grid
:param multifile: whether or not multiple files should be generated for each timestep
"""
if server.getName() == 0:
self.filename = filename
else:
self.filename = None
self.server = server
self.uid = uid
self.x_size = x_size
self.y_size = y_size
self.multifile = multifile
self.prevtime = 0.0
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
if self.filename is None:
return
elif recover:
if not self.multifile:
self.cell_realfile = open(self.filename, 'a+')
else:
if not self.multifile:
self.cell_realfile = open(self.filename, 'w')
self.cell_count = 0
self.cells = [[None] * self.y_size for _ in range(self.x_size)]
def stopTracer(self):
"""
Stop the tracer
"""
if not self.multifile:
self.cell_realfile.flush()
def traceInit(self, aDEVS, t):
"""
The trace functionality for Cell DEVS output at initialisation
:param aDEVS: the model that was initialised
:param t: time at which it should be traced
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
t,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceInternal(self, aDEVS):
"""
The trace functionality for Cell DEVS output at an internal transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceExternal(self, aDEVS):
"""
The trace functionality for Cell DEVS output at an external transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceConfluent(self, aDEVS):
"""
The trace functionality for Cell DEVS output at a confluent transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError as e:
print(e)
pass
def trace(self, x, y, time, state):
"""
Save the state of the cell
:param x: the x coordinate of the model, to be used when plotting
:param y: the y coordinate of the model, to be used when plotting
:param time: the time when the model assumed this state
:param state: the actual state to print
"""
# Strip off the age for Cell DEVS
time = time[0]
if time != self.prevtime:
# First flush the grid
self.cell_count += 1
if self.multifile:
self.cell_realfile = open(self.filename % self.cell_count, 'w')
else:
self.cell_realfile.write("== At time %s ===\n" % (self.prevtime))
visualizeMatrix(self.cells, "%s", self.cell_realfile)
self.prevtime = time
if self.multifile:
self.cell_realfile.close()
self.cells[x][y] = state
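The Cell-DEVS tracer above quietly skips any model whose state lacks the attributes it expects (the model's x and y coordinates and a state with a toCellState() method). A hedged sketch of a compatible state class:

class CellState(object):
    def __init__(self, value):
        self.value = value

    def toCellState(self):
        # whatever should end up in the traced grid cell for this model
        return self.value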

View file

@ -0,0 +1,271 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController, toStr, DEVSException
from math import floor
class VCDRecord(object):
"""
A data class to keep information about VCD variables
"""
def __init__(self, identifier_nr, model_name, port_name):
"""
Constructor.
:param identifier_nr: the actual identifier
:param model_name: name of the model
:param port_name: name of the port
"""
self.model_name = model_name
self.port_name = port_name
self.identifier = identifier_nr
# BitSize cannot be given since there is no event created on this wire
# Set to None to make sure that it will be changed
self.bit_size = None
class TracerVCD(object):
"""
A tracer for VCD output. Should only be used for binary signals!
"""
def __init__(self, uid, server, filename):
"""
Constructor
:param uid: the UID of the tracer
:param server: the server to make remote requests on
:param filename: file to save the trace to
"""
if server.getName() == 0:
self.filename = filename
else:
self.filename = None
self.server = server
self.uid = uid
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
if self.filename is None:
# Nothing to do here as we aren't the controller
return
elif recover:
self.vcd_file = open(self.filename, 'a+')
else:
self.vcd_file = open(self.filename, 'w')
self.vcd_var_list = []
self.vcd_prevtime = 0.0
self.vcdHeader()
def stopTracer(self):
"""
Stop the tracer
"""
self.vcd_file.flush()
def vcdHeader(self):
"""
Create the VCD file header by doing calls to the coordinator
"""
self.vcd_file.write(("$date\n").encode())
from datetime import date
self.vcd_file.write(("\t" + date.today().isoformat() + "\n" +
"$end\n" +
"$version\n" +
"\tPyDEVS VCD export\n" +
"$end\n" +
"$comment\n" +
"\tGenerated from DEVS-code\n" +
"$end\n" +
"$timescale 1ns $end\n").encode())
variables = self.server.getProxy(0).getVCDVariables()
counter = 0
for i in variables:
model, port = i
self.vcd_var_list.append(VCDRecord(counter, model, port))
counter += 1
modelList = []
for i in range(len(self.vcd_var_list)):
if self.vcd_var_list[i].model_name not in modelList:
modelList.append(self.vcd_var_list[i].model_name)
for module in modelList:
self.vcd_file.write(("$scope %s %s $end\n" % (module, module)).encode())
for var in range(len(self.vcd_var_list)):
if self.vcd_var_list[var].model_name == module:
self.vcd_file.write("$var wire ".encode())
if self.vcd_var_list[var].bit_size is None:
self.vcd_file.write("1".encode())
else:
bitsize = str(self.vcd_var_list[var].bit_size)
self.vcd_file.write(bitsize.encode())
self.vcd_file.write((" %s %s $end\n"
% (self.vcd_var_list[var].identifier,
self.vcd_var_list[var].port_name)).encode())
self.vcd_file.write(("$upscope $end\n").encode())
self.vcd_file.write(("$enddefinitions $end\n").encode())
self.vcd_file.write(("$dumpvars \n").encode())
for var in range(len(self.vcd_var_list)):
self.vcd_file.write(("b").encode())
if self.vcd_var_list[var].bit_size is None:
# The wire is a constant error signal, so the wire is never used
# Assume 1 bit long
self.vcd_file.write(("z").encode())
else:
for i in range(self.vcd_var_list[var].bit_size):
self.vcd_file.write(("z").encode())
self.vcd_file.write((" %s\n" % self.vcd_var_list[var].identifier).encode())
self.vcd_file.write(("$end\n").encode())
def trace(self, model_name, time, port_name, vcd_state):
"""
Trace a VCD entry
:param model_name: name of the model
:param time: time at which transition happened
:param port_name: name of the port
:param vcd_state: state to trace on the specified port
"""
# Check if the signal is a valid binary signal
for i in range(len(vcd_state)):
if (i == 0):
if vcd_state[i] == 'b':
continue
else:
raise DEVSException(("Port %s in model does not carry " +
"a binary signal\n" +
"VCD exports require a binary signal," +
"not: ") % (port_name, model_name, vcd_state))
char = vcd_state[i]
if char not in ["0", "1", "E", "x"]:
raise DEVSException(("Port %s in model does not carry " +
"a binary signal\n" +
"VCD exports require a binary signal," +
"not: ") % (port_name, model_name, vcd_state))
# Find the identifier of this wire
for i in range(len(self.vcd_var_list)):
if (self.vcd_var_list[i].model_name == model_name and
self.vcd_var_list[i].port_name == port_name):
identifier = str(self.vcd_var_list[i].identifier)
break
# If the bit_size is not yet defined, define it now
if self.vcd_var_list[i].bit_size is None:
self.vcd_var_list[i].bit_size = len(vcd_state)-1
elif self.vcd_var_list[i].bit_size != len(vcd_state) - 1:
raise DEVSException("Wire has changing bitsize!\n" +
"You are probably not using bit encoding!")
# Now we have to convert between logisim and VCD notation
vcd_state = vcd_state.replace('x', 'z')
vcd_state = vcd_state.replace('E', 'x')
# identifier will be defined, otherwise the record was not in the list
if time > self.vcd_prevtime:
# Convert float to integer without losing precision
# ex. 5.0 --> 50, 5.5 --> 55
t = time[0]
vcd_time = int(str(int(floor(t))) +
str(int(t - floor(t)) * (len(str(t)) - 2)))
if (self.vcd_prevtime != vcd_time):
# The time has passed, so add a new VCD header
self.vcd_file.write(("#" + str(vcd_time) + "\n").encode())
self.vcd_prevtime = vcd_time
self.vcd_file.write((vcd_state + " " + identifier + "\n").encode())
def traceConfluent(self, aDEVS):
"""
The trace functionality for VCD output at a confluent transition
:param aDEVS: the model that transitioned
"""
name = toStr(aDEVS.getModelFullName())
for I in range(len(aDEVS.IPorts)):
port_name = aDEVS.IPorts[I].getPortName()
signal_bag = aDEVS.my_input.get(aDEVS.IPorts[I], [])
if signal_bag is not None:
for port_signal in signal_bag:
runTraceAtController(self.server,
self.uid,
aDEVS,
[name,
aDEVS.time_last,
toStr(port_name),
toStr(port_signal)])
for I in range(len(aDEVS.OPorts) ):
if aDEVS.OPorts[I] in aDEVS.my_output:
port_name = aDEVS.OPorts[I].getPortName()
signal_bag = aDEVS.my_output.get(aDEVS.OPorts[I], [])
if signal_bag is not None:
for port_signal in signal_bag:
runTraceAtController(self.server,
self.uid,
aDEVS,
[name,
aDEVS.time_last,
toStr(port_name),
toStr(port_signal)])
def traceInternal(self, aDEVS):
"""
The trace functionality for VCD output at an internal transition
:param aDEVS: the model that transitioned
"""
name = toStr(aDEVS.getModelFullName())
for I in range(0, len(aDEVS.OPorts) ):
if aDEVS.OPorts[I] in aDEVS.my_output:
port_name = aDEVS.OPorts[I].getPortName()
signal_bag = aDEVS.my_output.get(aDEVS.OPorts[I], [])
if signal_bag is not None:
for port_signal in signal_bag:
runTraceAtController(self.server,
self.uid,
aDEVS,
[name,
aDEVS.time_last,
toStr(port_name),
toStr(port_signal)])
def traceExternal(self, aDEVS):
"""
The trace functionality for VCD output at an external transition
:param aDEVS: the model that transitioned
"""
name = toStr(aDEVS.getModelFullName())
for I in range(len(aDEVS.IPorts)):
port_name = aDEVS.IPorts[I].getPortName()
signal_bag = aDEVS.my_input.get(aDEVS.IPorts[I], [])
if signal_bag is not None:
for port_signal in signal_bag:
runTraceAtController(self.server,
self.uid,
aDEVS,
[name,
aDEVS.time_last,
toStr(port_name),
toStr(port_signal)])
def traceInit(self, aDEVS, t):
"""
The trace functionality for VCD output at initialisation
:param aDEVS: the model that was initialized
:param t: time at which it should be traced
"""
pass
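The VCD tracer above expects every traced value to stringify to a bit-encoded signal such as "b0" or "b0110"; 'x' (unknown) and 'E' (error) are remapped to VCD's 'z' and 'x'. A hedged sketch of a compatible signal value:

class BitSignal(object):
    def __init__(self, bits):
        self.bits = bits                           # e.g. "0110"

    def __str__(self):
        return "b" + self.bits                     # the form checked in trace() above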

View file

@ -0,0 +1,169 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController
import sys
class TracerVerbose(object):
"""
A tracer for simple verbose output
"""
def __init__(self, uid, server, filename):
"""
Constructor
:param uid: the UID of this tracer
:param server: the server to make remote calls on
:param filename: file to save the trace to, can be None for output to stdout
"""
if server.getName() == 0:
self.filename = filename
else:
self.filename = None
self.server = server
self.prevtime = (-1, -1)
self.uid = uid
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
if self.filename is None:
self.verb_file = sys.stdout
elif recover:
self.verb_file = open(self.filename, 'a+')
else:
self.verb_file = open(self.filename, 'w')
def stopTracer(self):
"""
Stop the tracer
"""
self.verb_file.flush()
def trace(self, time, text):
"""
Actual tracing function
:param time: time at which this trace happened
:param text: the text that was traced
"""
string = ""
if time > self.prevtime:
string = ("\n__ Current Time: %10.2f " + "_"*42 + " \n\n") % (time[0])
self.prevtime = time
string += "%s\n" % text
try:
self.verb_file.write(string)
except TypeError:
self.verb_file.write(string.encode())
def traceInternal(self, aDEVS):
"""
Tracing done for the internal transition function
:param aDEVS: the model that transitioned
"""
text = "\n"
text += "\tINTERNAL TRANSITION in model <%s>\n" % aDEVS.getModelFullName()
text += "\t\tNew State: %s\n" % str(aDEVS.state)
text += "\t\tOutput Port Configuration:\n"
for I in range(len(aDEVS.OPorts)):
text += "\t\t\tport <" + str(aDEVS.OPorts[I].getPortName()) + ">:\n"
for msg in aDEVS.my_output.get(aDEVS.OPorts[I], []):
text += "\t\t\t\t" + str(msg) + "\n"
# Don't show the age
text += "\t\tNext scheduled internal transition at time %.2f\n" \
% (aDEVS.time_next[0])
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.time_last, '"' + text + '"'])
def traceConfluent(self, aDEVS):
"""
Tracing done for the confluent transition function
:param aDEVS: the model that transitioned
"""
text = "\n"
text += "\tCONFLUENT TRANSITION in model <%s>\n" % aDEVS.getModelFullName()
text += "\t\tInput Port Configuration:\n"
for I in range(len(aDEVS.IPorts)):
text += "\t\t\tport <" + str(aDEVS.IPorts[I].getPortName()) + ">: \n"
for msg in aDEVS.my_input.get(aDEVS.IPorts[I], []):
text += "\t\t\t\t" + str(msg) + "\n"
text += "\t\tNew State: %s\n" % str(aDEVS.state)
text += "\t\tOutput Port Configuration:\n"
for I in range(len(aDEVS.OPorts)):
text += "\t\t\tport <" + str(aDEVS.OPorts[I].getPortName()) + ">:\n"
for msg in aDEVS.my_output.get(aDEVS.OPorts[I], []):
text += "\t\t\t\t" + str(msg) + "\n"
# Don't show the age
text += "\t\tNext scheduled internal transition at time %.2f\n" \
% (aDEVS.time_next[0])
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.time_last, '"' + text + '"'])
def traceExternal(self, aDEVS):
"""
Tracing done for the external transition function
:param aDEVS: the model that transitioned
"""
text = "\n"
text += "\tEXTERNAL TRANSITION in model <%s>\n" % aDEVS.getModelFullName()
text += "\t\tInput Port Configuration:\n"
for I in range(len(aDEVS.IPorts)):
text += "\t\t\tport <" + str(aDEVS.IPorts[I].getPortName()) + ">:\n"
for msg in aDEVS.my_input.get(aDEVS.IPorts[I], []):
text += "\t\t\t\t" + str(msg) + "\n"
text += "\t\tNew State: %s\n" % str(aDEVS.state)
# Don't show the age
text += "\t\tNext scheduled internal transition at time %.2f\n" \
% (aDEVS.time_next[0])
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.time_last, '"' + text + '"'])
def traceInit(self, aDEVS, t):
"""
Tracing done for the initialisation
:param aDEVS: the model that was initialised
:param t: time at which it should be traced
"""
text = "\n"
text += "\tINITIAL CONDITIONS in model <%s>\n" % aDEVS.getModelFullName()
text += "\t\tInitial State: %s\n" % str(aDEVS.state)
# Don't show the age
text += "\t\tNext scheduled internal transition at time %.2f\n" \
% (aDEVS.time_next[0])
runTraceAtController(self.server,
self.uid,
aDEVS,
[t, '"' + text + '"'])
def traceUser(self, time, aDEVS, variable, value):
text = "\n"
text += "\tUSER CHANGE in model <%s>\n" % aDEVS.getModelFullName()
text += "\t\tAltered attribute <%s> to value <%s>\n" % (variable, value)
# Is only called at the controller, outside of the GVT loop, so commit directly
self.trace(time, text)

View file

@ -0,0 +1,174 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController, toStr
import sys
class TracerXML(object):
"""
A tracer for XML tracing output
"""
def __init__(self, uid, server, filename):
"""
Constructor
:param uid: the UID of this tracer
:param server: the server to make remote calls on
:param filename: file to save the trace to
"""
if server.getName() == 0:
self.filename = filename
else:
self.filename = None
self.server = server
self.uid = uid
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
if self.filename is None:
# Nothing to do here as we aren't the controller
return
elif recover:
self.xml_file = open(self.filename, 'a+')
else:
self.xml_file = open(self.filename, 'w')
self.xml_file.write(("<?xml version=\"1.0\"?>\n" + "<trace>\n").encode())
def stopTracer(self):
"""
Stop the tracer
"""
self.xml_file.write("</trace>")
self.xml_file.flush()
def trace(self, model_name, timestamp, event_kind, port_info, xml_state, str_state):
"""
        Save an XML entry for the provided parameters, basically wrapping them in the necessary tags
:param model_name: name of the model
:param timestamp: timestamp of the transition
:param event_kind: kind of event that happened, e.g. internal, external, ...
:param port_info: actual information about the port
:param xml_state: XML representation of the state
:param str_state: normal string representation of the state
"""
self.xml_file.write(("<event>\n"
+ "<model>" + model_name + "</model>\n"
+ "<time>" + str(timestamp[0]) + "</time>\n"
+ "<kind>" + event_kind + "</kind>\n"
+ port_info
+ "<state>\n"+ xml_state + "<![CDATA[" + str_state + "]]>\n</state>\n"
+ "</event>\n").encode())
def traceInternal(self, aDEVS):
"""
The trace functionality for XML output at an internal transition
:param aDEVS: the model that transitioned
"""
port_info = ""
for I in range(len(aDEVS.OPorts)):
if (aDEVS.OPorts[I] in aDEVS.my_output and
aDEVS.my_output[aDEVS.OPorts[I]] is not None):
port_info += '<port name="' + aDEVS.OPorts[I].getPortName() + '" category="O">\n'
for j in aDEVS.my_output[aDEVS.OPorts[I]]:
port_info += "<message>" + str(j) + "</message>\n</port>\n"
runTraceAtController(self.server,
self.uid,
aDEVS,
[toStr(aDEVS.getModelFullName()),
aDEVS.time_last,
"'IN'",
toStr(port_info),
toStr(aDEVS.state.toXML()),
toStr(aDEVS.state)])
def traceExternal(self, aDEVS):
"""
The trace functionality for XML output at an external transition
:param aDEVS: the model that transitioned
"""
port_info = ""
        for I in range(len(aDEVS.IPorts)):
            port_info += '<port name="' + aDEVS.IPorts[I].getPortName() + '" category="I">\n'
            for j in aDEVS.my_input[aDEVS.IPorts[I]]:
                port_info += "<message>" + str(j) + "</message>\n"
            port_info += "</port>\n"
runTraceAtController(self.server,
self.uid,
aDEVS,
[toStr(aDEVS.getModelFullName()),
aDEVS.time_last,
"'EX'",
toStr(port_info),
toStr(aDEVS.state.toXML()),
toStr(aDEVS.state)])
def traceConfluent(self, aDEVS):
"""
The trace functionality for XML output at a confluent transition
:param aDEVS: the model that transitioned
"""
port_info = ""
        for I in range(len(aDEVS.IPorts)):
            port_info += '<port name="' + aDEVS.IPorts[I].getPortName() + '" category="I">\n'
            for j in aDEVS.my_input[aDEVS.IPorts[I]]:
                port_info += "<message>" + str(j) + "</message>\n"
            port_info += "</port>\n"
runTraceAtController(self.server,
self.uid,
aDEVS,
[toStr(aDEVS.getModelFullName()),
aDEVS.time_last,
"'EX'",
toStr(port_info),
toStr(aDEVS.state.toXML()),
toStr(aDEVS.state)])
port_info = ""
        for I in range(len(aDEVS.OPorts)):
            if aDEVS.OPorts[I] in aDEVS.my_output:
                port_info += '<port name="' + aDEVS.OPorts[I].getPortName() + '" category="O">\n'
                for j in aDEVS.my_output[aDEVS.OPorts[I]]:
                    port_info += "<message>" + str(j) + "</message>\n"
                port_info += "</port>\n"
runTraceAtController(self.server,
self.uid,
aDEVS,
[toStr(aDEVS.getModelFullName()),
aDEVS.time_last,
"'IN'",
toStr(port_info),
toStr(aDEVS.state.toXML()),
toStr(aDEVS.state)])
def traceInit(self, aDEVS, t):
"""
The trace functionality for XML output at initialization
        :param aDEVS: the model that was initialised
:param t: time at which it should be traced
"""
runTraceAtController(self.server,
self.uid,
aDEVS,
[toStr(aDEVS.getModelFullName()),
t,
"'EX'",
"''",
toStr(aDEVS.state.toXML()),
toStr(aDEVS.state)])

193
src/pypdevs/util.py Normal file
View file

@ -0,0 +1,193 @@
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common utility functions used in PyPDEVS
"""
import pypdevs.middleware as middleware
from pypdevs.MPIRedirect import MPIRedirect
from collections import defaultdict
EPSILON = 1E-6
try:
import cPickle as pickle
except ImportError:
import pickle
def broadcastModel(data, proxies, allow_reinit, scheduler_locations):
"""
Broadcast the model to simulate to the provided proxies
    :param data: data to be broadcast to all nodes
    :param proxies: iterable containing all proxies
    :param allow_reinit: whether or not reinitialisation should be allowed
    :param scheduler_locations: per-location scheduler configuration, indexed like *proxies*
    """
    if (len(proxies) == 1) and not allow_reinit:
        # Shortcut for local simulation with the guarantee that no reinits will happen
proxies[0].sendModel(data, scheduler_locations[0])
return
# Otherwise, we always have to pickle
pickled_data = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
if len(proxies) == 1:
proxies[0].saveAndProcessModel(pickled_data, scheduler_locations[0])
else:
for i, proxy in enumerate(proxies[1:]):
# Prepare by setting up the broadcast receiving
proxy.prepare(scheduler_locations[i+1])
        # We pickled the data ourselves above, so a pickling failure raises a normal Python exception instead of an MPI error (which would be much harder to recover from)
# Broadcast the model to everywhere
middleware.COMM_WORLD.bcast(pickled_data, root=0)
        # Immediately wait for a barrier; this will pass as soon as every node has initialised its model
# Still send to ourselves, as we don't receive it from the broadcast
# Local calls, so no real overhead
proxies[0].sendModel(data, scheduler_locations[0])
proxies[0].setPickledData(pickled_data)
middleware.COMM_WORLD.barrier()
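# Rough summary of the controller-side handshake implemented above (the matching
# worker-side receive lives in the proxy/middleware code, not in this function):
#   1. each remote proxy is asked to prepare() for an incoming broadcast,
#   2. the pre-pickled model is broadcast from rank 0,
#   3. the controller hands the (unpickled) model to its own local proxy directly,
#   4. a barrier synchronises all nodes once each of them has set up its model.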
def broadcastCancel():
"""
Cancel the broadcast receiving in a nice way, to prevent MPI errors
"""
middleware.COMM_WORLD.bcast(None, root=0)
def toStr(inp):
"""
Return a string representation of the input, enclosed with ' characters
:param inp: the input value
:returns: string -- input value, enclosed by ' characters
"""
return "'%s'" % inp
def addDict(destination, source):
"""
    Add two dictionaries together, accumulating the values of *source* into *destination*
:param destination: the destination dictionary to merge the source into
:param source: the dictionary to merge in
.. note:: the *destination* parameter will be modified and no return value is provided. The *source* parameter is not modified.
"""
for i in source:
destination[i] = destination.get(i, 0) + source[i]
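# Illustrative example (hypothetical values):
#   d = {"a": 1}
#   addDict(d, {"a": 2, "b": 3})
#   # d is now {"a": 3, "b": 3}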
def allZeroDict(source):
"""
    Check whether or not all values in the dictionary are zero
:param source: a dictionary to test
:returns: bool -- whether or not all entries in the dictionary are equal to zero
"""
for i in source.values():
if i != 0:
return False
return True
def runTraceAtController(server, uid, model, args):
"""
    Run a trace command on our version that is running at the controller
:param server: the server to ask the proxy from
:param uid: the UID of the tracer (identical throughout the simulation)
:param model: the model that transitions
:param args: the arguments for the trace function
"""
to_run = easyCommand("self.tracers.getByID(%i).trace" % uid,
args).replace("\n", "\\n")
if server.getName() == 0:
server.getProxy(0).delayedAction(model.time_last, model.model_id, to_run)
else:
server.queueMessage(model.time_last, model.model_id, to_run)
def easyCommand(function, args):
"""
Easy wrapper to create a string representation of function calls
    :param function: the function that should be called
:param args: list of all the arguments for the function
:returns: str -- string representation to be passed to *exec*
"""
text = str(function) + "("
for i in range(len(args)):
if i != 0:
text += ", "
text += str(args[i])
text += ")"
return text
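# Illustrative example (hypothetical arguments):
#   easyCommand("self.tracers.getByID(0).trace", ["(1.0, 1)", "'some text'"])
#   # returns the string "self.tracers.getByID(0).trace((1.0, 1), 'some text')"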
class DEVSException(Exception):
"""
DEVS specific exceptions
"""
def __init__(self, message="not specified in source"):
"""
Constructor
:param message: error message to print
"""
        self.message = message  # kept as an attribute for __str__ (Exception.message no longer exists in Python 3)
        Exception.__init__(self, message)
def __str__(self):
"""
String representation of the exception
"""
return "DEVS Exception: " + str(self.message)
class QuickStopException(Exception):
"""
An exception specifically to stop the simulation and perform a relocation ASAP
"""
def __init__(self):
Exception.__init__(self, "(none)")
def __str__(self):
"""
Should be unused
"""
return "Quick Stop Exception"
def saveLocations(filename, model_locations, model_ids):
"""
    Save the allocation specified by the parameters.
    :param filename: filename to save the allocation to
    :param model_locations: the allocation to save (mapping from model_id to location)
    :param model_ids: mapping from model_ids to the models themselves
"""
# Save the locations
f = open(filename, 'w')
for model_id in model_locations:
# Format:
# model_id location fullname
f.write("%s %s %s\n" % (model_id,
model_locations[model_id],
model_ids[model_id].getModelFullName()))
f.close()
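# Illustrative example of the resulting file contents (model ids, locations and
# names are made up):
#   0 0 Root.Generator
#   1 1 Root.Processor
#   2 1 Root.Collector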
def constructGraph(models):
"""
Construct a graph from the model, containing the weight (= number of messages) on a connection
between two components.
:param models: the root model to use for graph construction
:returns: dict -- all from-to edges with their number of events
"""
edges = defaultdict(lambda: defaultdict(int))
for model in models.component_set:
for outport in model.OPorts:
for inport in outport.outline:
edges[outport.host_DEVS][inport.host_DEVS] += outport.msg_count
return edges
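# Illustrative usage sketch (model names are made up):
#   edges = constructGraph(root_model)
#   # edges[generator][processor] then holds the total number of events sent from
#   # the 'generator' atomic model to the 'processor' atomic model.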

10
src/setup.py Normal file
View file

@ -0,0 +1,10 @@
from distutils.core import setup
setup(name="PyPDEVS",
version="2.4.0",
description="Python Parallel DEVS simulator",
author="Yentl Van Tendeloo",
author_email="Yentl.VanTendeloo@uantwerpen.be",
url="http://msdl.cs.mcgill.ca/people/yentl",
      packages=['pypdevs',
                'pypdevs.allocators',
                'pypdevs.realtime',
                'pypdevs.relocators',
                'pypdevs.schedulers',
                'pypdevs.templates',
                'pypdevs.tracers']
)
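# A typical installation, shown here for illustration (standard distutils invocation):
#   python setup.py install --user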