Commit b4049b58 authored by panos, committed by Georgios Dagkakis

Add the Assembly line example

parent c43bfe1f
'''
Created on 23 Sep 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
#================= Main script of KE tool =====================================#
from __future__ import division
from StatisticalMeasures import BasicStatisticalMeasures
from DistributionFitting import Distributions
from DistributionFitting import DistFittest
from ReplaceMissingValues import HandleMissingValues
from ImportExceldata import Import_Excel
from DetectOutliers import HandleOutliers
from JSON_Output import JSON_example
from WIP_Identifier import currentWIP
import xlrd
from dateutil.parser import parse
import datetime
from time import mktime
#Read the Excel document with the input data from the given directory
workbook = xlrd.open_workbook('prod_data.xls')
worksheets = workbook.sheet_names()
main = workbook.sheet_by_name('Export Worksheet')
worksheet_ProdData = worksheets[0] #Define the worksheet with the production data
A = Import_Excel() #Call the Python object Import_Excel
ProdData = A.Input_data(worksheet_ProdData, workbook) #Create the production data dictionary, keyed by the labels of the different Excel columns
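#Illustrative shape of ProdData (hypothetical values), assuming the Excel sheet
#carries the column labels used below:
#  {'CONTAINERNAME': ['C1', 'C1', ...], 'TASKTYPENAME': ['Start Station', ...],
#   'TASKDATE': ['2014-03-27 08:00:00', ...], 'STATIONNAME': ['M1A', ...], ...}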
##Get the different keys from the ProdData dictionary and define the following lists
contIds = ProdData.get('CONTAINERNAME', [])
prodName = ProdData.get('PRODUCTNAME', [])
prodDesc = ProdData.get('PRODUCTDESCRIPTION', [])
taskdate = ProdData.get('TASKDATE', [])
taskName = ProdData.get('TASKTYPENAME', [])
statName = ProdData.get('STATIONNAME', [])
contQuant = ProdData.get('CONTAINERQTYATTXN', [])
# column indices that are used (static)
CONTAINERNAME = 0
PRODUCTNAME = 1
PRODUCTDESCRIPTION = 2
TASKDATE = 3
TASKTYPENAME = 4
STATIONNAME = 5
CONTAINERQTYATTXN = 6
# method that fills the processStory dictionary, which contains the production steps of a container id
def contProcessStory(contId):
    processStory[contId] = {}
    for sheet in workbook.sheets():
        if worksheet_ProdData:
            # first pass: create one list per station the container visited
            for i in range(1, main.nrows):
                if sheet.cell(i, CONTAINERNAME).value == contId:
                    stationName = sheet.cell(i, STATIONNAME).value
                    processStory[contId][stationName] = []
            # second pass: record the 'Start Station' / 'Finish Station' events
            for i in range(1, main.nrows):
                if sheet.cell(i, CONTAINERNAME).value == contId:
                    taskType = sheet.cell(i, TASKTYPENAME).value
                    stationName = sheet.cell(i, STATIONNAME).value
                    time = parse(sheet.cell(i, TASKDATE).value)
                    contQuant = sheet.cell(i, CONTAINERQTYATTXN).value
                    if taskType == 'Start Station' or taskType == 'Finish Station':
                        processStory[contId][stationName].append([time, contQuant])
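#Illustrative shape of processStory after contProcessStory('C1') (hypothetical values):
#  {'C1': {'M1A': [[datetime(2014, 3, 27, 8, 0), 80.0], [datetime(2014, 3, 27, 9, 20), 78.0]]}}
#i.e. per visited station, the [timestamp, container quantity] pairs of the
#'Start Station' and 'Finish Station' events.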
# helper that converts a timedelta into (days, hours, minutes), so that timestamps
# from the Excel document (real MES data) can be handled by the KE tool
def days_hours_minutes(td):
    return td.days, td.seconds // 3600, (td.seconds // 60) % 60
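# For example (hypothetical value), a timedelta of 1 day, 2 hours and 30 minutes:
#   days_hours_minutes(datetime.timedelta(days=1, hours=2, minutes=30)) == (1, 2, 30)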
# Creation of two dictionaries (processStory, contDetails) and one list (contIds)
processStory = {}
contDetails = {}
contIds = []
for sheet in workbook.sheets():
    if worksheet_ProdData:
        for i in range(1, main.nrows):
            Id = main.cell(i, CONTAINERNAME).value
            if Id not in contIds:
                contIds.append(Id)
                contDetails[Id] = []
            prodId = main.cell(i, PRODUCTNAME).value
            prodDescName = main.cell(i, PRODUCTDESCRIPTION).value
            time = main.cell(i, TASKDATE).value
            statName = main.cell(i, STATIONNAME).value
            contDetails[Id].append(prodId)
            contDetails[Id].append(prodDescName)
            contDetails[Id].append(statName)
for elem in contIds:
    contProcessStory(elem)
#Creation and initialization of one dictionary per station, each with the static keys 'ProcTime' and 'ScrapQuant'
stationNames = ['MA', 'M1A', 'M1B', 'M2A', 'M2B', 'M3A', 'M3B', 'MM', 'PrA', 'PrB', 'PaA', 'PaB']
stations = {}
for name in stationNames:
    stations[name] = {'ProcTime': [], 'ScrapQuant': []}
#Define the number of units for each batch
batchSize = 80
#With the following loop, the processing times and scrap quantity lists inside the
#station dictionaries are populated. The processing time per unit (in minutes) is the
#elapsed time between the 'Start Station' and 'Finish Station' events divided by the
#batch size; the scrap quantity is the drop in container quantity between the two events.
for key in processStory.keys():
    for elem in processStory[key].keys():
        if elem in stations:
            try:
                procTime = (((mktime(processStory[key][elem][1][0].timetuple()) - mktime(processStory[key][elem][0][0].timetuple())) / batchSize) / 60)
                scrap = processStory[key][elem][0][1] - processStory[key][elem][1][1]
                stations[elem]['ProcTime'].append(procTime)
                if scrap:
                    stations[elem]['ScrapQuant'].append(scrap)
            except IndexError:
                continue
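#Worked example of the formula above (hypothetical timestamps): a batch of 80 units
#that starts at 08:00:00 and finishes at 09:20:00 gives 4800 elapsed seconds, i.e.
#((4800 / 80) / 60) = 1.0 minute of processing time per unit.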
#Call the HandleMissingValues object and delete the missing values in the scrap
#quantity and processing time lists, then call the HandleOutliers object and delete
#the outliers in the cleaned lists of each station
B = HandleMissingValues()
C = HandleOutliers()
procData = {}
scrapData = {}
for name in stationNames:
    procData[name] = C.DeleteOutliers(B.DeleteMissingValue(stations[name].get('ProcTime', [])))
    scrapData[name] = C.DeleteOutliers(B.DeleteMissingValue(stations[name].get('ScrapQuant', [])))
#Call the BasicStatisticalMeasures object and calculate the mean value of the processing times for each station
E = BasicStatisticalMeasures()
meanProc = {}
for name in stationNames:
    meanProc[name] = E.mean(procData[name])
stopTime = datetime.datetime(2014, 3, 27, 8, 40, 00) #Give the stop time; based on it, the WIP levels in the assembly line are identified
WIP = currentWIP(processStory, stopTime) #Call the currentWIP method, passing the processStory dict and the stopTime
#Loop over the outcome of the currentWIP method (the WIP dictionary) and calculate,
#for each WIP batch sitting in a station, the number of units still to be processed
for key in WIP.keys():
    try:
        station = WIP[key][0]
        if station in meanProc:
            secs = WIP[key][1].total_seconds()
            minutes = int(secs / 60)
            unitsToProcess = round(batchSize - (minutes / meanProc[station]))
            WIP[key].append(unitsToProcess)
    except IndexError:
        continue
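#Worked example (hypothetical numbers): if a WIP batch has been at its station for
#50 minutes and the station's mean processing time is 1.25 min/unit, then
#round(80 - 50 / 1.25) = 40 units remain to be processed.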
# Call the DistFittest object and conduct a Kolmogorov-Smirnov distribution fitting test on the processing times list of each station
D = DistFittest()
dictProc = {} #Dictionary that holds the fitted statistical distributions of the processing times of each station
for name in stationNames:
    dictProc[name] = D.ks_test(procData[name])
#Call the Distributions object and fit (using Maximum Likelihood Estimation) the scrap quantity lists
#into a discrete statistical distribution, i.e. the Geometric distribution
D = Distributions()
dictScrap = {} #Dictionary that holds the fitted Geometric distribution of the scrap quantities of each station
for name in stationNames:
    dictScrap[name] = D.Geometric_distrfit(scrapData[name])
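#Illustrative shape of a fitted entry (hypothetical numbers), which JSON_example
#writes verbatim into a node's 'processingTime' field, assuming the fitting objects
#return DREAM-style distribution dictionaries:
#  dictProc['M1A'] == {'distributionType': 'Normal', 'mean': '1.02', 'stdev': '0.08'}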
#Call the JSON_example method, passing the dictionaries with the processing time
#distributions, the scrap quantity distributions and the WIP levels in the assembly line
JSON_example(dictProc, dictScrap, WIP)
'''
Created on 9 Oct 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
import json
#Method that receives the three dictionaries from the KE tool main script, updates the JSON model and returns it
def JSON_example(procDict, scrapDict, wipDict):
    jsonFile = open('JSON_example.json', 'r') #open the JSON_example.json model
    data = json.load(jsonFile)
    jsonFile.close()
    nodes = data.get('nodes', {})
    batchWIP = {}
    for (element_id, element) in nodes.iteritems():
        name = element.get('name')
        wip = element.get('wip', [])
        for key in wipDict.keys(): # loop over the keys of the WIP dict
            batchWIP['_class'] = 'Dream.Batch' # static inputs to the batchWIP dict
            batchWIP['numberOfUnits'] = "80"
            batchWIP['name'] = 'Batch'
            if wipDict[key][0] == name: # check if the station of the WIP batch matches the name of the element in the JSON file
                batchWIP['id'] = str(key) # input the container id of the WIP batch to the batchWIP dict
                try:
                    if wipDict[key][2]: # check if the WIP is in a station and not in a buffer
                        batchWIP['unitsToProcess'] = str(wipDict[key][2]) # input the unitsToProcess attribute to the batchWIP dict
                        wip.append(batchWIP) # append the batchWIP dict to the wip attribute in the JSON model
                        batchWIP = {}
                except IndexError:
                    # the WIP is not in a station but in a buffer; append the batchWIP dict again (without unitsToProcess this time)
                    wip.append(batchWIP)
                    batchWIP = {}
            else:
                continue
        if name in procDict.keys(): # check if the element name appears in the processing time distributions dict
            element['processingTime'] = procDict[name]
        else:
            continue
        if name in scrapDict.keys(): # check if the element name appears in the scrap quantity distributions dict
            element['scrapQuantity'] = scrapDict[name]
        else:
            continue
    jsonFile = open('JSON_exampleOutput.json', 'w')
    jsonFile.write(json.dumps(data, indent=True))
    jsonFile.close()
    return json.dumps(data, indent=True)
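
# A minimal smoke test (hypothetical inputs), run only when this module is executed
# directly; it assumes JSON_example.json sits in the working directory and that the
# distribution dicts follow the DREAM schema used there.
if __name__ == '__main__':
    procDists = {'M1A': {'distributionType': 'Normal', 'mean': '1.0', 'stdev': '0.1'}}
    scrapDists = {'M1A': {'distributionType': 'Geometric', 'mean': '0.1'}}
    wipBatches = {'C1': ['M1A', None, 40.0]} # [station, time in station, unitsToProcess]
    print JSON_example(procDists, scrapDists, wipBatches)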
{
"edges": {
"con_140": [
"S1",
"QStart",
{}
],
"con_145": [
"QStart",
"BDA",
{}
],
"con_150": [
"QStart",
"BDB",
{}
],
"con_155": [
"BDA",
"M1A",
{}
],
"con_160": [
"M1A",
"Q2A",
{}
],
"con_165": [
"Q2A",
"M2A",
{}
],
"con_170": [
"M2A",
"Q3A",
{}
],
"con_175": [
"Q3A",
"M3A",
{}
],
"con_180": [
"M3A",
"BRA",
{}
],
"con_185": [
"BRA",
"QM",
{}
],
"con_190": [
"BDB",
"M1B",
{}
],
"con_195": [
"M1B",
"Q2B",
{}
],
"con_200": [
"Q2B",
"M2B",
{}
],
"con_205": [
"M2B",
"Q3B",
{}
],
"con_210": [
"Q3B",
"M3B",
{}
],
"con_215": [
"M3B",
"BRB",
{}
],
"con_220": [
"BRB",
"QM",
{}
],
"con_225": [
"QM",
"MM",
{}
],
"con_230": [
"MM",
"QPr",
{}
],
"con_235": [
"QPr",
"PrA",
{}
],
"con_240": [
"QPr",
"PrB",
{}
],
"con_245": [
"PrA",
"QPa",
{}
],
"con_250": [
"PrB",
"QPa",
{}
],
"con_255": [
"QPa",
"PaA",
{}
],
"con_260": [
"QPa",
"PaB",
{}
],
"con_265": [
"PaA",
"E1",
{}
],
"con_270": [
"PaB",
"E1",
{}
]
},
"general": {
"confidenceLevel": "0.95",
"maxSimTime": "2880",
"numberOfReplications": "1",
"processTimeout": "0.5",
"trace": "No"
},
"nodes": {
"BDA": {
"_class": "Dream.BatchDecompositionStartTime",
"element_id": "DreamNode_1",
"name": "Deco_A",
"numberOfSubBatches": 4,
"processingTime": {
"distributionType": "Fixed",
"mean": "0"
}
},
"BDB": {
"_class": "Dream.BatchDecompositionStartTime",
"element_id": "DreamNode_2",
"name": "Deco_B",
"numberOfSubBatches": 4,
"processingTime": {
"distributionType": "Fixed",
"mean": "0"
}
},
"BRA": {
"_class": "Dream.BatchReassembly",
"element_id": "DreamNode_3",
"name": "Assembly_A",
"numberOfSubBatches": 4,
"processingTime": {
"distributionType": "Fixed",
"mean": "0"
}
},
"BRB": {
"_class": "Dream.BatchReassembly",
"element_id": "DreamNode_4",
"name": "Assembly_B",
"numberOfSubBatches": 4,
"processingTime": {
"distributionType": "Fixed",
"mean": "0"
}
},
"E1": {
"_class": "Dream.Exit",
"element_id": "DreamNode_5",
"name": "Stock"
},
"M1A": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_6",
"failures": {},
"name": "M1A",
"shift": {
"shiftPattern": [[360,780], [1800,2220]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"M1B": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_7",
"failures": {},
"name": "M1B",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"M2A": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_8",
"failures": {},
"name": "M2A",
"shift": {
"shiftPattern": [[360,780], [1800,2220]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"M2B": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_9",
"failures": {},
"name": "M2B",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"M3A": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_10",
"failures": {},
"name": "M3A",
"shift": {
"shiftPattern": [[360,780], [1800,2220]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"M3B": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_11",
"failures": {},
"name": "M3B",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"MM": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_12",
"failures": {},
"name": "MM",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"PaA": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_13",
"failures": {},
"name": "PaA",
"shift": {
"shiftPattern": [[360,780], [1800,2220]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"PaB": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_14",
"failures": {},
"name": "PaB",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"PrA": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_15",
"failures": {},
"name": "PrA",
"shift": {
"shiftPattern": [[360,780], [1800,2220]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"PrB": {
"_class": "Dream.BatchScrapMachine",
"element_id": "DreamNode_16",
"failures": {},
"name": "PrB",
"shift": {
"shiftPattern": [[360,1260], [1800,2700]],
"endUnfinished": 0
},
"scrapQuantity": {
},
"processingTime": {
},
"wip": [
]
},
"Q2A": {
"_class": "Dream.LineClearance",
"capacity": "2",
"element_id": "DreamNode_17",
"isDummy": "0",
"name": "Q2A",
"schedulingRule": "FIFO",
"wip": [
]
},
"Q2B": {
"_class": "Dream.LineClearance",
"capacity": "2",
"element_id": "DreamNode_18",
"isDummy": "0",
"name": "Q2B",
"schedulingRule": "FIFO",
"wip": [
]
},
"Q3A": {
"_class": "Dream.LineClearance",
"capacity": "2",
"element_id": "DreamNode_19",
"isDummy": "0",
"name": "Q3A",
"schedulingRule": "FIFO",
"wip": [
]
},
"Q3B": {
"_class": "Dream.LineClearance",
"capacity": "2",
"element_id": "DreamNode_20",
"isDummy": "0",
"name": "Q3B",
"schedulingRule": "FIFO",
"wip": [
]
},
"QM": {
"_class": "Dream.Queue",
"capacity": "3",
"element_id": "DreamNode_21",
"isDummy": "0",
"name": "QM",
"schedulingRule": "FIFO",
"wip": [
]
},
"QPa": {
"_class": "Dream.Queue",
"capacity": "3",
"element_id": "DreamNode_22",
"isDummy": "0",
"name": "QPa",
"schedulingRule": "FIFO",
"wip": [
]
},
"QPr": {
"_class": "Dream.Queue",
"capacity": "3",
"element_id": "DreamNode_23",
"isDummy": "0",
"name": "QPr",
"schedulingRule": "FIFO",
"wip": [
]
},
"QStart": {
"_class": "Dream.Queue",
"capacity": "1",
"element_id": "DreamNode_24",
"isDummy": "0",
"name": "StartQueue",
"schedulingRule": "FIFO",
"wip": [
]
},
"S1": {
"_class": "Dream.BatchSource",
"batchNumberOfUnits": 100,
"element_id": "DreamNode_25",
"entity": "Dream.Batch",
"interarrivalTime": {
"distributionType": "Fixed",
"mean": "0.5"
},
"name": "Source"
},
"EV": {
"_class": "Dream.EventGenerator",
"name": "attainment",
"start": "1440",
"interval": "1440",
"method": "Dream.Globals.countIntervalThroughput",
"argumentDict": {
}
}
},
"spreadsheet": [
[
"Jobs",
"ID",
"Order Date",
"Due Date",
"Priority",
"Material",
"Sequence",
"Processing Times"
],
[
null,
null,
null,
null,
null,
null,
null,
null
]
]
}
'''
Created on 9 Oct 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
import datetime
import operator
# Method that returns the actual WIP (the Work-In-Process container ids), either in stations or in buffers of the production line
def currentWIP(processStory, stopTime):
    Stations = {}
    LastStation = {}
    for key in processStory.keys():
        Stations[key] = []
        LastStation[key] = []
        # sort the production steps of each container chronologically
        sorted_proc = sorted(processStory[key].iteritems(), key=operator.itemgetter(1))
        for i in range(len(sorted_proc)):
            Stations[key].append(sorted_proc[i])
    for key in Stations.keys():
        # discard the events recorded after the stop time
        for elem in range(len(Stations[key])):
            try:
                if Stations[key][elem][1][0][0] > stopTime and Stations[key][elem][1][1][0] > stopTime:
                    del Stations[key][elem]
                elif Stations[key][elem][1][1][0] > stopTime:
                    del Stations[key][elem][1][1]
                elif Stations[key][elem][1][0][0] > stopTime:
                    del Stations[key][elem][1][0]
            except IndexError:
                continue
        try:
            # the last station each container reached before the stop time
            LastStation[key] = Stations[key][-1]
        except (KeyError, IndexError):
            continue
    for key in LastStation.keys():
        try:
            # containers that entered the final stations (PaA, PaB) more than 500 seconds
            # before the stop time are considered to have left the system
            if (LastStation[key][0] == 'PaA' or LastStation[key][0] == 'PaB') and (stopTime - LastStation[key][1][0][0] > datetime.timedelta(0, 500)):
                del LastStation[key]
        except IndexError:
            continue
    WIP = {}
    for key in LastStation.keys():
        WIP[key] = []
        try:
            if not LastStation[key][1][1]:
                continue
        except IndexError:
            # no 'Finish Station' record: the container is still being processed at the station
            WIP[key].append(LastStation[key][0])
        try:
            dif = stopTime - LastStation[key][1][0][0] # time the container has spent there so far
            WIP[key].append(dif)
        except IndexError:
            continue
    # containers that finished their last station wait in the buffer that follows it
    nextBuffer = {'MA': 'QStart', 'M1A': 'Q2A', 'M1B': 'Q2B', 'M2A': 'Q3A', 'M2B': 'Q3B',
                  'M3A': 'QM', 'M3B': 'QM', 'MM': 'QPr', 'PrA': 'QPa', 'PrB': 'QPa'}
    for key in LastStation.keys():
        try:
            if LastStation[key][0] in nextBuffer and LastStation[key][1][1]:
                WIP[key].append(nextBuffer[LastStation[key][0]])
        except IndexError:
            continue
    return WIP
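
# A minimal sketch of the expected behaviour (hypothetical data), run only when this
# module is executed directly: one container that started on M1A at 08:00 and has no
# 'Finish Station' record yet is reported as WIP at that station.
if __name__ == '__main__':
    story = {'C1': {'M1A': [[datetime.datetime(2014, 3, 27, 8, 0), 80.0]]}}
    print currentWIP(story, datetime.datetime(2014, 3, 27, 8, 40))
    # -> {'C1': ['M1A', datetime.timedelta(0, 2400)]}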