@inproceedings{conf/cla/BorchmannH16, author = {Borchmann, Daniel and Hanika, Tom}, booktitle = {CLA}, crossref = {conf/cla/2016}, editor = {Huchard, Marianne and Kuznetsov, Sergei}, ee = {http://ceur-ws.org/Vol-1624/paper5.pdf}, interhash = {e8ddef8aeb9b874f97a1a8230332b7c4}, intrahash = {8af414e45f306527e8316ac681fe7f08}, pages = {57-69}, publisher = {CEUR-WS.org}, series = {CEUR Workshop Proceedings}, title = {Some Experimental Results on Randomly Generating Formal Contexts.}, url = {http://dblp.uni-trier.de/db/conf/cla/cla2016.html#BorchmannH16}, volume = 1624, year = 2016 } @inproceedings{Marshall:2003:SW:900051.900063, abstract = {Through scenarios in the popular press and technical papers in the research literature, the promise of the Semantic Web has raised a number of different expectations. These expectations can be traced to three different perspectives on the Semantic Web. The Semantic Web is portrayed as: (1) a universal library, to be readily accessed and used by humans in a variety of information use contexts; (2) the backdrop for the work of computational agents completing sophisticated activities on behalf of their human counterparts; and (3) a method for federating particular knowledge bases and databases to perform anticipated tasks for humans and their agents. Each of these perspectives has both theoretical and pragmatic entailments, and a wealth of past experiences to guide and temper our expectations. In this paper, we examine all three perspectives from rhetorical, theoretical, and pragmatic viewpoints with an eye toward possible outcomes as Semantic Web efforts move forward.}, acmid = {900063}, address = {New York, NY, USA}, author = {Marshall, Catherine C. and Shipman, Frank M.}, booktitle = {Proceedings of the Fourteenth ACM Conference on Hypertext and Hypermedia}, doi = {10.1145/900051.900063}, interhash = {cccbc678523895b9b1fc30ed70b1f4a1}, intrahash = {e37d0f8f37acc22dd66acaa0767bb953}, isbn = {1-58113-704-4}, location = {Nottingham, UK}, numpages = {10}, pages = {57--66}, publisher = {ACM}, series = {HYPERTEXT '03}, title = {Which Semantic Web?}, url = {http://doi.acm.org/10.1145/900051.900063}, year = 2003 } @article{ConverisID31537, abstract = {

DEUTSCH: Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.

}, author = {Knierim, Markus and Kladt, Viktor}, doi = {doi-TEST}, interhash = {9bfec3c91404d85706675e6468758904}, intrahash = {abf439dbf5f32720be2f554c7de14928}, journal = {Language Awareness}, pages = {99-999}, title = {TEST Aufsatz in einer Fachzeitschrift}, url = {url-TEST}, volume = 99, year = 2099 } @book{ConverisID31538, abstract = {

DEUTSCH: Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.

}, author = {Knierim, Markus and Kladt, Viktor}, doi = {doi-TEST}, interhash = {698d53b231e7308bae991be86c4bdcb9}, intrahash = {f76e1a0c1e99a899a4ff74e9e113ab20}, journal = {Language Awareness}, pages = {99-999}, title = {TEST Monographie}, url = {url-TEST}, volume = 99, year = 2099 } @inbook{ConverisID15789, booktitle = {Aufgaben als Katalysatoren von Lernprozessen}, editor = {Thonhauser, J.}, interhash = {d199791edce2c23a01cc83da71753f35}, intrahash = {9a3430f54e4fddc918f40cea91641633}, pages = {149--167}, publisher = {Waxmann}, title = {Aufgabenorientiertes Lernen im Fremdsprachenunterricht: Beispiele zur Förderung kognitiver, metakognitiver und sozioaffektiver Lernprozesse}, year = 2008 } @phdthesis{knell2016interferometrische, abstract = {Ziel dieser Dissertation ist es, eine Klasse interferometrischer Messgeräte zu charakterisieren und weiter zu entwickeln. Die Modulation der optischen Weglänge (OPLM) im Referenzarm eines interferometrischen Messsystems ist ein anpassungsfähiger Ansatz. Sie ist zur Messung von Oberflächenprofilen mit einer Auflösung bis in den sub-nm-Bereich bei einem Messbereich von bis zu 100 m geeignet. Wird ein statisches Messobjekt gemessen, tritt durch die Modulation im Referenzarm am Detektor ein periodisches Interferenzmuster auf. Dies ist in der unten stehenden Abbildung schematisch dargestellt. Bei einer Veränderung des Abstandes zwischen Objekt und Messgerät kann aus der Phasen- und/oder Hüllkurvenverschiebung im Interferenzmuster die Abstandsänderung abgeleitet werden. Im Rahmen der Arbeit sind zwei funktionsfähige OPLM-Messsysteme entwickelt, aufgebaut und getestet worden. Diese demonstrieren, dass der OPLM-Ansatz ein breites Spektrum an Anwendungen durch eine optische Messung abdecken kann. Allerdings zeigen sich an den Messsystemen auch die Limitierungen des OPLM-Ansatzes. Die Systeme basieren auf einer Punktmessung mittels einer fasergekoppelten Sonde sowie auf einer linienförmigen Messung durch eine Zeilenkamera. Um eine hohe laterale Auflösung zu erzielen, wird die Zeilenkamera mit einem Mikroskop kombiniert. Damit flächenhaft gemessen werden kann, ist es notwendig, Messobjekt und Sensor zueinander zu verschieben. Daher wird eine Theorie entwickelt, unter welchen Randbedingungen bewegte Objekte von einem OPLM-Messsystem aufgelöst werden können. Die Theorie wird anschließend experimentell überprüft und bestätigt. Für die Auswertung der bei der Modulation der optischen Weglänge entstehenden Interferenzen existieren bereits einige erprobte Algorithmen, welche auf ihre Eignung hin untersucht und mit selbst entwickelten Algorithmen verglichen werden. Auch wird darauf eingegangen, welches die zentralen Herausforderungen bei der Planung von OPLM-Interferometern sind und wie sich insbesondere die Wahl des Aktors für die OPLM auf das gesamte Messsystem auswirkt. Bei den beiden Messsystemen werden jeweils wichtige Komponenten wie analoge Elektronik und Aktorik sowie ihre Funktionsweise erläutert. Es wird detailliert beschrieben, wie ein OPLM-Messsystem charakterisiert und kalibriert werden muss, um möglichst zuverlässige Messwerte zu liefern.
Abschließend werden die Möglichkeiten der beiden entwickelten Systeme durch Beispielmessungen demonstriert, sowie ihre Messgenauigkeit charakterisiert.}, address = {Kassel}, author = {Knell, Holger Werner}, editor = {Knell, Holger Werner}, interhash = {4836dffdfcfcf9ab12bad2131bcf8ed5}, intrahash = {1decd66a54b7e8023ecbed3977a9e7f2}, month = feb, school = {Universität Kassel}, title = {Interferometrische Sensoren mit Modulation der optischen Weglänge für die Fertigungsmesstechnik}, type = {Dissertation}, year = 2016 } @inproceedings{ames2007motivations, abstract = {Why do people tag? Users have mostly avoided annotating media such as photos -- both in desktop and mobile environments -- despite the many potential uses for annotations, including recall and retrieval. We investigate the incentives for annotation in Flickr, a popular web-based photo-sharing system, and ZoneTag, a cameraphone photo capture and annotation tool that uploads images to Flickr. In Flickr, annotation (as textual tags) serves both personal and social purposes, increasing incentives for tagging and resulting in a relatively high number of annotations. ZoneTag, in turn, makes it easier to tag cameraphone photos that are uploaded to Flickr by allowing annotation and suggesting relevant tags immediately after capture.

A qualitative study of ZoneTag/Flickr users exposed various tagging patterns and emerging motivations for photo annotation. We offer a taxonomy of motivations for annotation in this system along two dimensions (sociality and function), and explore the various factors that people consider when tagging their photos. Our findings suggest implications for the design of digital photo organization and sharing applications, as well as other applications that incorporate user-based annotation.}, acmid = {1240772}, address = {New York, NY, USA}, author = {Ames, Morgan and Naaman, Mor}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems}, doi = {10.1145/1240624.1240772}, interhash = {bd24c17d66d2b904b3fc9444c2b64b44}, intrahash = {bc08f76536610f3f376bece5f0b46ad5}, isbn = {978-1-59593-593-9}, location = {San Jose, California, USA}, numpages = {10}, pages = {971--980}, publisher = {ACM}, series = {CHI '07}, title = {Why we tag: motivations for annotation in mobile and online media}, url = {http://doi.acm.org/10.1145/1240624.1240772}, year = 2007 } @article{10.1371/journal.pone.0136763, abstract = {

The issue of sustainability is at the top of the political and societal agenda, being considered of extreme importance and urgency. Human individual action impacts the environment both locally (e.g., local air/water quality, noise disturbance) and globally (e.g., climate change, resource use). Urban environments represent a crucial example, with an increasing realization that the most effective way of producing a change is involving the citizens themselves in monitoring campaigns (a citizen science bottom-up approach). This is possible by developing novel technologies and IT infrastructures enabling large citizen participation. Here, in the wider framework of one of the first such projects, we show results from an international competition where citizens were involved in mobile air pollution monitoring using low cost sensing devices, combined with a web-based game to monitor perceived levels of pollution. Measures of shift in perceptions over the course of the campaign are provided, together with insights into participatory patterns emerging from this study. Interesting effects related to inertia and to direct involvement in measurement activities rather than indirect information exposure are also highlighted, indicating that direct involvement can enhance learning and environmental awareness. In the future, this could result in better adoption of policies towards decreasing pollution.

}, author = {Sîrbu, Alina and Becker, Martin and Caminiti, Saverio and De Baets, Bernard and Elen, Bart and Francis, Louise and Gravino, Pietro and Hotho, Andreas and Ingarra, Stefano and Loreto, Vittorio and Molino, Andrea and Mueller, Juergen and Peters, Jan and Ricchiuti, Ferdinando and Saracino, Fabio and Servedio, Vito D. P. and Stumme, Gerd and Theunis, Jan and Tria, Francesca and Van den Bossche, Joris}, doi = {10.1371/journal.pone.0136763}, interhash = {6abb09b5ac2137e557a84d7be10009b4}, intrahash = {f35761dd0fbd9ad8af7c8099e0b6aac4}, journal = {PLoS ONE}, month = {08}, number = 8, pages = {e0136763}, publisher = {Public Library of Science}, title = {Participatory Patterns in an International Air Quality Monitoring Initiative}, url = {http://dx.doi.org/10.1371%2Fjournal.pone.0136763}, volume = 10, year = 2015 } @article{kluegl2013exploiting, abstract = {Conditional Random Fields (CRF) are popular methods for labeling unstructured or textual data. Like many machine learning approaches, these undirected graphical models assume the instances to be independently distributed. However, in real-world applications data is grouped in a natural way, e.g., by its creation context. The instances in each group often share additional structural consistencies. This paper proposes a domain-independent method for exploiting these consistencies by combining two CRFs in a stacked learning framework. We apply rule learning collectively on the predictions of an initial CRF for one context to acquire descriptions of its specific properties. Then, we utilize these descriptions as dynamic and high quality features in an additional (stacked) CRF. The presented approach is evaluated with a real-world dataset for the segmentation of references and achieves a significant reduction of the labeling error.}, author = {Kluegl, Peter and Toepfer, Martin and Lemmerich, Florian and Hotho, Andreas and Puppe, Frank}, interhash = {9ef3f543e4cc9e2b0ef078595f92013b}, intrahash = {fbaab25e96dd20d96ece9d7fefdc3b4f}, journal = {Mathematical Methodologies in Pattern Recognition and Machine Learning Springer Proceedings in Mathematics & Statistics}, pages = {111-125}, title = {Exploiting Structural Consistencies with Stacked Conditional Random Fields}, volume = 30, year = 2013 } @misc{becker2014subjective, abstract = {Sensor data is objective. But when measuring our environment, measured values are contrasted with our perception, which is always subjective. This makes interpreting sensor measurements difficult for a single person in her personal environment. In this context, the EveryAware projects directly connects the concepts of objective sensor data with subjective impressions and perceptions by providing a collective sensing platform with several client applications allowing to explicitly associate those two data types. The goal is to provide the user with personalized feedback, a characterization of the global as well as her personal environment, and enable her to position her perceptions in this global context. In this poster we summarize the collected data of two EveryAware applications, namely WideNoise for noise measurements and AirProbe for participatory air quality sensing. Basic insights are presented including user activity, learning processes and sensor data to perception correlations. These results provide an outlook on how this data can further be used to understand the connection between sensor data and perceptions. 
}, author = {Becker, Martin and Hotho, Andreas and Mueller, Juergen and Kibanov, Mark and Atzmueller, Martin and Stumme, Gerd}, howpublished = {CSSWS 2014, Poster}, interhash = {615afda9869c5e0facc8bdb5534760aa}, intrahash = {33cf40cc46170f51767c46d2ec14a495}, title = {Subjective vs. Objective Data: Bridging the Gap}, url = {http://www.gesis.org/en/events/css-wintersymposium/poster-presentation/}, year = 2014 } @inproceedings{vkistowski2015modeling, abstract = {Today’s system developers and operators face the challenge of creating software systems that make efficient use of dynamically allocated resources under highly variable and dynamic load profiles, while at the same time delivering reliable performance. Benchmarking of systems under these constraints is difficult, as state-of-the-art benchmarking frameworks provide only limited support for emulating such dynamic and highly variable load profiles for the creation of realistic workload scenarios. Industrial benchmarks typically confine themselves to workloads with constant or stepwise increasing loads. Alternatively, they support replaying of recorded load traces. Statistical load intensity descriptions also do not sufficiently capture concrete pattern load profile variations over time. To address these issues, we present the Descartes Load Intensity Model (DLIM). DLIM provides a modeling formalism for describing load intensity variations over time. A DLIM instance can be used as a compact representation of a recorded load intensity trace, providing a powerful tool for benchmarking and performance analysis. As manually obtaining DLIM instances can be time consuming, we present three different automated extraction methods, which also help to enable autonomous system analysis for self-adaptive systems. Model expressiveness is validated using the presented extraction methods. Extracted DLIM instances exhibit a median modeling error of 12.4% on average over nine different real-world traces covering between two weeks and seven months. Additionally, extraction methods perform orders of magnitude faster than existing time series decomposition approaches.}, author = {v. Kistowski, Jóakim and Herbst, Nikolas and Zoller, Daniel and Kounev, Samuel and Hotho, Andreas}, booktitle = {Proceedings of the 10th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS)}, interhash = {9f0be929d7bcc057c778f6b44e73cf4c}, intrahash = {f449d3cf35941636f96d72aaf620a275}, title = {Modeling and Extracting Load Intensity Profiles}, year = 2015 } @inproceedings{zoller2015publication, abstract = {Scholarly success is traditionally measured in terms of citations to publications. With the advent of publication management and digital libraries on the web, scholarly usage data has become a target of investigation and new impact metrics computed on such usage data have been proposed – so called altmetrics. In scholarly social bookmarking systems, scientists collect and manage publication meta data and thus reveal their interest in these publications.
In this work, we investigate connections between usage metrics and citations, and find posts, exports, and page views of publications to be correlated to citations.}, author = {Zoller, Daniel and Doerfel, Stephan and Jäschke, Robert and Stumme, Gerd and Hotho, Andreas}, booktitle = {Proceedings of the 2015 ACM Conference on Web Science}, interhash = {3515b34cd19959cee5fafbf4467a75ed}, intrahash = {548a7010ee2726f28e04e5c6e5fd6e2d}, title = {On Publication Usage in a Social Bookmarking System}, year = 2015 } @incollection{singer2014folksonomies, author = {Singer, Philipp and Niebler, Thomas and Hotho, Andreas and Strohmaier, Markus}, booktitle = {Encyclopedia of Social Network Analysis and Mining}, interhash = {3a55606e91328ca0191127b1fafe189e}, intrahash = {84d9498b73de976d8d550c6761d4be0d}, pages = {542--547}, publisher = {Springer}, title = {Folksonomies}, year = 2014 } @inproceedings{ring2015condist, author = {Ring, Markus and Otto, Florian and Becker, Martin and Niebler, Thomas and Landes, Dieter and Hotho, Andreas}, booktitle = {ECML PKDD 2015}, interhash = {c062a57a17a0910d6c27ecd664502ac1}, intrahash = {a2f9d649f2856677e4d886a3b517404d}, title = {ConDist: A Context-Driven Categorical Distance Measure}, year = 2015 } @inproceedings{dallmann2015media, address = {Cyprus, Turkey, September 1-4}, author = {Dallmann, Alexander and Lemmerich, Florian and Zoller, Daniel and Hotho, Andreas}, booktitle = {26th ACM Conference on Hypertext and Social Media}, interhash = {6b2daa7830c5e504543dcdaefed46285}, intrahash = {addfd0d84b4347392dc94a4bec400412}, publisher = {ACM}, title = {Media Bias in German Online Newspapers}, year = 2015 } @inproceedings{singer2015hyptrails, address = {Firenze, Italy}, author = {Singer, P. and Helic, D. and Hotho, A. and Strohmaier, M.}, booktitle = {24th International World Wide Web Conference (WWW2015)}, interhash = {d33e150aa37dcd618388960286f8a46a}, intrahash = {5d21e53dc91b35a4a6cb6b9ec858045d}, month = {May 18 - May 22}, organization = {ACM}, publisher = {ACM}, title = {HypTrails: A Bayesian approach for comparing hypotheses about human trails}, url = {http://www.www2015.it/documents/proceedings/proceedings/p1003.pdf}, year = 2015 } @article{atzmueller2015descriptionoriented, abstract = {Communities can intuitively be defined as subsets of nodes of a graph with a dense structure in the corresponding subgraph. However, for mining such communities usually only structural aspects are taken into account. Typically, no concise nor easily interpretable community description is provided. For tackling this issue, this paper focuses on description-oriented community detection using subgroup discovery. In order to provide both structurally valid and interpretable communities we utilize the graph structure as well as additional descriptive features of the graph’s nodes. A descriptive community pattern built upon these features then describes and identifies a community, i.e., a set of nodes, and vice versa. Essentially, we mine patterns in the “description space” characterizing interesting sets of nodes (i.e., subgroups) in the “graph space”; the interestingness of a community is evaluated by a selectable quality measure. We aim at identifying communities according to standard community quality measures, while providing characteristic descriptions of these communities at the same time.
For this task, we propose several optimistic estimates of standard community quality functions to be used for efficient pruning of the search space in an exhaustive branch-and-bound algorithm. We demonstrate our approach in an evaluation using five real-world data sets, obtained from three different social media applications. }, author = {Atzmueller, Martin and Doerfel, Stephan and Mitzlaff, Folke}, doi = {10.1016/j.ins.2015.05.008}, interhash = {d87cc381289cd86387b81ff5b8646cb5}, intrahash = {fb7a824e273ab34db22f49d54b5d1e12}, issn = {0020-0255}, journal = {Information Sciences}, number = 0, pages = { - }, title = {Description-oriented community detection using exhaustive subgroup discovery}, url = {http://www.sciencedirect.com/science/article/pii/S0020025515003667}, year = 2015 } @article{noauthororeditor2013impact, author = {Sradnick, Andre and Murugan, Rajasekaran and Ingold, Mariko and Buerkert, Andreas and Joergensen, Rainer}, interhash = {2718d2cb58f6bd43c81593ff4db96f4e}, intrahash = {36fa4b49ff61bd07da9a05fc2e098e84}, journal = {Biology and Fertility of Soils}, pages = {95-103}, title = {Impact of activated charcoal and tannin amendments on microbial biomass and residues in an irrigated sandy soil under arid subtropical conditions}, volume = 50, year = 2013 } @presentation{boettger2014puma, abstract = {PUMA ist ein Publikationsmanagement-System für die Verwaltung von Internetlesezeichen und zur gemeinsamen Nutzung von wissenschaftlichen Publikationen. Die Organisation geschieht dabei über Tags. PUMA ist so konzipiert, dass es als alleiniges Eingabeportal für bibliografische Metadaten dienen kann. So können Forscher PUMA nicht nur als Online-Literaturverwaltung nutzen, sondern auch eigene Publikationen auf dem Dokumentenserver ihrer Universitätsbibliothek veröffentlichen. Ein Modul, das für das Discovery System VuFind entwickelt wurde, schlägt Brücken zwischen den Funktionen der klassischen VuFind-Favoritenliste und Cloud-Funktionen von PUMA. Favorisierte Literaturrecherchen können in einer Merkliste gespeichert werden und stehen sofort in PUMA zur Verfügung. Dadurch lassen sich die Publikationen in eine Vielzahl von Formaten und Zitierstilen exportieren. Zudem müssen die Einträge nicht über verschiedene Merklisten gruppiert werden, sondern lassen sich durch die freie Vergabe von Tags deutlich differenzierter organisieren. Mit PUMA können die Nutzer ihre Daten leicht mit anderen teilen, was es auch für den Einsatz in der Lehre und für Forschergruppen interessant macht. Eine Erweiterung für die eLearning-Plattform Moodle erlaubt die Literaturverwaltung für Online-Kurse. Ähnliche Anwendungen für weitere Plattformen werden derzeit entwickelt. Plugins für Content-Management-Systeme ermöglichen Wissenschaftlern sowie Forscher- und Seminargruppen die Darstellung von in PUMA gepflegten Publikationen auf ihren Webseiten. Metadaten aus Publikationswebseiten vieler wissenschaftlicher Verlage lassen sich über Browser-Plugins in PUMA importieren und stehen zur Weiterverarbeitung in Literaturverwaltungen wie Citavi oder zum Zitieren in einer Textverarbeitung zur Verfügung.
PUMA fügt sich mit diesen Schnittstellen in die bestehenden Anwendungen einer Hochschulbibliothek ein, ergänzt sie als Werkzeug zur Publikationsverwaltung und unterstützt seine Anwender in Forschung und Lehre.}, address = {Reutlingen}, author = {Böttger, Sebastian}, interhash = {fe8d3f1d9a0c7f0b0f4c4ae156a9b00d}, intrahash = {4c4916e0eab92e3e6465711a2f978885}, month = {07}, publisher = {Berufsverband Information Bibliothek}, title = {PUMA als Schnittstelle zwischen Discovery Service, Institutional Repository und eLearning}, url = {http://www.opus-bayern.de/bib-info/volltexte//2014/1674}, urn = {urn:nbn:de:0290-opus-16749}, year = 2014 } @inproceedings{gutfeld2014technical, abstract = {Customized planning, engineering and build-up of factory plants are very complex tasks, where project management contains lots of risks and uncertainties. Existing simulation techniques could help massively to evaluate these uncertainties and achieve improved and at least more robust plans during project management, but are typically not applied in industry, especially at SMEs (small and medium-sized enterprises). This paper presents some results of the joint research project simject of the Universities of Paderborn and Kassel, which aims at the development of a demonstrator for a simulation-based and logistic-integrated project planning and scheduling. Based on the researched state-of-the-art, requirements and a planning process are derived and described, as well as a draft of the current technical infrastructure of the intended modular prototype. First plug-ins for project simulation and multi-project optimization are implemented and already show possible benefits for the project management process.}, address = {Savannah}, author = {Gutfeld, Thomas and Jessen, Ulrich and Wenzel, Sigrid and Laroque, Christoph and Weber, Jens}, booktitle = {Proceedings of the 2014 Winter Simulation Conference}, editor = {Tolk, A. and Diallo, S. Y. and Ryzhov, I. O. and Yilmaz, L. and Buckley, S. and Miller, J. A.}, interhash = {935822e53cf3e3d87de128388cb082c7}, intrahash = {3fd55ee9c247ab445252897b78e2b229}, month = dec, number = 1, organization = {IEEE}, publisher = {Omnipress}, title = {A Technical Concept for Plant Engineering by Simulation-based and Logistic-integrated Project Management}, url = {http://informs-sim.org/wsc14papers/includes/files/305.pdf}, year = 2014 }